[r-cran-vgam] 37/63: Import Upstream version 0.9-4
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:35 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit b219bd7b6ec26985916c5758eaeede395ef0d841
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:59 2017 +0100
Import Upstream version 0.9-4
---
BUGS | 9 +
DESCRIPTION | 29 +-
MD5 | 771 +++++----
NAMESPACE | 33 +-
NEWS | 54 +-
R/Links.R | 14 +-
R/aamethods.q | 118 +-
R/add1.vglm.q | 2 +-
R/attrassign.R | 4 +-
R/bAIC.q | 16 +-
R/build.terms.vlm.q | 11 +-
R/calibrate.q | 4 +-
R/cao.R | 2 +-
R/cao.fit.q | 104 +-
R/coef.vlm.q | 37 +-
R/cqo.R | 2 +-
R/cqo.fit.q | 104 +-
R/deviance.vlm.q | 44 +-
R/effects.vglm.q | 15 +-
R/family.actuary.R | 1089 ++++++++----
R/family.aunivariate.R | 536 ++++--
R/family.basics.R | 244 ++-
R/family.binomial.R | 395 +++--
R/family.bivariate.R | 625 +++++--
R/family.categorical.R | 517 +++---
R/family.censored.R | 82 +-
R/family.circular.R | 56 +-
R/family.exp.R | 16 +-
R/family.extremes.R | 240 ++-
R/family.functions.R | 19 +-
R/family.genetic.R | 16 +-
R/family.glmgam.R | 583 ++++---
R/family.loglin.R | 109 +-
R/family.math.R | 10 +-
R/family.mixture.R | 90 +-
R/family.nonlinear.R | 12 +-
R/family.normal.R | 777 ++++++---
R/family.others.R | 156 +-
R/family.positive.R | 344 ++--
R/family.qreg.R | 685 +++++---
R/family.quantal.R | 81 +-
R/family.rcim.R | 38 +-
R/family.rcqo.R | 2 +-
R/family.robust.R | 75 +-
R/family.rrr.R | 1124 ++++++------
R/family.sur.R | 36 +-
R/family.survival.R | 56 +-
R/family.ts.R | 50 +-
R/family.univariate.R | 3170 ++++++++++++++++++++++++----------
R/family.vglm.R | 2 +-
R/family.zeroinf.R | 1293 +++++++++-----
R/fittedvlm.R | 6 +-
R/formula.vlm.q | 93 +-
R/generic.q | 2 +-
R/links.q | 20 +-
R/logLik.vlm.q | 28 +-
R/lrwaldtest.R | 2 +-
R/model.matrix.vglm.q | 78 +-
R/mux.q | 42 +-
R/nobs.R | 2 +-
R/plot.vglm.q | 6 +-
R/predict.vgam.q | 32 +-
R/predict.vglm.q | 2 +-
R/predict.vlm.q | 56 +-
R/print.vglm.q | 2 +-
R/print.vlm.q | 2 +-
R/qrrvglm.control.q | 51 +-
R/qtplot.q | 2 +-
R/residuals.vlm.q | 2 +-
R/rrvglm.R | 2 +-
R/rrvglm.control.q | 4 +-
R/rrvglm.fit.q | 365 ++--
R/s.q | 2 +-
R/s.vam.q | 46 +-
R/simulate.vglm.R | 62 +
R/smart.R | 40 +-
R/step.vglm.q | 2 +-
R/summary.vgam.q | 2 +-
R/summary.vglm.q | 2 +-
R/summary.vlm.q | 40 +-
R/vgam.R | 2 +-
R/vgam.control.q | 4 +-
R/vgam.fit.q | 190 +-
R/vgam.match.q | 4 +-
R/vglm.R | 2 +-
R/vglm.control.q | 2 +-
R/vglm.fit.q | 371 ++--
R/vlm.R | 8 +-
R/vlm.wfit.q | 24 +-
R/vsmooth.spline.q | 24 +-
build/vignette.rds | Bin 380 -> 0 bytes
data/Huggins89.t1.rda | Bin 441 -> 443 bytes
data/Huggins89table1.rda | Bin 443 -> 444 bytes
data/cfibrosis.rda | Bin 0 -> 264 bytes
data/lakeO.rda | Bin 0 -> 335 bytes
data/prinia.rda | Bin 1229 -> 1230 bytes
data/wine.rda | Bin 0 -> 269 bytes
demo/binom2.or.R | 45 +-
demo/cqo.R | 129 +-
demo/distributions.R | 40 +-
demo/lmsqreg.R | 41 +-
demo/vgam.R | 22 +-
demo/zipoisson.R | 37 +-
inst/doc/categoricalVGAM.R | 278 ---
inst/doc/categoricalVGAM.Rnw | 2323 -------------------------
inst/doc/categoricalVGAM.pdf | Bin 642552 -> 0 bytes
man/AB.Ab.aB.ab.Rd | 2 +-
man/ABO.Rd | 2 +-
man/AICvlm.Rd | 4 +-
man/BICvlm.Rd | 4 +-
man/Coef.qrrvglm-class.Rd | 4 +-
man/Coef.qrrvglm.Rd | 8 +-
man/Coef.rrvglm-class.Rd | 2 +-
man/Coef.rrvglm.Rd | 2 +-
man/CommonVGAMffArguments.Rd | 26 +-
man/G1G2G3.Rd | 2 +-
man/Huggins89.t1.Rd | 36 +-
man/Inv.gaussian.Rd | 4 +-
man/Links.Rd | 6 +-
man/MNSs.Rd | 1 +
man/QvarUC.Rd | 8 +-
man/Select.Rd | 262 +++
man/SurvS4-class.Rd | 4 +
man/SurvS4.Rd | 6 +-
man/Tol.Rd | 12 +-
man/VGAM-package.Rd | 4 +-
man/acat.Rd | 3 +-
man/alaplace3.Rd | 21 +-
man/amh.Rd | 3 +-
man/amlbinomial.Rd | 9 +-
man/amlnormal.Rd | 11 +-
man/amlpoisson.Rd | 1 +
man/benfUC.Rd | 3 +-
man/benini.Rd | 2 +-
man/beta.ab.Rd | 9 +-
man/betaII.Rd | 4 +-
man/betabinomUC.Rd | 4 +-
man/betabinomial.Rd | 5 +-
man/betabinomial.ab.Rd | 3 +-
man/betaff.Rd | 5 +-
man/betageomUC.Rd | 4 +-
man/betageometric.Rd | 4 +-
man/betaprime.Rd | 1 +
man/{frank.Rd => bifrankcop.Rd} | 3 +-
man/{frankUC.Rd => bifrankcopUC.Rd} | 0
man/bigumbelI.Rd | 4 +-
man/bilogistic4.Rd | 2 +-
man/binom2.or.Rd | 2 +-
man/binom2.orUC.Rd | 6 +-
man/binom2.rhoUC.Rd | 8 +-
man/binomialff.Rd | 4 +-
man/binormal.Rd | 2 +-
man/binormalUC.Rd | 5 +-
man/binormalcop.Rd | 2 +-
man/bistudenttUC.Rd | 6 +-
man/bivgamma.mckay.Rd | 2 +-
man/borel.tanner.Rd | 12 +-
man/bortUC.Rd | 36 +-
man/calibrate.qrrvglm.Rd | 8 +-
man/calibrate.qrrvglm.control.Rd | 4 +-
man/cao.Rd | 3 +-
man/cao.control.Rd | 14 +-
man/cardioid.Rd | 2 +-
man/cauchy.Rd | 32 +-
man/cennormal.Rd | 6 +-
man/cenpoisson.Rd | 8 +-
man/cfibrosis.Rd | 71 +
man/cgumbel.Rd | 2 +-
man/chinese.nz.Rd | 9 +-
man/chisq.Rd | 2 +-
man/clo.Rd | 3 +
man/cloglog.Rd | 13 +-
man/coalminers.Rd | 7 +
man/coefvlm.Rd | 100 ++
man/concoef.Rd | 4 +-
man/constraints.Rd | 15 +-
man/cqo.Rd | 96 +-
man/cratio.Rd | 2 +-
man/cumulative.Rd | 15 +-
man/dagum.Rd | 7 +-
man/depvar.Rd | 2 +-
man/df.residual.Rd | 2 +-
man/dirichlet.Rd | 15 +-
man/dirmultinomial.Rd | 20 +-
man/double.cennormal.Rd | 2 +-
man/enzyme.Rd | 2 +
man/erlang.Rd | 5 +-
man/eunifUC.Rd | 2 +
man/expexp.Rd | 2 +-
man/exponential.Rd | 11 +-
man/felix.Rd | 6 +-
man/felixUC.Rd | 16 +-
man/fff.Rd | 2 +-
man/fgm.Rd | 3 +-
man/fgmUC.Rd | 1 +
man/fill.Rd | 67 +-
man/fisk.Rd | 7 +-
man/fittedvlm.Rd | 2 +-
man/foldnormal.Rd | 19 +-
man/frechet.Rd | 2 +-
man/frechetUC.Rd | 2 +
man/freund61.Rd | 2 +-
man/fsqrt.Rd | 2 +-
man/gamma1.Rd | 5 +-
man/gamma2.Rd | 9 +-
man/gamma2.ab.Rd | 9 +-
man/gammahyp.Rd | 4 +-
man/garma.Rd | 15 +-
man/genbetaII.Rd | 2 +-
man/gengamma.Rd | 9 +-
man/genpoisson.Rd | 10 +-
man/geometric.Rd | 9 +-
man/get.smart.Rd | 35 +-
man/get.smart.prediction.Rd | 26 +-
man/gev.Rd | 10 +-
man/gompertz.Rd | 3 +-
man/gpd.Rd | 12 +-
man/grc.Rd | 183 +-
man/gumbel.Rd | 82 +-
man/gumbelII.Rd | 8 +-
man/hormone.Rd | 4 +-
man/huber.Rd | 16 +-
man/hypersecant.Rd | 8 +-
man/hzeta.Rd | 7 +-
man/hzetaUC.Rd | 26 +-
man/iam.Rd | 4 +-
man/{identity.Rd => identitylink.Rd} | 18 +-
man/inv.gaussianff.Rd | 4 +-
man/invbinomial.Rd | 8 +-
man/invlomax.Rd | 7 +-
man/invlomaxUC.Rd | 4 +-
man/invparalogistic.Rd | 7 +-
man/invparalogisticUC.Rd | 2 +-
man/is.parallel.Rd | 4 +-
man/is.smart.Rd | 16 +-
man/koenker.Rd | 8 +-
man/kumar.Rd | 6 +-
man/lakeO.Rd | 110 ++
man/laplace.Rd | 6 +-
man/leipnik.Rd | 6 +-
man/levy.Rd | 4 +-
man/lgammaff.Rd | 6 +-
man/lindUC.Rd | 8 +-
man/lindley.Rd | 3 +-
man/lino.Rd | 14 +-
man/lms.bcg.Rd | 9 +-
man/lms.bcn.Rd | 16 +-
man/lms.yjn.Rd | 27 +-
man/logF.Rd | 3 +
man/logF.UC.Rd | 7 +-
man/logLikvlm.Rd | 113 ++
man/logUC.Rd | 5 +-
man/logff.Rd | 14 +-
man/logistic.Rd | 11 +-
man/loglapUC.Rd | 5 +-
man/loglaplace.Rd | 51 +-
man/lognormal.Rd | 20 +-
man/lomax.Rd | 5 +-
man/lomaxUC.Rd | 9 +-
man/lqnorm.Rd | 4 +-
man/lrtest.Rd | 6 +-
man/lvplot.qrrvglm.Rd | 35 +-
man/lvplot.rrvglm.Rd | 7 +-
man/makeham.Rd | 3 +-
man/margeff.Rd | 4 +-
man/maxwell.Rd | 2 +-
man/mccullagh89.Rd | 2 +-
man/micmen.Rd | 5 +-
man/mix2exp.Rd | 7 +-
man/mix2normal.Rd | 14 +-
man/mix2poisson.Rd | 9 +-
man/mlogit.Rd | 2 +-
man/model.framevlm.Rd | 2 +
man/morgenstern.Rd | 4 +-
man/multinomial.Rd | 7 +-
man/nakagami.Rd | 6 +-
man/nbcanlink.Rd | 2 +-
man/negbinomial.Rd | 55 +-
man/negbinomial.size.Rd | 9 +-
man/normal.vcm.Rd | 15 +-
man/notdocumentedyet.Rd | 32 +-
man/paralogistic.Rd | 6 +-
man/paralogisticUC.Rd | 2 +-
man/paretoff.Rd | 4 +-
man/perks.Rd | 3 +-
man/persp.qrrvglm.Rd | 6 +-
man/plotqrrvglm.Rd | 6 +-
man/plotqtplot.lmscreg.Rd | 2 +
man/plotrcim0.Rd | 2 +
man/plotvgam.Rd | 2 +-
man/plotvglm.Rd | 4 +-
man/pneumo.Rd | 2 +-
man/poisson.points.Rd | 6 +-
man/poisson.pointsUC.Rd | 8 +-
man/poissonff.Rd | 9 +-
man/posbernUC.Rd | 13 +-
man/posbernoulli.b.Rd | 2 +
man/posbernoulli.t.Rd | 7 +-
man/posbernoulli.tb.Rd | 3 +-
man/posbinomUC.Rd | 4 +-
man/posbinomial.Rd | 3 +-
man/posgeomUC.Rd | 6 +-
man/posnegbinUC.Rd | 3 +
man/posnegbinomial.Rd | 9 +-
man/posnormUC.Rd | 1 +
man/posnormal.Rd | 4 +-
man/pospoisson.Rd | 7 +-
man/predictqrrvglm.Rd | 15 +-
man/prentice74.Rd | 4 +-
man/prinia.Rd | 8 +-
man/propodds.Rd | 6 +-
man/prplot.Rd | 2 +-
man/put.smart.Rd | 34 +-
man/qrrvglm.control.Rd | 146 +-
man/qtplot.lmscreg.Rd | 16 +-
man/quasipoissonff.Rd | 2 +-
man/rayleigh.Rd | 11 +-
man/rayleighUC.Rd | 3 +
man/rcqo.Rd | 4 +-
man/rdiric.Rd | 36 +-
man/recexp1.Rd | 9 +-
man/recnormal.Rd | 4 +-
man/riceUC.Rd | 8 +-
man/riceff.Rd | 3 +-
man/rigff.Rd | 6 +-
man/rlplot.egev.Rd | 6 +-
man/rrvglm-class.Rd | 2 +-
man/rrvglm.Rd | 18 +-
man/rrvglm.control.Rd | 8 +-
man/seq2binomial.Rd | 3 +-
man/setup.smart.Rd | 21 +-
man/simplex.Rd | 2 +-
man/simulate.vlm.Rd | 208 +++
man/sinmad.Rd | 7 +-
man/sinmadUC.Rd | 3 +-
man/skellam.Rd | 6 +-
man/skellamUC.Rd | 9 +-
man/skewnormUC.Rd | 8 +-
man/skewnormal.Rd | 6 +-
man/slash.Rd | 9 +-
man/slashUC.Rd | 43 +-
man/smart.expression.Rd | 16 +-
man/smart.mode.is.Rd | 27 +-
man/smartpred.Rd | 211 ++-
man/sratio.Rd | 1 +
man/studentt.Rd | 13 +-
man/tikuv.Rd | 4 +-
man/tobit.Rd | 20 +-
man/tobitUC.Rd | 2 +
man/toxop.Rd | 3 +-
man/triangle.Rd | 9 +-
man/trplot.Rd | 2 +-
man/truncweibull.Rd | 2 +-
man/undocumented-methods.Rd | 7 +-
man/uninormal.Rd | 21 +-
man/vgam.Rd | 19 +-
man/vglm-class.Rd | 2 +-
man/vglm.Rd | 38 +-
man/vglm.control.Rd | 17 +-
man/vglmff-class.Rd | 11 +-
man/vonmises.Rd | 2 +-
man/waldff.Rd | 2 +-
man/weibull.Rd | 2 +-
man/weightsvglm.Rd | 2 +-
man/wine.Rd | 91 +
man/wrapup.smart.Rd | 17 +-
man/yip88.Rd | 22 +
man/yulesimon.Rd | 7 +-
man/yulesimonUC.Rd | 7 +-
man/zabinomUC.Rd | 8 +-
man/zabinomial.Rd | 2 +-
man/zageomUC.Rd | 1 +
man/zageometric.Rd | 3 +-
man/zanegbinUC.Rd | 5 +
man/zanegbinomial.Rd | 5 +-
man/zapoisUC.Rd | 4 +
man/zapoisson.Rd | 11 +-
man/zero.Rd | 10 +-
man/zetaff.Rd | 2 +-
man/zibinomUC.Rd | 2 +
man/zibinomial.Rd | 6 +-
man/zigeometric.Rd | 5 +-
man/zinegbinomial.Rd | 2 +-
man/zipebcom.Rd | 2 +-
man/zipf.Rd | 7 +-
man/zipfUC.Rd | 8 +
man/zipoisson.Rd | 15 +-
src/muxr.c | 516 ------
src/muxr3.c | 465 +++++
src/rgam3.c | 12 +-
src/vgam.f | 20 +-
src/vgam3.c | 43 +-
src/vmux.f | 28 +-
src/vmux3.c | 16 +-
vignettes/categoricalVGAM.Rnw | 2323 -------------------------
vignettes/categoricalVGAMbib.bib | 611 -------
396 files changed, 13584 insertions(+), 12833 deletions(-)
diff --git a/BUGS b/BUGS
index 70475f2..d80b37c 100755
--- a/BUGS
+++ b/BUGS
@@ -1,6 +1,15 @@
Here is a list of known bugs.
+2014-02
+
+The subset argument of vgam() may not work, especially with
+multiple responses. To get around this, use subset() to create
+a smaller data frame and then feed that into vgam().
+
+
+
+
2013-11
vgam() can only handle constraint matrices cmat, say, such that
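[Editor's sketch of the subset() workaround described in the BUGS entry above; the data frame and column names here are hypothetical, not from the package:]

    ## Workaround sketch: subset the data before calling vgam(),
    ## rather than passing subset= to vgam() itself.
    ## `mydata` is a hypothetical data frame with two count responses
    ## y1 and y2, a covariate x, and a logical column keep.
    library(VGAM)
    mydata.sub <- subset(mydata, keep)
    fit <- vgam(cbind(y1, y2) ~ s(x), poissonff, data = mydata.sub)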
diff --git a/DESCRIPTION b/DESCRIPTION
index 6548b2a..7a5f7f3 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,22 +1,27 @@
Package: VGAM
-Version: 0.9-3
-Date: 2013-11-11
+Version: 0.9-4
+Date: 2014-05-28
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 3.0.0), methods, splines, stats, stats4
+Depends: R (>= 3.0.0), methods, stats, splines, stats4
Suggests: VGAMdata, MASS
-Description: Vector generalized linear and additive models, and
- associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
- Reduced-Rank VGAMs). This package fits many models and
- distribution by maximum likelihood estimation (MLE) or
- penalized MLE. Also fits constrained ordination models in
- ecology.
+Description: This package fits many (150+) models and
+ distributions by maximum likelihood estimation (MLE)
+ or penalized MLE, using Fisher scoring. This is done
+ via the vector generalized linear and additive model
+ (VGLM/VGAM) classes and associated models (reduced-rank
+ VGLMs, quadratic RR-VGLMs, reduced-rank VGAMs, row-column
+ interaction models). These are abbreviated RR-VGLMs,
+ QRR-VGLMs, RR-VGAMs and RCIMs. These include constrained
+ and unconstrained quadratic ordination (CQO/UQO) models
+ in ecology as well as constrained additive ordination
+ (CAO).
License: GPL-2
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
+NeedsCompilation: yes
LazyLoad: yes
LazyData: yes
-Packaged: 2013-11-11 08:56:33 UTC; tyee001
-NeedsCompilation: yes
+Packaged: 2014-05-28 09:38:42 UTC; tyee001
Repository: CRAN
-Date/Publication: 2013-11-11 10:44:08
+Date/Publication: 2014-05-28 18:09:36
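[For orientation, a minimal VGLM fit along the lines of the package documentation's examples; the pneumo data frame ships with VGAM, and propodds fits a proportional-odds model:]

    library(VGAM)
    pneumo <- transform(pneumo, let = log(exposure.time))
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    coef(fit, matrix = TRUE)  # one column of coefficients per linear predictor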
diff --git a/MD5 b/MD5
index 0627046..e54155d 100644
--- a/MD5
+++ b/MD5
@@ -1,94 +1,94 @@
-8c5675aa567c646d89290b58a4cfaa9f *BUGS
-b910a842720c3379e3dd13c4f917c933 *DESCRIPTION
-e37447d4cea5fa0da1a71f245361a74e *NAMESPACE
-565401ecc939e282c87a3f98eabd6d39 *NEWS
-59420bff11b9834fd831e16ea40bb5de *R/Links.R
-b68412217ebf58e1fab01023685f2e28 *R/aamethods.q
-15d1737d6888f9bb905bb0d2d82a385b *R/add1.vglm.q
-1738067495adbcc1127a1f8f3cc7988a *R/attrassign.R
-4671105abcdfd3f97ae5d90d175a900d *R/bAIC.q
-b3f7c41a22160b2be01388bb4729a2e4 *R/build.terms.vlm.q
-311795db1aa566d8ff417aee9c4df740 *R/calibrate.q
-0a1d46f67706361aa60b097432ea8fc2 *R/cao.R
-504faaa01a59e539ffd15f30f9ff5acd *R/cao.fit.q
-fb3f18427d53bac3ec8c617de30c85d8 *R/coef.vlm.q
-0e7cd8ad7b3eb7105d7a7c861f82aaaf *R/cqo.R
-aa2c06a5c9ca30e30759e18ad0e793f9 *R/cqo.fit.q
-cf576980ae24bf3f85a7020dbd256c21 *R/deviance.vlm.q
-a6b5f34153d4828e30d35addf7d6ba9f *R/effects.vglm.q
-5668902675c4e8818c973a8aca4adbd5 *R/family.actuary.R
-9d0e98376672881f6693a5a4c7af5b61 *R/family.aunivariate.R
-b49d01e3292f91ce879c7e7b1145fad4 *R/family.basics.R
-c1a10631b9328703644c6b22ff393d7e *R/family.binomial.R
-fe3f5130bbed41000e153186dd773c8a *R/family.bivariate.R
-1bb78a77f7ac54cc92004d0e9f162e9e *R/family.categorical.R
-3ae5afb6905d7b3c5ea35c706e445920 *R/family.censored.R
-ced3316d303a84675faad1940500beba *R/family.circular.R
-3b7b74a4ef27e54824a4af494c0c721f *R/family.exp.R
-4549f805202ec51f190ceea31f99396c *R/family.extremes.R
-84b6392d2e2ab0572c82e48289a36086 *R/family.functions.R
-f7ffc2d272c714de5312d5cb622ca252 *R/family.genetic.R
-57af159832a0ed6286695e496c8a3106 *R/family.glmgam.R
-a6b54c94a4b3a3194d6e2195389a5cbe *R/family.loglin.R
-196748235f416e4ea7bab4ba3e89c735 *R/family.math.R
-b1df44d9c95b0a181b5c04d608433473 *R/family.mixture.R
-1235bba7beac46448fec0eb4ae270247 *R/family.nonlinear.R
-ff0ae86bd8d96ed432db192282a36a19 *R/family.normal.R
-8af9bc39cfc06bc48b430e5dfa3e74bd *R/family.others.R
-004d050b5870d5be780d2bc9c432a8bc *R/family.positive.R
-222f359413083e8f5df24f5bcc72c326 *R/family.qreg.R
-2a7d98513a51f5f184162b42a3dfb0e4 *R/family.quantal.R
-37446b205d1c2511162d879f8896ac70 *R/family.rcim.R
-f9ed7587232d6df5e85bafa74ec287d9 *R/family.rcqo.R
-374b452b264858120b6fe4c96fb2f09a *R/family.robust.R
-0301dc8dd0500af5ae42fe1c2745bda3 *R/family.rrr.R
-14aa4be83182d3144b232b0bf092a9cb *R/family.sur.R
-a9db58da404d57dba9c49297e09f5de6 *R/family.survival.R
-89c9cd66745c1821d65a3b42b2450867 *R/family.ts.R
-13eabf023bb3d4adb8b953c655a91809 *R/family.univariate.R
-cda84d0b9c9b9febcbce42193cd4d86c *R/family.vglm.R
-1241fa6635376025a3ce09b463818256 *R/family.zeroinf.R
-bb94b04f5e868cba658a15690a98f92a *R/fittedvlm.R
-26ec5b1b01a6c0533d11223e46d48f5c *R/formula.vlm.q
-6ac6db14a8544716dd49fdc95cc06893 *R/generic.q
-ffb44fe3c44c388a0519b758d63308ce *R/links.q
-7ea40ac8830ca2f0a346a4244eceb228 *R/logLik.vlm.q
-14cc4a53774931fcc2bf130a5961b197 *R/lrwaldtest.R
-ea71f8066cb5ee1ed6615608e3d6d3b7 *R/model.matrix.vglm.q
-fcbd53c0bd1e21449e830a04f6be5b0e *R/mux.q
-22818bb0f90e52348f924f17a83c49ae *R/nobs.R
-bebc81a8fda1b69c9e27230ea163ce8f *R/plot.vglm.q
-6847d405f3a0341820301dabd53d8ab5 *R/predict.vgam.q
-781f33cd4e62a7c257952e9362438e6b *R/predict.vglm.q
-5028a2f7e1673086aa97cecd32cfe51c *R/predict.vlm.q
-bf13a3f20024f5e86dd6d38b06d9b4e4 *R/print.vglm.q
-940248a46a8416309969ac8c5dd40d1c *R/print.vlm.q
-040f26d21df40bc36d99249db0dfbfa3 *R/qrrvglm.control.q
-dcf8385e539b00336f686acbfb668783 *R/qtplot.q
-976febd2962ad51d6b1696c682ced15e *R/residuals.vlm.q
-9883705f0c2220c32b62e9e81751a0bb *R/rrvglm.R
-a3ce0cf9a60b25826617da994ca429f4 *R/rrvglm.control.q
-300acbfd67a6481d082202c3fd691520 *R/rrvglm.fit.q
-d0f49d2c6af83f79ce6894993a42b79d *R/s.q
-8e27b82927b85d6cd0e6d925854e2216 *R/s.vam.q
-176cba5df3fbe75d6fa0813d210d2cee *R/smart.R
-1bccef4ed724ae0a8d0f677c7d12c15d *R/step.vglm.q
-e01d1641a65f79de44e2cfee95a2f1e8 *R/summary.vgam.q
-d2aecdb1141d8c2486f2873a54bed7b1 *R/summary.vglm.q
-3c6b2ee4fc757b53078cc50a04aa9aa6 *R/summary.vlm.q
-a2f1a96aea82d912a0fc93f2499fe8a6 *R/vgam.R
-f374f7fae261b36a8c91939c5105b007 *R/vgam.control.q
-09feb272244683c3f0f0cc5c7b79dbf9 *R/vgam.fit.q
-c0d5062be47a6b446b4b4416e0a6f436 *R/vgam.match.q
-4c319e2a242c0aa7cca457b890c34e44 *R/vglm.R
-b54d7372f871e98fb7a81c846e4f7f18 *R/vglm.control.q
-c0ab3dbe4060941da72a0d46aec995b1 *R/vglm.fit.q
-9e44046b8fac44f5b19622a18fd817bb *R/vlm.R
-5a9f82ff56f366f908ef6282f03d5c9e *R/vlm.wfit.q
-e8b766779301274b0e5687ea9bda6039 *R/vsmooth.spline.q
-c6883c2e9dc163e099ef80d9dc60697e *build/vignette.rds
-b4cd6d82916eeed0e7ea66d88d47bfcb *data/Huggins89.t1.rda
-6d595282e66deeba6520b9e6036ab9a9 *data/Huggins89table1.rda
+66414b6ed296192426033f4ac29a6af2 *BUGS
+65dbed4d4709192005caace0ace41093 *DESCRIPTION
+40ee4f24a8b7a7a083f93a031907066d *NAMESPACE
+c079de6fe31ca109067e66373e433ee7 *NEWS
+21f682d56c4fc9327d48e2179788422e *R/Links.R
+1de714ae1b807fc880805d2aee94d7a1 *R/aamethods.q
+a0675022e96fd5fb7fddac8f0a57fd30 *R/add1.vglm.q
+b2b2be3f5bab46f3400b3e0448dd1e37 *R/attrassign.R
+a4768fa044164f345867ae8f77c4b5d9 *R/bAIC.q
+749604f5f400577a87ba72d3776660c4 *R/build.terms.vlm.q
+7f5741c3d74a10d20d2af5a9b726ded1 *R/calibrate.q
+e6585b1cef707a15cc6f07be91c46925 *R/cao.R
+71175038e1350d91ea14061aadaaac12 *R/cao.fit.q
+4dc53e3eae827b9729fb799d6d8b3d7a *R/coef.vlm.q
+c8c22372a0a69c23aeb94fddab38428e *R/cqo.R
+8d234b9f98d2e5413a9a2e177ee083bc *R/cqo.fit.q
+5e023504bbd292f65c3cb661bdd1a103 *R/deviance.vlm.q
+3d38c5950f4f9e52085e9ba782feeb90 *R/effects.vglm.q
+dc5a9908d2aef316c0ec049cf1ec731c *R/family.actuary.R
+eaf63a235b8d77e71ef81a24ad73d501 *R/family.aunivariate.R
+6f9837f66c09de69d5e2d5e436321647 *R/family.basics.R
+2e69a109b9d2eedbc7557c6bf64d2e4e *R/family.binomial.R
+923e995bd9bb91e9a174d980b7f6aa3e *R/family.bivariate.R
+8591e4e3bc3a58624bbe2fc7535628a3 *R/family.categorical.R
+63962030a0d145780c8cee5c6b01182a *R/family.censored.R
+c3026d87fe298fc355d6246bcef9592a *R/family.circular.R
+92fd84e0156ec22a2d97ab7a64b4ae5e *R/family.exp.R
+e79c06e47264498e2efea95c3e059dcb *R/family.extremes.R
+b7be2dc5c08287d4de03813371580486 *R/family.functions.R
+1a5a57fef114fa8a723a349f9569ecc6 *R/family.genetic.R
+f6bc5e914f9cdce4cb77c73ff5ef3261 *R/family.glmgam.R
+3e9ceb976c148ea8c84a075faf4f17b1 *R/family.loglin.R
+530540365fe3dcc08df646434e4f89cf *R/family.math.R
+5857ce228880accd72948eefa5473c79 *R/family.mixture.R
+d07db31f1d78500fa771541c96ac9b56 *R/family.nonlinear.R
+5d4cf6ddae66090b22d809bcce630d2e *R/family.normal.R
+3a1a58857f0dbb061554ddbc5de6a83e *R/family.others.R
+5dca7e937d0283c3de6ac527a5f1e886 *R/family.positive.R
+0366e052ff3c037b7c34888ebb1bfcfb *R/family.qreg.R
+1872cae3106eeebab56fb5b77686c55e *R/family.quantal.R
+4138f00529378d68b3e7f132406d1238 *R/family.rcim.R
+ebcafc714199b26cbb8a3ba56ccb28bb *R/family.rcqo.R
+298a298e0df22e88b2ebc89e552771c8 *R/family.robust.R
+aad7d2bedce4ad11f05e2a16bd3fe3d4 *R/family.rrr.R
+38e15e4f9090bbf918f3fa44ea3b2284 *R/family.sur.R
+dba8d81981e93b994f26de19d1a1b6f8 *R/family.survival.R
+36448d9640c9dcea8568c4c1ed4c1ea9 *R/family.ts.R
+5c134fb9f96a959466c1ae44eddccf07 *R/family.univariate.R
+23e25d09aed81048919b15e35d2a3fdf *R/family.vglm.R
+8fb138a98bd398259a8ea78f96b4fede *R/family.zeroinf.R
+10d7ae4e806309cb6c8b5d541e227585 *R/fittedvlm.R
+e0f39e9a543e616c179f53c153ada17b *R/formula.vlm.q
+66dceb0aa1906a6358ddf5840e5c3a10 *R/generic.q
+3532a47fb5dc1f75a48281dd64bdf0f4 *R/links.q
+fa5db320fd8b18c7810e228a13730571 *R/logLik.vlm.q
+9b3ae4a5273f991e6ac2feac29320343 *R/lrwaldtest.R
+b4b3fdae6ee99caee6d9185213148740 *R/model.matrix.vglm.q
+6aa9138e0343b812d000949720fa3036 *R/mux.q
+b294455eb2982638f52c28176249218a *R/nobs.R
+3538974f12ad64b4fd1b439b4dd0a24c *R/plot.vglm.q
+d3050d098e945c23a2617410e4698b9a *R/predict.vgam.q
+040d0f0864c562edb80d01199011caed *R/predict.vglm.q
+1685c6757a7ddf887d842bfdcf66bff9 *R/predict.vlm.q
+6b6c07d978349b4c2dd82d1e3768f763 *R/print.vglm.q
+2a6435e29721cdb571796c0f2b2ba2f4 *R/print.vlm.q
+a70ae668b6cfda0ecbbab74d4e0f8e17 *R/qrrvglm.control.q
+4081d6213a8454f5d847ee2981f75ce1 *R/qtplot.q
+cd95e96c163efedcf7dc452b5b9b80aa *R/residuals.vlm.q
+26fdc28282fb9f20f338f878e2078edb *R/rrvglm.R
+63aafad9e90d5ec0e7b35efb3eea4d2a *R/rrvglm.control.q
+ce0b32d9486058bca0cdff470484978e *R/rrvglm.fit.q
+4d6331d1b7f7e9f49511ac68ed7ab830 *R/s.q
+8818a393944e9aacf3ca58907a9e0b8a *R/s.vam.q
+400c72e71f4820b58f292523029c6245 *R/simulate.vglm.R
+e3733f10e243f1a9f433d1a3bebfac14 *R/smart.R
+40b65c32c98ed7fe00459f089182209f *R/step.vglm.q
+df48678099b0d4b643d22d0a25adc5f1 *R/summary.vgam.q
+daef9a10964e0ee69648fcd61e134c67 *R/summary.vglm.q
+35d715ccd0ceac184efe1e85c949af0e *R/summary.vlm.q
+7053fc5a348fa10962cf86faa0cd6be5 *R/vgam.R
+43f5ac8e4ce129c2cfd060f2a720e89d *R/vgam.control.q
+2aa25abd6c64976065b527635b5ce52a *R/vgam.fit.q
+58bb89dc29480d468782ac4e410716de *R/vgam.match.q
+fd0eeed4746bd415316290c34d907f2d *R/vglm.R
+8cb747a68c70f0f98bc2117a64bd6047 *R/vglm.control.q
+9adecdd26db8de0856aba98cdbb305cc *R/vglm.fit.q
+0d279e4ac54a18c3b86931b97b9cb686 *R/vlm.R
+19455ed547e314ec5996c798587d2442 *R/vlm.wfit.q
+5369622d6ffaaf8209d41d5df861284e *R/vsmooth.spline.q
+23322d92942d7d395dbe248845f7ff27 *data/Huggins89.t1.rda
+6a596d2bb591e0593405ed1a6b8bb4bf *data/Huggins89table1.rda
d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
bab76494dc8067695f3b634016765a65 *data/alclevels.rda
c02f13a9cda10a0a0ff58a74ba7b7a84 *data/alcoff.rda
@@ -98,6 +98,7 @@ e762d480945696788738c174b84147c1 *data/backPain.rda
3b8b6009d5fbce815d622c05678c496f *data/beggs.rda
e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
52d2f9cab55848f2dbc0f469b9c0ef94 *data/car.all.rda
+23f6d39f731945a533c86c4b77f4660e *data/cfibrosis.rda
b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
@@ -116,6 +117,7 @@ bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
dfc26b76841c27a6c6fca69fb137f555 *data/hspider.rda
dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
+1f3caa03946758feb9a0cd344e9e7d89 *data/lakeO.rda
a988992fe21c5ef19588440bc2e65fd5 *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
@@ -126,152 +128,155 @@ fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
e1a792d5a43fba44f13bd72fc3252c25 *data/pneumo.rda
0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-f3d44d37a6379352f8a73ec856b569ca *data/prinia.rda
+e7742492a1d4b0ea36ef08c475a96332 *data/prinia.rda
1189583668fac01318e26539ecdc52e2 *data/ruge.rda
d4e79e6a83e94ce43ea81c00a5475427 *data/toxop.rda
1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
8fbebe25dcb4bd9ff9fe14e3604fef31 *data/venice.rda
2210f364ad19eff32bba9423b4a593d2 *data/venice90.rda
e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
+1fa8460cb7624658da0488af0f43a273 *data/wine.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
-532aba4ad4cac611141491a5bb886236 *demo/binom2.or.R
-a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
-6376ee1862c11e847aab6f7f6fd74d24 *demo/distributions.R
-d2c02ccaf4d548cc83b3148e55ff0fa3 *demo/lmsqreg.R
-a3d2728927fc5a3090f8f4ae9af19e1a *demo/vgam.R
-00eee385e1a5c716a6f37797c3b4bec5 *demo/zipoisson.R
+9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
+b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
+2b8f46b985ee4354d6f2cc48ac81df8b *demo/distributions.R
+541e1a831b9abf6a2a5bfe193b03b1b4 *demo/lmsqreg.R
+ab8081763fe2144558be25f3a154327b *demo/vgam.R
+65570d10948785994d70d817f574bd96 *demo/zipoisson.R
60616e1e78fe61c1fd4acdf0d3129747 *inst/CITATION
-2f7d9e53aaea653d70152d64eda3a81a *inst/doc/categoricalVGAM.R
-ab0ec8b49daf41071e1636bb52b71294 *inst/doc/categoricalVGAM.Rnw
-afa76725c38b56f1c3b2e9282ef4dcfe *inst/doc/categoricalVGAM.pdf
387dc3b872f48144d7dc5fdabb9b15c2 *man/AA.Aa.aa.Rd
-b7384a3c4136b52b9d997d81fab15968 *man/AB.Ab.aB.ab.Rd
+48010689a7ea309ce7068a78cb826bcc *man/AB.Ab.aB.ab.Rd
a84233bee5f8105875949d2f47887852 *man/AB.Ab.aB.ab2.Rd
-c58d80b1cb01d6e058b25951e080a05f *man/ABO.Rd
-31d7e2bd48719e41df4b8a2fdabe7f13 *man/AICvlm.Rd
-27f7f9c75e026d1c833dcff3afc9c6c6 *man/BICvlm.Rd
+d81a6dce7eb60dd61bcbfa4a9bffa05c *man/ABO.Rd
+61aea504f08590115f6a1c76298dec26 *man/AICvlm.Rd
+0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
2dda55df0947c86b4614e2d722efb713 *man/Coef.Rd
-5e47a4ab6785fd816784baed909dc8c5 *man/Coef.qrrvglm-class.Rd
-563c43dfaca1676ce0a61f19bc485830 *man/Coef.qrrvglm.Rd
-9335dbbcdb81716ec556bf5bcf0be309 *man/Coef.rrvglm-class.Rd
-dd9202d518789994bd081f16a81631ef *man/Coef.rrvglm.Rd
+a07c068d1608d195ee2ba4c2ce44377d *man/Coef.qrrvglm-class.Rd
+713e4545f026c38ada6a3aafc709cf6b *man/Coef.qrrvglm.Rd
+a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
+4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-863e44c0199b67cbe16316108c576808 *man/CommonVGAMffArguments.Rd
-184a5e03d395c83b20d6e825e22d4759 *man/G1G2G3.Rd
-5d9fd8fec8bfa485686d8ccb002f4c94 *man/Huggins89.t1.Rd
-64ffee88303582877fe76eedb51f797e *man/Inv.gaussian.Rd
-05808209caa226d937f9edf4364db34a *man/Links.Rd
-0a95f8292850ef5b0fcf516400864c84 *man/MNSs.Rd
+9b251bc5d8dcddc426408cfca23bc493 *man/CommonVGAMffArguments.Rd
+392487b64b2b3b65bc466df62b150270 *man/G1G2G3.Rd
+098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
+ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
+3025bd52b3bc055c7f5939bc21b28d3b *man/Links.Rd
+30328145767a8cd2fff97506d5a2e9f7 *man/MNSs.Rd
a730679155e139e134b186e7852c1ef9 *man/Max.Rd
76bbf26207744bec8c21ae1d71701071 *man/Opt.Rd
624e0666b195bc9596e0869aa35823cc *man/Pareto.Rd
-39d9ad33246126697c462d454a1d190e *man/QvarUC.Rd
+ee7e9a7ef0ce310b8fd8286a1ffd56d7 *man/QvarUC.Rd
bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
becc3fe17f46d3c92c2b81da5499da83 *man/SUR.Rd
-2db32b22773df2628c8dbc168636c9f0 *man/SurvS4-class.Rd
-4f4e89cb6c8d7db676f3e5224d450271 *man/SurvS4.Rd
-40c8ffbfe412d1774e540a3e7ddf1f2f *man/Tol.Rd
+021e7a60b6ba9058acf6f7e996a49596 *man/Select.Rd
+20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
+47d9bbe45fe9a53bdd10db39feee0bb0 *man/SurvS4.Rd
+21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-b07f7f8fb09dceb4cb770698fbcb3281 *man/VGAM-package.Rd
-41de97f0bacb4bedc36a589af710ff99 *man/acat.Rd
-97dc0fad7cb454bfa669ea6de9e564a1 *man/alaplace3.Rd
+4e83b546f545fa216bc9c4a29e8f4495 *man/VGAM-package.Rd
+f27b784569a22f080ff1ded6d9bbd17a *man/acat.Rd
+c8865faa06424eb7ead0f3cf4efbbc57 *man/alaplace3.Rd
573cdf092fc48b9b1c1f10e9af6b0fe5 *man/alaplaceUC.Rd
-98d2372de743a12819fb8b33f583ee0a *man/amh.Rd
+5386dd7d0ed806b21fe9626c92cfd068 *man/amh.Rd
f10fff7b5d459f0325e70423488dde18 *man/amhUC.Rd
-3cd4ccbc6bdca02fc5e30e2455ee8719 *man/amlbinomial.Rd
+8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
-7cb04022bf8b6dadd34def3eb23fb776 *man/amlnormal.Rd
-33f35a4ec1b03b95a6ef9736ac771ec2 *man/amlpoisson.Rd
+a9b52ea0ee41c27fdb259aa21a621582 *man/amlnormal.Rd
+ec213548ebb41e47b727541566160dfb *man/amlpoisson.Rd
9f1ddcb0af49daaec702a1284341d778 *man/auuc.Rd
c8efe93df8799ff106b6784e1bf50597 *man/auxposbernoulli.t.Rd
bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
6ac5a3f07851ac3f7e19eaa977365e0f *man/beggs.Rd
-4f14c76e1d581aa54d7a523c105fb08a *man/benfUC.Rd
-2fc68a0b1d11b944430c4290f159b9bf *man/benini.Rd
+80c65642cf41be59e4b49be5d05d93f2 *man/benfUC.Rd
+3ff1c71b6f613cdb990ef0183db42c1b *man/benini.Rd
d970a382e22015a5542a5d2bbe289688 *man/beniniUC.Rd
-5eb9ae4be18386c3c2f8539609db5130 *man/beta.ab.Rd
-1f0cc4a87b011d3367a1049e8dad9a89 *man/betaII.Rd
-5219942fe925a1a83799b51f6d5655ce *man/betabinomUC.Rd
-cc4e4ca6ab31fd48735dbd44629f2209 *man/betabinomial.Rd
-1c27699e07dbf2e5c1d756692c6b6535 *man/betabinomial.ab.Rd
-08edf0cff53ce982a32e28bf9feae6cc *man/betaff.Rd
-1adc8d4b6eac60c0ef38de8459995797 *man/betageomUC.Rd
-0c114f8c3cbe786f1fd17b9898e59386 *man/betageometric.Rd
+ef17305a57f7c7208e749af0acab93b0 *man/beta.ab.Rd
+a9e33b0592552305e3501095b322ee9a *man/betaII.Rd
+39e052506d2f0fe3b733ae4340ae38eb *man/betabinomUC.Rd
+ab6c7d6b7483845ec535b9e764ddb218 *man/betabinomial.Rd
+5b51a37e2d0db141cdf62c4effbed59a *man/betabinomial.ab.Rd
+4fbf0280f00212d01fff554e0d86c4b5 *man/betaff.Rd
+4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
+8a730685525c85f22045f7de14896c4b *man/betageometric.Rd
151cdf70cb16f8095369b88093ba48c7 *man/betanormUC.Rd
-db809a59222bc951683a84c9f06e48ca *man/betaprime.Rd
+5a0a047bcd18649d5076999057bd1d49 *man/betaprime.Rd
0ab04f3892c3b98eb2c914bf8043afb2 *man/biclaytoncop.Rd
94e05525dff5548fadbcd6efad58b086 *man/biclaytoncopUC.Rd
-e79003557390777e6cb8ab38380c673d *man/bigumbelI.Rd
+729cbe9de5f560c300006b548f164d1f *man/bifrankcop.Rd
+f96df0cd8d773d5152f39cf2fb12608c *man/bifrankcopUC.Rd
+44aa896474dda679aee6f833c9fb8062 *man/bigumbelI.Rd
adddf7bb27d9517288660180b4240058 *man/bilogis4UC.Rd
-01b65c61360678a60eb9ebb3c0758db6 *man/bilogistic4.Rd
-64e2272ebc4d5b8a5ed9d934943afd68 *man/binom2.or.Rd
-1f1a653e623b2abbb4662b16070019db *man/binom2.orUC.Rd
+f5eddde2e045ba31b3d37ad7785fdff6 *man/bilogistic4.Rd
+72ce4d6755354bf6082b82891a16fa5d *man/binom2.or.Rd
+dff1f2e8b34e8ebdfa7d090436dd5dbd *man/binom2.orUC.Rd
a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
-c3f3f95519510e5a324c74369bc52a63 *man/binom2.rhoUC.Rd
-2d3fb605e1605c90d0d6160ed9d7d67b *man/binomialff.Rd
-b46c088fd812d7f402a2d9757f022880 *man/binormal.Rd
-ca1a757bf6b2402617898d84abbc0c33 *man/binormalUC.Rd
-dbd8b9788f6f7cee598241a83c64726f *man/binormalcop.Rd
+a784926c9e5eb31b3ba4f40c1202fca3 *man/binom2.rhoUC.Rd
+83d059c09800ddf81edaf22f3557a039 *man/binomialff.Rd
+7bddfc42ae50a6b5a86e40e7d3f78cf0 *man/binormal.Rd
+4fdf8e186c66f5627ae9b8681cb72ae4 *man/binormalUC.Rd
+ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
00a210fc4a1bf5bf21f6da4f63dad66d *man/bisa.Rd
8104993144f45c1fbe49da814cb05a41 *man/bisaUC.Rd
18ab34ad46a2437bf0bcc89957164418 *man/bistudentt.Rd
-a55c8615d46c010bdd9d61ee81f1041a *man/bistudenttUC.Rd
-a8639663ca91af2d81b888cb59fc37ae *man/bivgamma.mckay.Rd
+0489e2ceeed7b2aaf9cbcf6cfcabae81 *man/bistudenttUC.Rd
+2a8c550e3daf4b2874de434bf2bf6004 *man/bivgamma.mckay.Rd
81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
-44f06f92ed85ef1cf5e447ffed182989 *man/borel.tanner.Rd
-4e692566eefaedf275e8693ea2f6efbe *man/bortUC.Rd
+2458f06d80b410c2c41b2ea691e1a668 *man/borel.tanner.Rd
+a7c85af5f86dd3cb74a1cb87bdbad789 *man/bortUC.Rd
b727c9787c7fcfe1e3dc19f92f6a4cb1 *man/brat.Rd
4b158e93b6c981f016ed121e987c50b7 *man/bratUC.Rd
5ee1485749d235a2d1aa1be8849accc7 *man/bratt.Rd
f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
-f1b9a1c35838eceaf41c61e06164f9da *man/calibrate.qrrvglm.Rd
-bde0c3c0dcbbd53b6a415ae3b73d2b9c *man/calibrate.qrrvglm.control.Rd
-6283590c76e5dcf1aff1e0a1314d970b *man/cao.Rd
-e270445a52d0e9e00086d4f437e2078b *man/cao.control.Rd
+22f73cce0070ea9bb785567af837e14f *man/calibrate.qrrvglm.Rd
+abeec828fcb66694c75bcefed43a70c1 *man/calibrate.qrrvglm.control.Rd
+d5906e548cdac74caa0e9b8ffacb2f59 *man/cao.Rd
+43460f676389d6a89d57d646ef83f314 *man/cao.control.Rd
af70e01bb01bebbc1d06e309d8ec6ba5 *man/cardUC.Rd
-3d662c3707b6b1e9d8dea58850a94f2d *man/cardioid.Rd
+3abb66a3286726574ccc6ac96fa5d3cb *man/cardioid.Rd
bfe6f5beb1de5e92cbf788afff8c4022 *man/cauchit.Rd
-b4447d89801713c6c067c1d14dce2c25 *man/cauchy.Rd
+6bea5a8d5ead4312b2f878fc9e6e8e84 *man/cauchy.Rd
9035d92ae411d748c08d35086d5d3be1 *man/cdf.lmscreg.Rd
-94b38b26a83a96d6ab67911eaaaa8954 *man/cennormal.Rd
-4f6fb991110be4815314754a4ed4432d *man/cenpoisson.Rd
+656cad836c7b8f0257f2e87422779d87 *man/cennormal.Rd
+affb84486f73c830ef44682c99919bb7 *man/cenpoisson.Rd
+94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
-b986ad79160959105d5a22a6d110504b *man/cgumbel.Rd
+64eef4f31abc80ff9f0daf23e6866d1d *man/cgumbel.Rd
1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
-df235f073a2db2ac0bb1530b33b87896 *man/chinese.nz.Rd
-c82dda43b26d7b0e0009ed38b76ba381 *man/chisq.Rd
-8ecbb478efcf4b0184a994182b5b2b94 *man/clo.Rd
-d3e192aff657835843eed8b6cb3c5fe2 *man/cloglog.Rd
-1aa6ee888bb532eef1f232c9f6a02b5d *man/coalminers.Rd
+922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
+9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
+aff05a422130d8ced689190eec1b09dd *man/clo.Rd
+66677ed162d3e368ad0f330c49467a25 *man/cloglog.Rd
+b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
+e492f5f148514df05cc4bf101b7505e2 *man/coefvlm.Rd
8a8b05c233949dd6095d4d11ff31326a *man/concoef-methods.Rd
-647dabdfe18b69f1ef4a7d08b3b5a625 *man/concoef.Rd
-cb0e57f8d57e64cd0698f6acfe494adb *man/constraints.Rd
+364060a0f3177a3d5144cdd677e0f55a *man/concoef.Rd
+e9cef803313f5a964f99b76995dd235f *man/constraints.Rd
523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
-074850576b28ecd9d41090805c0bc9d6 *man/cqo.Rd
+7ac196f506705ff2ed574b37fffbab9e *man/cqo.Rd
8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
-e591cff73505c3e967aea2aa47a4dddf *man/cratio.Rd
-f6b0e6e3ea8064c9556a773963d737ca *man/cumulative.Rd
-c909335c97a9ae26916016dfcc31b804 *man/dagum.Rd
+ca3db2c26abb8120651e1d179ac6fbb3 *man/cratio.Rd
+d7fe2dd88f14e6c9a3bc2fc1f7f2211a *man/cumulative.Rd
+9b4269d1f89f9cd1181012d53d1d3631 *man/dagum.Rd
97868e30408a4a35750f9692f5e87b68 *man/dagumUC.Rd
8fa6a29bde444a45be31b3d8979afc00 *man/deermice.Rd
dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
-af4a340811c4458baf556a340192208b *man/depvar.Rd
-b9edd4abba2c0772b342f440536d50d4 *man/df.residual.Rd
-87b5592713a2367b4559c84944307614 *man/dirichlet.Rd
+0e0f2e7368fa906e837d8432bb3cfb36 *man/depvar.Rd
+bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
+63207ab9225a1e84c8a6efadc8fb4302 *man/dirichlet.Rd
07eb43ee6da403b89b19ff55406ab130 *man/dirmul.old.Rd
-d5c9bd16cfcf80f66fa1af56977c236d *man/dirmultinomial.Rd
-6e6523b060c3e982a559bf73fb42a924 *man/double.cennormal.Rd
+342522859f8dcb3c621738ee45bd3f7b *man/dirmultinomial.Rd
+534db08b8488159ac057e6c4cde5cff4 *man/double.cennormal.Rd
0f57c4635e0faf9485cf7e208098ce66 *man/double.expbinomial.Rd
f8f3e5bb715d519d3c99cc94c81bae93 *man/eexpUC.Rd
8271f348d0cfbd2765ae2e86c745ba2a *man/enormUC.Rd
-72492c419917c144ffadc656ee56a63b *man/enzyme.Rd
+ca3e766bd344902d3b8bf05c65d6c12b *man/enzyme.Rd
b733cc1da9bd902ea8903b9a53cf9bba *man/erf.Rd
-0ca2068324f7898df1516fe8081c45bd *man/erlang.Rd
-3f633760a4767aae2fd8ce930befa08b *man/eunifUC.Rd
-00704fa05f7f6fcd1166a886c0a56b72 *man/expexp.Rd
+5f6cdc1e6862241215ae5b340dde4825 *man/erlang.Rd
+a41abeaa6bc1cb826199b1bfdeed8427 *man/eunifUC.Rd
+fddd62bd1da33ed1d01a8f67b5160efd *man/expexp.Rd
996fe6f72ef5c7097c4677153ddfce4e *man/expexp1.Rd
779c6a5aff218b1b3daf8bd86bcd671e *man/expgeometric.Rd
f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
@@ -279,303 +284,305 @@ f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
59e10a79028eef76da5bdc868e6bb38e *man/explink.Rd
89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
f2c881a921ae32035e8d41699faa7969 *man/explogff.Rd
-756267192c82e69270f3b6b44e7e7c59 *man/exponential.Rd
+55891c7c57998f785aa4a277338aafc2 *man/exponential.Rd
8ba1a5f581e370f49e5b91e12f90e42e *man/exppoisson.Rd
2bfab14d29e3df39995627cfed355e85 *man/exppoissonUC.Rd
-e3f13f0719fe68193454ccf6949ff5cc *man/felix.Rd
-a971e1d3759018a41544d7976de1f254 *man/felixUC.Rd
-e55a6b3e93a04e0a68d8c363595cb222 *man/fff.Rd
-78e0fe6433266ad454e0169d178aef36 *man/fgm.Rd
-bd384422574aff8eb42ba7bd55634a2e *man/fgmUC.Rd
-f935c7559c9ddbf1266b19543023e0a9 *man/fill.Rd
+9a0ac8c5f8e7cc3d5fe05e1f937944ed *man/felix.Rd
+c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
+09fc6553edb037bc708396a30fe3c8f2 *man/fff.Rd
+539720cd34a0ad024848602974a5fc63 *man/fgm.Rd
+194983ad64cdcf165c37e8f48fed1db8 *man/fgmUC.Rd
+741f6474d688a5bc6ed61042d9a12eb6 *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
5fd279ebc2d6ec3df74557cdca6940c0 *man/fisherz.Rd
-464a5be86b451beaef25e096cff36273 *man/fisk.Rd
+3581b41c402484cad5bf602a04c0c497 *man/fisk.Rd
8215ca60f756bf8f9f2e3b404741fbd7 *man/fiskUC.Rd
-514c750201a82629ecfd0c5daf5cc9c7 *man/fittedvlm.Rd
+5cb189f9d314ea1057b801c5eb2bfe71 *man/fittedvlm.Rd
cd73efab4c3e718d1a77a603eb5e341c *man/foldnormUC.Rd
-c9b39250cd464b9f9f8b45abe36b4ee6 *man/foldnormal.Rd
-a039a64693a75336ca125a2f30703a38 *man/frank.Rd
-f96df0cd8d773d5152f39cf2fb12608c *man/frankUC.Rd
-4e9a55fb4be11f3464b81ab678b40d45 *man/frechet.Rd
-7d3ee6f153a63e96ec770dfedbd13204 *man/frechetUC.Rd
-6d7242b05f9006cb2a7343356d734b08 *man/freund61.Rd
-4b7619368e2cc01107743d21b7fd67fc *man/fsqrt.Rd
-da2e9cdccb5b9abe30cbe5cde43d41f7 *man/gamma1.Rd
-dabc3bedd5b206aba441d7ea23a86c4b *man/gamma2.Rd
-44261a600c3189b10f9f2e61c16ad2df *man/gamma2.ab.Rd
-5f57ead6a37803c347e759fd12fb5c96 *man/gammahyp.Rd
-68181a9850e3d8d4cd52ea5c8c45f369 *man/garma.Rd
+055879697fc53566c542526623fcc08f *man/foldnormal.Rd
+ccf7154b56d6f4fc317fa1e4007eb2a2 *man/frechet.Rd
+9a20f21cc479ec153a62b3cbba264fed *man/frechetUC.Rd
+cad07bc11ec21b13ecdbc3b93ec8efc0 *man/freund61.Rd
+17c995a0692e2f600397ade32fcd6399 *man/fsqrt.Rd
+e894daa1763db143c10b7eb052ce19b0 *man/gamma1.Rd
+90368535c67e0169e92b84e4700f51cc *man/gamma2.Rd
+7c16404e8a5091a8f08869d8e39a22e2 *man/gamma2.ab.Rd
+1bec36703078a92dcc710f799742e253 *man/gammahyp.Rd
+edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
2907a13f1f68692ce6e621131fa0d35e *man/gaussianff.Rd
-48fa44983f28bf53d520128d8ead6954 *man/genbetaII.Rd
-68cd2f025132585e1180bf71be281b5a *man/gengamma.Rd
+dc2223631aac6c39212ee87fbac5a3c6 *man/genbetaII.Rd
+a51b1b6a73ff92fcec1b212e2ad71de2 *man/gengamma.Rd
795f7e16b356cea3db6294b6ed430b91 *man/gengammaUC.Rd
-fba0014f17bf3bc38466ca8ca0e952ff *man/genpoisson.Rd
+b44fdd5d441068c7443b4022d6158ec7 *man/genpoisson.Rd
ca65498360cbe30840cfa4c9d931fb3b *man/genrayleigh.Rd
5193d3fe8ab3e3a790247fd93a2c513c *man/genrayleighUC.Rd
-8f48d9859354c4542c32367ee99103c7 *man/geometric.Rd
-78b7d9455f1eaa4572ff54427d77935f *man/get.smart.Rd
-14a7e2eca6a27884e1673bd908df11e1 *man/get.smart.prediction.Rd
-0e3d3c898babad94713a55cb5472243d *man/gev.Rd
+94c6189883bf1848735e23156e25cdc0 *man/geometric.Rd
+ee09405e381f088c31edde7c524c7f61 *man/get.smart.Rd
+d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
+c89eadbed89ac2c589d03fe3bb3964bc *man/gev.Rd
838c81d8d6c94f4f3ae49df0b25d1cfa *man/gevUC.Rd
f87241a6011f5f5a49921a1842a177ed *man/gew.Rd
711704243b30d0270d3ac2a51e2768a8 *man/golf.Rd
-5cc8c0cabb839b34f4f37de4b57f4428 *man/gompertz.Rd
+a0ce2419f0f16af5872c1b9b0eccedb3 *man/gompertz.Rd
a521f6b84e19a2acd6080cdd01a538a3 *man/gompertzUC.Rd
-33a1c86c4103534119b18dfa226dd7ea *man/gpd.Rd
+1533f3b411ceb3b9e7d55af00fab8e93 *man/gpd.Rd
54b49cf2e3ba865dc7c9297948036d9a *man/gpdUC.Rd
7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
-c8484625df61017b76ba14d9aa4759f5 *man/grc.Rd
-ebdc9bf4ecc9db057793acbf7c7b4283 *man/gumbel.Rd
-a6df41a1cc82c1744cad46ba89a5b161 *man/gumbelII.Rd
+0f4c8edd387b2f1334b9cccf09c209e9 *man/grc.Rd
+98ee8f7cc9da2e8288ae3545b2cb745c *man/gumbel.Rd
+72a533d779f90f1d43c6eb61e8f7f8e2 *man/gumbelII.Rd
09d6b3c902029eeda151ea7408436746 *man/gumbelIIUC.Rd
6e8fe2f3bce2e1f173f97fcd5f25d38d *man/gumbelUC.Rd
fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
-f13c76795259bf8c257751f6bfc82995 *man/hormone.Rd
+7d01681e24795448b9d0639c5f1b05c5 *man/hormone.Rd
8ef9d44522eaef45b284b7f98794d48b *man/hspider.Rd
-59409b2ff67e8099de04beb52371ad2e *man/huber.Rd
+f4fc4645d2d190ef9b82cce1ee8b29d2 *man/huber.Rd
ea67b113e21bbe6197fff2358cb47179 *man/huberUC.Rd
d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
-dafa920729c2512ac9fab7550f1dc2ee *man/hypersecant.Rd
-70e0d9e1f05930b5845f5ccb465c9dd0 *man/hzeta.Rd
-77b69beb073dddc46ab505277604d36c *man/hzetaUC.Rd
-dbcb8ac1c022d2a71cb8692cdf684cfc *man/iam.Rd
-941e6c172212119e9f189b447fe89b1c *man/identity.Rd
-6df749d9e38dcc7c3e9bc1ffc9e60dcf *man/inv.gaussianff.Rd
-798b9114bd27dc81b06409ecb5098ccb *man/invbinomial.Rd
-ceafec1c5c64f77d3bf0e39bee2b0277 *man/invlomax.Rd
-93c76dca757056d75f7978328608cce8 *man/invlomaxUC.Rd
-5aeacd9294068b2ea86d1f7269c56965 *man/invparalogistic.Rd
-d5b78c1484a4756f09a7f109c753626d *man/invparalogisticUC.Rd
-6c4bcbe8b24a63891d991d8983365008 *man/is.parallel.Rd
-a286dd7874899803d31aa0a72aad64f2 *man/is.smart.Rd
+aa2659c9ddd6b4323d23e4a8c8536026 *man/hypersecant.Rd
+2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
+db89dbd9462622d0e70f1648fd4ccfcd *man/hzetaUC.Rd
+c4b8cf96eae282e0746bf8126231a7f5 *man/iam.Rd
+c2796439b1c32144c3a1ffcbd7f6da72 *man/identitylink.Rd
+4fdddd75421773a580c9b319001a7c33 *man/inv.gaussianff.Rd
+225d88d9e072069bc74598a5fa35c2d6 *man/invbinomial.Rd
+80cb134d920e53979d9126c98013772a *man/invlomax.Rd
+f8b2a040f409d90fcc51c6b673f21d16 *man/invlomaxUC.Rd
+19f7f87e98e991b8bce976c92ece2a13 *man/invparalogistic.Rd
+a0cccd34cb51df34d82db073c574ec97 *man/invparalogisticUC.Rd
+9479a4710c3d24d98e2f7aacf460c0f2 *man/is.parallel.Rd
+2527fdc9fb684c48b4003ea6546f6029 *man/is.smart.Rd
1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
5cf973ee22fcfd1442e61458a9d91ce9 *man/kendall.tau.Rd
-690c801050d201570745a4f635ed2df0 *man/koenker.Rd
+586e77c2987689b1983f03b269901296 *man/koenker.Rd
47bca557052f9620a8bfb73e48801b95 *man/koenkerUC.Rd
-a97c9e81bf9b2ba86208a7ab334d4275 *man/kumar.Rd
+149c759079151bd06084810c29f6c72c *man/kumar.Rd
2e07c2e87f84e59aac2c1d4d6d7a3789 *man/kumarUC.Rd
+1bcedd3ac3a0c7467e5dee8ba1de9ace *man/lakeO.Rd
decbd103cc5311735e70d906d170c742 *man/lambertW.Rd
-3f61e79d47f859c3afc0003262113196 *man/laplace.Rd
+e80a85ec4d067a1549cc8249666f75c2 *man/laplace.Rd
1e0d24321650e214570c5ee3b703a261 *man/laplaceUC.Rd
16b21ecf83bb8fce76079502877b2fbd *man/latvar.Rd
-5345c003b2bc82b5844cb339cb609f05 *man/leipnik.Rd
+2cd5151baff29f9d8dd996dc48293301 *man/leipnik.Rd
2e88465ad75446bbbccf208661193a8c *man/lerch.Rd
8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
-465842cdf41dc2bbac523bf563060123 *man/levy.Rd
+1fc675bb94504679ecd167636c4daf71 *man/levy.Rd
0c6b5e56369b01507cef3729eac6290c *man/lgammaUC.Rd
-5eea7fa51644d3179618b0a0d82fa1a6 *man/lgammaff.Rd
-9e95d91d1a94e459178a738700a16499 *man/lindUC.Rd
-c3a54373cf8bd8ab360ea514a2aae05b *man/lindley.Rd
-6f035793e3afef2ae6977c22b6f69681 *man/lino.Rd
+2ddc0f62bcc1db94e566aacd3e932427 *man/lgammaff.Rd
+22cc8bb5e5ce47158dc867012db7c9c5 *man/lindUC.Rd
+7cfa64df25fe6f4b732b5a1ed0178be6 *man/lindley.Rd
+8d88640bdf9d18e6b356e491bf1e94d7 *man/lino.Rd
8a4a3a1cc12bdb111c6de98ec1c45e9f *man/linoUC.Rd
b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
-d567e6c1a92069a8e976eab44ffd12a6 *man/lms.bcg.Rd
-0ef70d825afc1a45b5768eaca3bd88d1 *man/lms.bcn.Rd
-1547fe696e826d09308dd9dd98e7d913 *man/lms.yjn.Rd
+5a9126c71990d5fec145c2d50ad5a2df *man/lms.bcg.Rd
+61db2b9d962515f51747d89186b0a261 *man/lms.bcn.Rd
+614541de8e2c01e8600536c85090c00c *man/lms.yjn.Rd
20824c03fc9d40f749ca42d60805124d *man/log1pexp.Rd
-5b95c393c4c558bf6b33afbbc614f370 *man/logF.Rd
-770d2f1b0efbbdd35656567250ebe941 *man/logF.UC.Rd
-22d6d79d0a45641f9b48b84e6e0c22a0 *man/logUC.Rd
+edd3e0869b059c33a01e3a2860e6feb8 *man/logF.Rd
+06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
+9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
+f840f8e85c2092093673d6805cd21dc8 *man/logUC.Rd
e956c4aae749e9034b7cf7fdf8661a64 *man/logc.Rd
8c871e5697ed43662cd313fc777c2bcd *man/loge.Rd
-7b13f286faa6848dceee780f8c1ca670 *man/logff.Rd
-9e5b5e84d9fa6d0fd5661882e0465ac0 *man/logistic.Rd
+20cc0c73ee555790179879533cb526f7 *man/logff.Rd
+ca29f90ff7b7c4fed8b19781f7fc745b *man/logistic.Rd
c1c9415c6f05f8e8d3e6aee71f7ea967 *man/logit.Rd
-1f63716471926cf3baae3150c94beb74 *man/loglapUC.Rd
-33ee8ead6d8e9c900690ee966a056ea1 *man/loglaplace.Rd
+15e03c1d93d5eef749c03ecb446945b3 *man/loglapUC.Rd
+b3b9edd1fc27bcebf7c4756db41454da *man/loglaplace.Rd
49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
a569b31d918209e8b54a62e8594a3268 *man/loglinb3.Rd
f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
-a6cf3e7329e66d0780f0318f4b53355f *man/lognormal.Rd
+31652b3efe7d67b788e6995cc8642aea *man/lognormal.Rd
e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
-5ce7aa8f16e81795577cc553d40a1e9c *man/lomax.Rd
-c551ab73c0874a6bdcd66a569897b050 *man/lomaxUC.Rd
-950443559c152cc441b4b08dd5c7e12e *man/lqnorm.Rd
-fafc126c62f806baebf8dcf1b3adac17 *man/lrtest.Rd
+22b7830eacf728a157992ee6a974adb2 *man/lomax.Rd
+1c4a4a2ce7661905273c47b4d8b6f898 *man/lomaxUC.Rd
+356e56edee88c1a1e235fe40471df54e *man/lqnorm.Rd
+fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
-19a0bb0240f8f5bdb5e1729806a4a82c *man/lvplot.qrrvglm.Rd
-0e27d1c327ebf057f1eef3e243accc47 *man/lvplot.rrvglm.Rd
+f478dcd30289d69e7dc8468325b1c23f *man/lvplot.qrrvglm.Rd
+16b238586876d84bad0a1420402b5718 *man/lvplot.rrvglm.Rd
c5760c3960748f906230ded119478271 *man/machinists.Rd
-6fab686982d148f43e04ca4674dd14cf *man/makeham.Rd
+0984609f96be7b543c69a4767b734b24 *man/makeham.Rd
c01957cac49ff8e3444d143901efab18 *man/makehamUC.Rd
-c35da03ffb7149d76ddbbf6747964c70 *man/margeff.Rd
+583f3f406844c550079d2592ecba0c25 *man/margeff.Rd
b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
ce0b52f5d9275e79be867d5e472155bf *man/matched.binomial.Rd
-3e71e728ec202a03204fd81f6411c881 *man/maxwell.Rd
+aba16890d9923de9ddd31b18886beabe *man/maxwell.Rd
0d3df98163de7b80cc3c600a791792c7 *man/maxwellUC.Rd
-1912321bc56a20e92697651612a5c185 *man/mccullagh89.Rd
+bd8250aaa1bc17c017c0b201642882dd *man/mccullagh89.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
-3b5d203389f18b3847122d3a78152f21 *man/micmen.Rd
-fa9997a45317c4c489bbeb2dad5a4624 *man/mix2exp.Rd
-4f8db594bdcf9cd9e794054ca3e4bd95 *man/mix2normal.Rd
-8cce6252ede9ce339d092542422af715 *man/mix2poisson.Rd
-3916d708fd16ec091b1b85e6eb8ef7fd *man/mlogit.Rd
+3fe36bd9f77590dc17a9a2e9380dc0bd *man/micmen.Rd
+fb797d07f6906c113862ea3aff57eee2 *man/mix2exp.Rd
+232e7ac50df002b7c0a1d7ba70fd0bbf *man/mix2normal.Rd
+805b04c7a832073cf90af42c891720da *man/mix2poisson.Rd
+e3f93b50736c1a14398c6677f1efba97 *man/mlogit.Rd
a8e171aca3ff63d12fdfd97587a81734 *man/mmt.Rd
-0ba11a09fea865c9841a25948bb4d381 *man/model.framevlm.Rd
+131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
-a3a01c06a13da4347808bf8c711c6408 *man/morgenstern.Rd
-4cf5efc13bf771e48e16d9fca9634ed7 *man/multinomial.Rd
-01967c3669be9b38d543aa9b4927a4ad *man/nakagami.Rd
+e052249885fb0375b1a364bb8fe50f15 *man/morgenstern.Rd
+261ead0cc3b72f59bba126853a37c21c *man/multinomial.Rd
+efbd8f76e06e80c92c826cd15cb5d411 *man/nakagami.Rd
dab44218c0733703c125d8741f07bb80 *man/nakagamiUC.Rd
-9149385b6c2733a76575b094267b9a8f *man/nbcanlink.Rd
+892ee6d069216d6568be506a7460c1c4 *man/nbcanlink.Rd
798f2e547a94356359c3d50a57ccef17 *man/nbolf.Rd
-ff125ef7b5a04871d130ab037315e68b *man/negbinomial.Rd
-9f830407df57526fcdbff0044cf54609 *man/negbinomial.size.Rd
-484e10fe67dc21bf6626de4e3d0f941f *man/normal.vcm.Rd
-d1c18da23bbd768038a150e09d7d2920 *man/notdocumentedyet.Rd
+192eb0236ed35e7b1f20c81271cc7781 *man/negbinomial.Rd
+f6048338d4d698e967110e1840ed79a1 *man/negbinomial.size.Rd
+0dd90278ee057c748e4ad673838b08d0 *man/normal.vcm.Rd
+120be8e546976970aa92bb293e019b8e *man/notdocumentedyet.Rd
d361e050435d7a4e64474487ecfd782c *man/olym.Rd
858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
-24a97e3b9709df47d079f4e2665f497b *man/paralogistic.Rd
-2fc2cf7200b0f4409471aa2e584168a3 *man/paralogisticUC.Rd
+ec7adc2f811b041ed01628cbb019d617 *man/paralogistic.Rd
+73228cd851fcf468b1fe1ff209ef5eca *man/paralogisticUC.Rd
725a5efd172a9dda442a25b138ee2486 *man/paretoIV.Rd
d0228dcb5ba3bd2a99272100a401c989 *man/paretoIVUC.Rd
-4cdffa085b488c20e9f622b5b211cc25 *man/paretoff.Rd
-f2d5a9aea90051e6675c2701bc58300d *man/perks.Rd
+8f1a34eab62c4e9f14809c42746b46ad *man/paretoff.Rd
+873783fc64c0e3ae5e9e1ff1f7ef2788 *man/perks.Rd
a3658af3f9766a5ce0dfc20aebdf3186 *man/perksUC.Rd
-e539c4c35e170d0216201a6a002ab5b5 *man/persp.qrrvglm.Rd
+a704e5245d54a9e8094b52925c237385 *man/persp.qrrvglm.Rd
a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
8ca9de18625c08de9d4acfa8001c7ca3 *man/plackUC.Rd
bcda813e9efa01ebeff0c6db8fec5b2b *man/plackett.Rd
791d04a5c3a3bc514bf0ed1fc639f8ab *man/plotdeplot.lmscreg.Rd
-d4e06a919cfb634b706b8fbb159c2409 *man/plotqrrvglm.Rd
-958dcd119ee66e5d5318c4cf19f024f8 *man/plotqtplot.lmscreg.Rd
-51e5a6c384fa6d24af24bbea491b0323 *man/plotrcim0.Rd
-8ab7d1738b0a3ad0a1d727d9ae158ef1 *man/plotvgam.Rd
+cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
+29857fd00ca50614d9564247b07a2bf3 *man/plotqtplot.lmscreg.Rd
+d875d55c83b8ec53e2f46b3206f434f8 *man/plotrcim0.Rd
+783d0e876b16eed32b4ab9be314cda14 *man/plotvgam.Rd
72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
-652ff39d9d0920ed5e728c8ed5fcc013 *man/plotvglm.Rd
-bbe8bffd4bcfa945d9573d135bb543f3 *man/pneumo.Rd
-f8662b37b320d93d8ed7a3cb34bff24d *man/poisson.points.Rd
-f3822f18acf678b31e8e22bcc749af59 *man/poisson.pointsUC.Rd
-8521dea982d091e7e1693649fa6a00dd *man/poissonff.Rd
+6196fac00cd0044ba818ec0a794a031a *man/plotvglm.Rd
+40f1661d2f26cb11f54c9140c767c61b *man/pneumo.Rd
+606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
+8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
+2ddaa395bbdea2574df3e7c387186db1 *man/poissonff.Rd
c0578de27756a8b6912b7940f2de96e5 *man/polf.Rd
696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
-037cd4e84fd5a18732c784b71f3bb649 *man/posbernUC.Rd
-abc93018c379e95040c4f6e21b1526f8 *man/posbernoulli.b.Rd
-3d9d9160e337b4f988bb9254a705c939 *man/posbernoulli.t.Rd
-cb4ac0ab64c6d3a49650e655ae90db11 *man/posbernoulli.tb.Rd
-7657ea90758d0a212597516214ece468 *man/posbinomUC.Rd
-5897f14bdd81eb3e3ad39c4a91b8bc3f *man/posbinomial.Rd
-bda3e76da50396ae5dcf4f318ec7cfb4 *man/posgeomUC.Rd
-a0e8c366513c21a4d84651b7abff9033 *man/posnegbinUC.Rd
-ed57599c85659a342ec65848f9848806 *man/posnegbinomial.Rd
-c593f447dea64e4e0395ec3629efe430 *man/posnormUC.Rd
-ed1743d16c3327574349781fe338bcc7 *man/posnormal.Rd
+2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
+a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
+f995f3aeff44e63929519b7752bc240a *man/posbernoulli.t.Rd
+ea0f67b9b92d46c30c3a6a4ef6979d87 *man/posbernoulli.tb.Rd
+ca1949d75cb146d17b16d46009f55b9a *man/posbinomUC.Rd
+a0ff19c3e87fa3697f2d3a48a4230473 *man/posbinomial.Rd
+dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
+2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
+a9f3ff5f799d60588fe55623ba98a0ed *man/posnegbinomial.Rd
+7176035d384054db426d3f3322429372 *man/posnormUC.Rd
+e130fade4adc7216d9d825d73cf83dd6 *man/posnormal.Rd
137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
-91a5bc77597943b0c6d75fbf3658d1d6 *man/pospoisson.Rd
+02066c793ac6cc88cdcb14ceb9b67fcb *man/pospoisson.Rd
f35d86d08cb2181e69403304101af4e7 *man/powerlink.Rd
af139c6afa9ed0d34045609975dca53f *man/prats.Rd
-9ca451a2f4a9739a455beafc898d0c42 *man/predictqrrvglm.Rd
+4ff051b77b97c2f0cd68f56063620bf5 *man/predictqrrvglm.Rd
6a2efe9d46b7c686e320469698f9c1c7 *man/predictvglm.Rd
-db3e0667302072ff2451204e735a1952 *man/prentice74.Rd
-37a3381ed799347600ffec8e42e20e3c *man/prinia.Rd
+95b3d2a018fb1afc48dba46e1170e566 *man/prentice74.Rd
+5f4fbb060b2d8386d8d2bfde926d9d5d *man/prinia.Rd
d1b88140c378a21755511fb4a6ae6bce *man/probit.Rd
-d5531627b3f539ed90d41705b46a72ed *man/propodds.Rd
-dcb96910494ff4a7a8fee201557e5aa6 *man/prplot.Rd
-de570e252375d7052edaa7fb175f67eb *man/put.smart.Rd
-ea988b454e438711d9e7c9cb46e69925 *man/qrrvglm.control.Rd
+0718999b2644fa5d30ffcd81722350e5 *man/propodds.Rd
+241402d089ef4159f01fb4cd2c72b9a3 *man/prplot.Rd
+236c21982c4d7b440f8e5768c6806ecb *man/put.smart.Rd
+398396f20c5d46304cf9ec527505d541 *man/qrrvglm.control.Rd
0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
-0cbead5889047f9a0a73d9c753ef955e *man/qtplot.lmscreg.Rd
+19419758045a8282b21c6c7a8412a725 *man/qtplot.lmscreg.Rd
2d496ded26329ff563f7d838c1f6a2cd *man/quasibinomialff.Rd
-3453476890320828ffadfdf9a7171fcd *man/quasipoissonff.Rd
+65cf15223e019a1e223afc9c15ca183e *man/quasipoissonff.Rd
6691fe12d23149a7c28a75a62230b2d2 *man/qvar.Rd
-2b0e10b17a1b309556eb8431149cba54 *man/rayleigh.Rd
-8d466449dd9801fe604e7c8e86f8f966 *man/rayleighUC.Rd
-ac35007b1dc2625a01d199b8c7f49503 *man/rcqo.Rd
-1d9601bd76b8c0cddcf567b144b5ef89 *man/rdiric.Rd
-e0d655cedebcefe5aa661cf4036b09d6 *man/recexp1.Rd
+d288908bb349df629f1f057b63ec7fb3 *man/rayleigh.Rd
+45b293604a0e71f14b2dad2976d7b845 *man/rayleighUC.Rd
+42759865ba0f19ea889d982bc7abc121 *man/rcqo.Rd
+97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
+5eda556cce0510064a330d66565a00c8 *man/recexp1.Rd
49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
-42d285fa089073e392d9c62dd14cf6c0 *man/recnormal.Rd
+38ff5f23f427985a349ba222b390e615 *man/recnormal.Rd
a56ddce8598af2320fdadb94c42a9b24 *man/rhobit.Rd
-a35866590c58ec9deb162a0d900bd1f5 *man/riceUC.Rd
-eb9e85df130898be3aac04e146eb0a9c *man/riceff.Rd
-f3302a07fa476f2870dce74a90b1cb77 *man/rigff.Rd
-a0001cabc868c3d8f82232283338499f *man/rlplot.egev.Rd
+8320763391f5f25b47fe931a0cd1fa2a *man/riceUC.Rd
+47ea579d67d72e713f55cadf6d0b7fad *man/riceff.Rd
+9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
+94d70d96afa235759b7d4a6b6775fe94 *man/rlplot.egev.Rd
3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
-b766f22051e15e2bdf9394746c04e072 *man/rrvglm-class.Rd
-e39a026bc1841c5efacc1a95ca0d788a *man/rrvglm.Rd
-7aa0211643b3345131b2704c71b4404c *man/rrvglm.control.Rd
+c1638b6d6833abcd2eb5814a328a6777 *man/rrvglm-class.Rd
+b814d064706da7d367758f186a9d4bb1 *man/rrvglm.Rd
+3df0ab81d836bcbe3c7f34e7c698ae2a *man/rrvglm.control.Rd
59d8b84425a1ce32215e150773386617 *man/rrvglm.optim.control.Rd
ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
850477e7023b0617c4dd9bf177881736 *man/s.Rd
-c5192ce3ce27b4a32088eae60a9647b1 *man/seq2binomial.Rd
-71367fe3b494a45c98f9a96e1fd791e0 *man/setup.smart.Rd
-29b795708516a3cc440b5ddb50425004 *man/simplex.Rd
+c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
+9985ea15444cc317e3e8fc2aad7200da *man/setup.smart.Rd
+ad56969af369fe4120838caeb6ac60b6 *man/simplex.Rd
f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
-4d13e6cf2248dde66a69216540cd2e87 *man/sinmad.Rd
-754b3dbc268f1df1bf8f675da6a2ebf8 *man/sinmadUC.Rd
-8b7bce1ea79b811ba476d4fab9866530 *man/skellam.Rd
-44b30a23c850108381d7b76c575377eb *man/skellamUC.Rd
-9f648ab47f83275ed69c036a6bd650de *man/skewnormUC.Rd
-d7b412af782b794441e67dbc9848a6a0 *man/skewnormal.Rd
-0c30d059794a31ec06e43da1590496cc *man/slash.Rd
-9d45778b7f284934351777b4b9686c50 *man/slashUC.Rd
-1ed10e28c013e2e08ac5f053b2454714 *man/smart.expression.Rd
-163cdb3e4a225aceee82e2d19488d56e *man/smart.mode.is.Rd
-2b68a9e20182e8892bb7be344e58e997 *man/smartpred.Rd
-3849f780d823a1a0aa67bb65ac35510e *man/sratio.Rd
-db9ae337a136caa28914e5eeea6ae0a6 *man/studentt.Rd
-d033dcd015105ccb015ce263c55adf62 *man/tikuv.Rd
+82f4e35552730791bb18e789c01ff861 *man/simulate.vlm.Rd
+158ce60e4d3abe5d370a43327e361ffd *man/sinmad.Rd
+702d8c7998205774dde5a93d2e5a49fe *man/sinmadUC.Rd
+5327f9644795a6ed4e1909159156b656 *man/skellam.Rd
+2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
+b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
+ba2cd271d5f1ed6eed17629588e7c4ec *man/skewnormal.Rd
+9f34bfb220e6d0400971a1efa5db28c6 *man/slash.Rd
+213b0f18e657b3c80f1af5f2bc1f4c6b *man/slashUC.Rd
+0b280d5a21d5f9f48d14caf7d366a06e *man/smart.expression.Rd
+41adcb0db6c8b560af197a65b7226477 *man/smart.mode.is.Rd
+3d5d3a55f66ef8048b446da063e36ceb *man/smartpred.Rd
+098bc8b943b6ae2e0de9a4da57fcfd22 *man/sratio.Rd
+c8a04bcb150fa0dd37c4fc6f1e8efc1b *man/studentt.Rd
+1ba10a9db17520656ec1b0ca1c32d2b9 *man/tikuv.Rd
dc0ae67e1d293040bf2d088e9bd4945b *man/tikuvUC.Rd
-ce3b83e22114d609561ad80353be4f18 *man/tobit.Rd
-53d34225f637b8e3cf35242dc436078b *man/tobitUC.Rd
-8abb5eb3e6670f618e91babde7b396f2 *man/toxop.Rd
-507517bbd56e61a591f43ffff0e1f018 *man/triangle.Rd
+821969db6bd5c548c51b3cbd82b6352a *man/tobit.Rd
+2b4e875a4415043bf0cd019e71e955cd *man/tobitUC.Rd
+b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
+5a424c4e215899bc18b87099fcaf98e1 *man/triangle.Rd
b35739c390fd5566b8851cd070b09492 *man/triangleUC.Rd
-0911220727ac754cb2e370b72c1ba88b *man/trplot.Rd
+1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
d77a2419400b9ae1059949803b8a1dd2 *man/truncparetoUC.Rd
-686b7a6a1920375d43a8071a22f86e48 *man/truncweibull.Rd
+d23efcf6ab32a3bd3e02555bc177e5c3 *man/truncweibull.Rd
50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-4ca96b163b2b9c931a39a07f25d040a3 *man/undocumented-methods.Rd
-7e0c6b38a66c0b4feca9420717e08f37 *man/uninormal.Rd
+9987a953e8b42a47a8c3c515249dbe22 *man/undocumented-methods.Rd
+c5d717e96565afac189ec5e103fd30af *man/uninormal.Rd
f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
ecf0058b783f675c77a3ca1e5ab1a90a *man/vgam-class.Rd
-129c86ebc03ed114be4d836b984fb5e2 *man/vgam.Rd
+6db59f46bb2fbdbd6329f07498eca6d5 *man/vgam.Rd
c059eb2c3a2c325bd3b9498abe0a5d46 *man/vgam.control.Rd
-12fd658b80fd45359d027177e43cb3a1 *man/vglm-class.Rd
-f1e74e2eca2768bdd9c0c132c2393266 *man/vglm.Rd
-e1716bf18db44df404475959a6a122a5 *man/vglm.control.Rd
-a8508ebb5ce0d2fed90d3e9e1d081455 *man/vglmff-class.Rd
-b577458c794529beca42f1af8531172e *man/vonmises.Rd
+1efef5d732a8585b81478fd03e103e5f *man/vglm-class.Rd
+cecde8d7fd2706132b92762bfed8055a *man/vglm.Rd
+84d3293dabcbc437cead24a6a39ede91 *man/vglm.control.Rd
+8d9fa0cc290e49e459947c38c292df4c *man/vglmff-class.Rd
+4b0a3f2794103d8d6014a58041855f7f *man/vonmises.Rd
7787a423c41dec21ed7c4440288ef9b7 *man/vsmooth.spline.Rd
c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
-6b73eaac72f0b349d162f4480684577a *man/waldff.Rd
-dbcdaf4674e14f3e2de11c0fc937a38a *man/weibull.Rd
-0c5747e9524dd180f278210ab769e535 *man/weightsvglm.Rd
-655258cff21a67e1549b204ff3d451a5 *man/wrapup.smart.Rd
+9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
+c223012cb1da31f7e6cbd864de218cd2 *man/weibull.Rd
+31edfdbcd09aec27897f7a5167a57b40 *man/weightsvglm.Rd
+3557b17f6054a1699cb653b36f6d1a37 *man/wine.Rd
+f5a3078b689d511325cb1dc0fd4e21f3 *man/wrapup.smart.Rd
622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
-d99d82fda3a3f6ab855da4281771753f *man/yip88.Rd
-21a90fbde0228b4e74bba93b50300b54 *man/yulesimon.Rd
-a6128b966f2d5d6df5f36b11bc2c3607 *man/yulesimonUC.Rd
-b7f37725c67ca8dc93fe767086bfb8e5 *man/zabinomUC.Rd
-6b1e7a5dcefe1eedbf2e7b05989866ce *man/zabinomial.Rd
-7d16a3a8e022ae6d78e55a77e4855241 *man/zageomUC.Rd
-94a6ea72fac92e624106a82761f6c8f9 *man/zageometric.Rd
-ab85bfd95caf0171ec7c515309d7c74f *man/zanegbinUC.Rd
-107900993ddcc02efe5d47b2463d5ddd *man/zanegbinomial.Rd
-14b74bf11cf34181db7919c2f2727d52 *man/zapoisUC.Rd
-1fa7c1787639131cb6d157afdd69a3e0 *man/zapoisson.Rd
-61cce538df41d42d6e5daf8f37635527 *man/zero.Rd
+ebfff81b0f4730417de95f80b7c82c41 *man/yip88.Rd
+225fcd19868f17b4a5d2590e834cb888 *man/yulesimon.Rd
+5057781d1bd6967a924665f8d63f6447 *man/yulesimonUC.Rd
+ae671324c0f93f66adc72f053ef9ebd9 *man/zabinomUC.Rd
+87b0b38fe7357a2259edc9f1159add84 *man/zabinomial.Rd
+7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
+925e2c8e227ffb6a26192aeeb1fd4f28 *man/zageometric.Rd
+78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
+8e35cb598399b4051aee185a72911f5c *man/zanegbinomial.Rd
+b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
+9fddb7dcd81ef0e4d6777a4ae2a56bff *man/zapoisson.Rd
+41b375aed0074b0d0e87b2913685cda9 *man/zero.Rd
7985338d08e88fa23cce9cc0a09724b6 *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-aa30ce673db2dd8df0fe47d43305e6c2 *man/zetaff.Rd
-2a95549db11962cd7175b63cd62fd850 *man/zibinomUC.Rd
-82966f9946785f1c1208ac604b796cde *man/zibinomial.Rd
+648342ad0677587e55e4f92d906d0d42 *man/zetaff.Rd
+4e7a3f2f0afb030cf5b4f1a30373871a *man/zibinomUC.Rd
+476f5935d0a6fcbe67f6e8cb39509a35 *man/zibinomial.Rd
cf47526db95bc439da054ac97d2da36f *man/zigeomUC.Rd
-228eb0e6ce7d0cafeb4fa171a858d08a *man/zigeometric.Rd
+8de969235239ce10332c2b91304931f5 *man/zigeometric.Rd
b4d704d064746b54f31f7d3d5c7e71c8 *man/zinegbinUC.Rd
-be4a96e387bdaee0622eab5af4547ec5 *man/zinegbinomial.Rd
-aafae05baaa222dc5f54b30553e30caf *man/zipebcom.Rd
-de0330d5599faa509ea5038ab38a7eb2 *man/zipf.Rd
-e83338d61c48dfc56179cf190ec05006 *man/zipfUC.Rd
+d720aa8eac5ca7628305e3a71585bf52 *man/zinegbinomial.Rd
+a9b1d67033daa03a9880227187041ae5 *man/zipebcom.Rd
+abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
+24ccbcefd8c1d93f609a39a1d29e4c17 *man/zipfUC.Rd
0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
-ff3ba2c8f8ad4fb8bd0105e21f944396 *man/zipoisson.Rd
+1ca7235ece422fbb566c94e46f0de6b2 *man/zipoisson.Rd
f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
@@ -583,22 +590,20 @@ ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
dc1ca5b4e9a67b6d48c25e7107112d9c *src/lerchphi.c
c54afdee58cf86ecaf1072c492b49001 *src/lms.f
-9cfd5e51c2dba024afc28b0fffaece4a *src/muxr.c
+feba7ba09eca8007392e0405c4b373a8 *src/muxr3.c
65ef45ba4e422c33db9848bb549ea93c *src/rgam.f
-bd461ca234f78bf7313a986ad9cdcd4b *src/rgam3.c
+473bc0b2f4d6757fa9b397ac0d7c9e47 *src/rgam3.c
6aee7dc8f242ea6e9446ade5b7edeee5 *src/specfun3.c
4814bb73b4c3eedc7507ad99511c7dc5 *src/tyeepolygamma.f
10939d9fb380d54da716a835d37fdf75 *src/tyeepolygamma3.c
79cf39f1d83f25e29a6c56d344ea8d76 *src/vcall2.f
83c304cbbe3f0a9bfbe7ab5aa0eefd4e *src/vdigami.f
3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
-3046b06e0ff0de2724a8c1d57d2f21c7 *src/vgam.f
-b6bf432138f1f11cef21ef473cac82d9 *src/vgam3.c
+5ea414b5b42454c8efa73152c45ea62b *src/vgam.f
+cc42ab525129d3d763a8f590fd4a8238 *src/vgam3.c
bbb4ca20dcf50cd985b411b9a65b68f2 *src/vlinpack1.f
80c0a0f512ae74ecbed144c5f115fb16 *src/vlinpack2.f
e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
-4510a716373a71c3f2da66ddb4d39267 *src/vmux.f
-df3bc03743117dbb36e3a74f6ccb9d21 *src/vmux3.c
+753359563526a9cd5ebac104dab2d754 *src/vmux.f
+9083b462bcc275ee6dda47e97f1ebf94 *src/vmux3.c
b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
-ab0ec8b49daf41071e1636bb52b71294 *vignettes/categoricalVGAM.Rnw
-e4c5415e487f533b70695b17e40d97bc *vignettes/categoricalVGAMbib.bib
diff --git a/NAMESPACE b/NAMESPACE
index 4f3c56b..54ed662 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,6 +7,31 @@
useDynLib(VGAM)
+export(sm.bs, sm.ns, sm.scale.default, sm.poly, sm.scale)
+exportMethods(coefficients, coef)
+importFrom("stats", coefficients, coef)
+export(case.names,
+ coef,
+ coefficients,
+ df.residual,
+ fitted,
+ fitted.values,
+ formula,
+ residuals,
+ variable.names,
+ weights)
+
+
+
+export(Select, subsetc)
+
+
+export(simulate.vlm)
+importFrom("stats", simulate)
+
+
+
+
export(family.name.vlm)
export(family.name.vglmff)
exportMethods(family.name)
@@ -83,6 +108,7 @@ exportMethods(hatvalues)
importFrom("stats", hatvalues)
+importFrom("stats", dfbeta) # Added 20140509
export(dfbeta, dfbetavlm)
exportMethods(dfbeta)
@@ -186,7 +212,6 @@ export(pnorm2, dnorm2)
-export( bs, ns, scale.default, poly )
@@ -250,7 +275,7 @@ coefvsmooth.spline, coefvsmooth.spline.fit,
constraints, constraints.vlm,
deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm,
-deviance.vglm, deviance.vlm,
+deviance.vlm,
df.residual_vlm,
dirmultinomial, dirmul.old,
dtheta.deta)
@@ -488,7 +513,7 @@ geometric, truncgeometric,
dlino, plino, qlino, rlino, lino,
grc,
dhzeta, phzeta, qhzeta, rhzeta, hzeta,
-negidentity, identity,
+negidentity, identitylink,
prentice74,
amlnormal, amlbinomial, amlexponential, amlpoisson, Wr1, Wr2,
dkumar, pkumar, qkumar, rkumar, kumar,
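
For context, the newly exported Select() gathers same-prefix columns,
which is handy on the left side of a multiple-response formula. A
minimal sketch (hypothetical data, and assuming the (data, prefix)
signature documented in man/Select.Rd):

  pdata <- data.frame(y1 = rpois(10, 5), y2 = rpois(10, 5),
                      x2 = runif(10))
  Select(pdata, "y")  # effectively cbind(y1, y2)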
diff --git a/NEWS b/NEWS
index 8d16b2c..effee19 100755
--- a/NEWS
+++ b/NEWS
@@ -1,4 +1,4 @@
- **************************************************
+ *************************************************
* *
* 0.9 SERIES NEWS *
* *
@@ -6,6 +6,58 @@
+ CHANGES IN VGAM VERSION 0.9-4
+
+NEW FEATURES
+
+ o New data sets: cfibrosis, lakeO, wine.
+ o New functions: Select().
+ o negbinomial(deviance = TRUE) works, provided criterion = "coef"
+ is used too.
+ o simulate() works with binomialff(), poissonff(), rayleigh() and
+ several other families; see help(simulate.vlm) for a current
+ listing, and the sketch at the end of this list.
+ o coef(colon = FALSE) works for VLM objects.
+ o pslash() has a 'very.negative = -4' argument.
+ Thanks to Tiago Pereira for picking this up.
+ o Some family functions have a 'summation = TRUE' argument in
+ the loglikelihood slot. It can be accessed using, e.g.,
+ logLik(fit, summation = FALSE); see ?logLik.vlm.
+ Similarly for deviance(fit, summation = FALSE).
+ o Tested okay on R 3.1.0.
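+ o A minimal sketch of several of these interfaces together
+ (hypothetical data and model, for illustration only):
+ pdat <- data.frame(x2 = runif(100))
+ pdat <- transform(pdat, y = rpois(100, exp(1 + x2)))
+ fit <- vglm(y ~ x2, poissonff, data = pdat)
+ simulate(fit, nsim = 5) # via the family's simslot
+ logLik(fit, summation = FALSE) # per-observation terms
+ deviance(fit, summation = FALSE) # likewise for the deviance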
+
+
+BUG FIXES and CHANGES
+
+ o bs(), ns(), scale() and poly() are no longer smart,
+ but they will handle simple terms such as bs(x) and scale(x).
+ The smart versions of those functions have been renamed to
+ sm.bs(), sm.ns(), sm.scale(), sm.poly();
+ these will handle complicated terms such as sm.bs(sm.scale(x)).
+ o Renamed functions: identity() has become identitylink().
+ o Argument names changed:
+ 'ITolerances' renamed to 'I.tolerances' throughout,
+ 'EqualTolerances' renamed to 'eq.tolerances' throughout.
+ o Bug in mix2normal() fixed in @initialize. Thanks to Troels Ring
+ for finding the bug.
+ o Upon loading the package, no warnings (such as masking
+ messages) are given.
+ o multinomial(parallel = TRUE) now applies the parallelism
+ constraint to the intercept.
+ o If a factor response is ordered then a warning is issued for
+ multinomial().
+ o predict(fit, newdata = zdata2, type = "response") used
+ to fail for z[ai][poisson][,ff]() and z[ai][negbinomial][,ff]()
+ families. Thanks to Diego Nieto Lugilde for picking this up.
+ o A bug with offsets and coefstart has been fixed.
+ Thanks to Giuseppe Casalicchio for picking this up.
+ o The variable "Blist" has been renamed "Hlist" throughout.
+ o The expression new.s.call is no longer used in vglm.fit() and
+ vgam.fit(), and the variable Musual has been replaced by M1.
+ o Variable names have changed within the data sets prinia,
+ Huggins89table1 and Huggins89.t1.
+ o Memory leaks found by valgrind have been patched.
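+ o A short sketch of the renamings above (pdat as in the sketch
+ under NEW FEATURES):
+ fit1 <- vglm(y ~ sm.bs(x2, df = 3), poissonff, data = pdat)
+ predict(fit1, newdata = pdat[1:5, ]) # smart prediction intact
+ # identitylink() replaces identity() wherever a link is named;
+ # 'I.tolerances' and 'eq.tolerances' replace 'ITolerances' and
+ # 'EqualTolerances' in cqo()/qrrvglm.control() calls.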
+
+
+
CHANGES IN VGAM VERSION 0.9-3
NEW FEATURES
diff --git a/R/Links.R b/R/Links.R
index f0ab8ab..61eff8c 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -17,7 +17,7 @@
dtheta.deta <-
function(theta,
- link = "identity",
+ link = "identitylink",
earg = list(theta = theta, # Needed
inverse = FALSE,
deriv = 1,
@@ -46,7 +46,7 @@
d2theta.deta2 <-
function(theta,
- link = "identity",
+ link = "identitylink",
earg = list(theta = theta, # Needed
inverse = FALSE,
deriv = 2,
@@ -71,7 +71,7 @@
theta2eta <-
function(theta,
- link = "identity",
+ link = "identitylink",
earg = list(theta = NULL)) {
function.name <- link
@@ -90,7 +90,7 @@
eta2theta <-
function(theta, # This is really eta.
- link = "identity",
+ link = "identitylink",
earg = list(theta = NULL)) {
@@ -178,7 +178,7 @@
namesof <- function(theta,
- link = "identity",
+ link = "identitylink",
earg = list(tag = tag, short = short),
tag = FALSE,
short = TRUE) {
@@ -201,7 +201,7 @@
if (FALSE)
namesof <- function(theta,
- link = "identity",
+ link = "identitylink",
earg = list(tag = tag, short = short),
tag = FALSE,
short = TRUE) {
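
These defaults matter mainly for direct calls to the link utilities;
a small sketch (values shown are approximate, signatures as in
R/Links.R above):

  namesof("prob", "logit")         # "logit(prob)"
  theta2eta(0.25, link = "logit")  # about -1.0986
  eta2theta(-1.0986, "logit")      # back to about 0.25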
diff --git a/R/aamethods.q b/R/aamethods.q
index 14612ee..b055aed 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -45,31 +45,32 @@ VGAMenv <- new.env()
setClass("vglmff", representation(
- "blurb" = "character",
- "constraints" = "expression",
- "deviance" = "function",
- "fini" = "expression",
- "first" = "expression",
- "infos" = "function", # Added 20101203
- "initialize" = "expression",
- "last" = "expression",
- "linkfun" = "function",
- "linkinv" = "function",
- "loglikelihood"= "function",
- "middle" = "expression",
- "middle2" = "expression",
+ "blurb" = "character",
+ "constraints" = "expression",
+ "deviance" = "function",
+ "fini" = "expression",
+ "first" = "expression",
+ "infos" = "function", # Added 20101203
+ "initialize" = "expression",
+ "last" = "expression",
+ "linkfun" = "function",
+ "linkinv" = "function",
+ "loglikelihood" = "function",
+ "middle" = "expression",
+ "middle2" = "expression",
"summary.dispersion" = "logical",
- "vfamily" = "character",
- "deriv" = "expression",
- "weight" = "expression"), # "call"
-prototype = .VGAM.prototype.list)
+ "vfamily" = "character",
+ "simslot" = "function",
+ "deriv" = "expression",
+ "weight" = "expression"), # "call"
+ prototype = .VGAM.prototype.list )
valid.vglmff <- function(object) {
compulsory <- c("initialize", "weight", "deriv", "linkinv")
for (ii in compulsory) {
if (!length(slot(object, ii)))
- stop("slot ", ii, " is empty")
+ stop("slot ", ii, " is empty")
}
if (length(as.list(object@linkinv)) != 3)
@@ -78,7 +79,7 @@ valid.vglmff <- function(object) {
if (FALSE)
- setValidity("vglmff", valid.vglmff)
+ setValidity("vglmff", valid.vglmff)
@@ -136,22 +137,22 @@ setMethod("show", "vglmff",
setClass("vlmsmall", representation(
- "call" = "call",
- "coefficients" = "numeric",
- "constraints" = "list",
- "control" = "list",
- "criterion" = "list",
- "fitted.values"= "matrix",
- "misc" = "list",
- "model" = "data.frame",
- "na.action" = "list",
- "post" = "list",
- "preplot" = "list",
- "prior.weights"= "matrix",
- "residuals" = "matrix",
- "weights" = "matrix",
- "x" = "matrix",
- "y" = "matrix"),
+ "call" = "call",
+ "coefficients" = "numeric",
+ "constraints" = "list",
+ "control" = "list",
+ "criterion" = "list",
+ "fitted.values" = "matrix",
+ "misc" = "list",
+ "model" = "data.frame",
+ "na.action" = "list",
+ "post" = "list",
+ "preplot" = "list",
+ "prior.weights" = "matrix",
+ "residuals" = "matrix",
+ "weights" = "matrix",
+ "x" = "matrix",
+ "y" = "matrix"),
)
@@ -418,30 +419,17 @@ setGeneric("lvplot", function(object, ...) standardGeneric("lvplot"),
- setGeneric("coef", function(object, ...) standardGeneric("coef"),
- package = "VGAM")
- setGeneric("coefficients", function(object, ...)
- standardGeneric("coefficients"),
- package = "VGAM")
-if (!isGeneric("df.residual"))
- setGeneric("df.residual", function(object, ...)
- standardGeneric("df.residual"),
- package = "VGAM")
-if (!isGeneric("fitted"))
- setGeneric("fitted", function(object, ...) standardGeneric("fitted"),
- package = "VGAM")
- if (!isGeneric("fitted.values"))
- setGeneric("fitted.values", function(object, ...)
- standardGeneric("fitted.values"),
- package = "VGAM")
+
+
+
if (!isGeneric("model.matrix"))
@@ -465,19 +453,15 @@ if (!isGeneric("predict"))
+
+
if (!isGeneric("resid"))
setGeneric("resid", function(object, ...) standardGeneric("resid"))
-if (!isGeneric("residuals"))
- setGeneric("residuals", function(object, ...)
- standardGeneric("residuals"),
- package = "VGAM")
-if (!isGeneric("weights"))
- setGeneric("weights", function(object, ...)
- standardGeneric("weights"), package = "VGAM")
+
@@ -489,21 +473,15 @@ if (!isGeneric("AIC"))
-if (!isGeneric("formula"))
- setGeneric("formula", function(x, ...) standardGeneric("formula"),
- package = "VGAM")
-if (!isGeneric("case.names"))
- setGeneric("case.names", function(object, ...)
- standardGeneric("case.names"),
- package = "VGAM")
-if (!isGeneric("variable.names"))
- setGeneric("variable.names", function(object, ...)
- standardGeneric("variable.names"),
- package = "VGAM")
+
+
+
+
+
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
index 496c131..f45ea88 100644
--- a/R/add1.vglm.q
+++ b/R/add1.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/attrassign.R b/R/attrassign.R
index f05c201..430ab2c 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -26,7 +26,7 @@ attrassigndefault <- function(mmat,tt) {
}
-if(!isGeneric("attrassign"))
+if (!isGeneric("attrassign"))
setGeneric("attrassign", function(object, ...)
standardGeneric("attrassign"))
diff --git a/R/bAIC.q b/R/bAIC.q
index a4d6037..fc4cdd2 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -126,22 +126,22 @@ AICqrrvglm <- function(object, ...,
- EqualTolerances <- object@control$EqualTolerances
- ITolerances <- object@control$ITolerances
- if (!(length(EqualTolerances) == 1 && is.logical(EqualTolerances)))
+ eq.tolerances <- object@control$eq.tolerances
+ I.tolerances <- object@control$I.tolerances
+ if (!(length(eq.tolerances) == 1 && is.logical(eq.tolerances)))
stop("could not determine whether the fitted object used an ",
"equal-tolerances assumption based on ",
- "argument 'EqualTolerances'")
- if (!(length(ITolerances) == 1 && is.logical(ITolerances)))
+ "argument 'eq.tolerances'")
+ if (!(length(I.tolerances) == 1 && is.logical(I.tolerances)))
stop("could not determine whether the fitted object used an ",
- "equal-tolerances assumption based on argument 'ITolerances'")
+ "equal-tolerances assumption based on argument 'I.tolerances'")
NOS <- if (length(object@y)) ncol(object@y) else MMM
MSratio <- MMM / NOS # First value is g(mean) = quadratic form in l
if (round(MSratio) != MSratio)
stop("variable 'MSratio' is not an integer")
- elts.D <- ifelse(ITolerances || EqualTolerances, 1, NOS) *
+ elts.D <- ifelse(I.tolerances || eq.tolerances, 1, NOS) *
Rank * (Rank + 1) / 2
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index df6dcd4..951a472 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -64,8 +64,8 @@ Build.terms.vlm <-
if (cov.true)
se <- fit
TL <- sapply(assign, length)
- simple <- TL == 1
- complex <- TL > 1
+ simple <- (TL == 1)
+ complex <- (TL > 1)
if (any(simple)) {
asss <- unlist(assign[simple])
ones <- rep(1, nrow(x))
@@ -86,7 +86,10 @@ Build.terms.vlm <-
}
attr(fit, "constant") <- constant
- if (cov.true) list(fitted.values = fit, se.fit = se) else fit
+ if (cov.true)
+ list(fitted.values = fit,
+ se.fit = se) else
+ fit
}
}
diff --git a/R/calibrate.q b/R/calibrate.q
index e9dd31b..5e23488 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -27,7 +27,7 @@ calibrate.qrrvglm.control <-
varI.latvar = FALSE, ...) {
Rank <- object@control$Rank
- EqualTolerances <- object@control$EqualTolerances
+ eq.tolerances <- object@control$eq.tolerances
if (!is.Numeric(gridSize, positive = TRUE,
integer.valued = TRUE, length.arg = 1))
stop("bad input for 'gridSize'")
diff --git a/R/cao.R b/R/cao.R
index 82c699a..12a6bde 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 0b4fea6..c08f857 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,7 +18,8 @@ cao.fit <-
Terms = Terms, function.name = "cao", ...) {
- maxitl <- fv <- NULL
+ maxitl <- NULL
+ fv <- NULL
eff.n <- nrow(x) # + sum(abs(w[1:nrow(x)]))
@@ -143,13 +144,13 @@ cao.fit <-
rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
+ Hlist <- process.constraints(constraints, x, M, specialCM = specialCM)
- nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
+ nice31 <- checkCMCO(Hlist, control = control, modelno = modelno)
if (nice31 != 1)
stop("not nice")
- ncolBlist <- unlist(lapply(Blist, ncol))
+ ncolHlist <- unlist(lapply(Hlist, ncol))
latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat
@@ -167,12 +168,12 @@ cao.fit <-
lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
othint <-
- c(Rank, control$EqualTol, pstar. ,
+ c(Rank, control$eq.tol, pstar. ,
dim2wz = 1, inited = 0, # w(, dimw) cols
modelno, maxitl = control$maxitl,
actnits = 0, twice = 0, p1star. ,
p2star. , Nice21, lenbeta,
- controlITolerances = 0, control$trace,
+ controlI.tolerances = 0, control$trace,
p1, p2 = p2, imethod = control$imethod, bchat = 0)
othdbl <- c(small = control$SmallNo, fseps = control$epsilon,
.Machine$double.eps,
@@ -211,8 +212,11 @@ cao.fit <-
}
if (!converged) {
- if (maxitl > 1) {
- warning("convergence not obtained in", maxitl, "iterations.")
+
+
+ if (control$maxitl > 1) {
+ warning("convergence not obtained in ", control$maxitl,
+ " iterations.")
} else {
warning("convergence not obtained")
}
@@ -450,8 +454,8 @@ cao.control <- function(Rank = 1,
ans <- list(
Corner = FALSE, # A constant, not a control parameter; unneeded?
- EqualTolerances = FALSE, # A constant, not a control parameter; needed
- ITolerances = FALSE, # A constant, not a control parameter; unneeded?
+ eq.tolerances = FALSE, # A constant, not a control parameter; needed
+ I.tolerances = FALSE, # A constant, not a control parameter; unneeded?
Quadratic = FALSE, # A constant, not a control parameter; unneeded?
all.knots = as.logical(all.knots)[1],
Bestof = Bestof,
@@ -496,15 +500,15 @@ create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
if (!is.Numeric(p1, length.arg = 1,
integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'p1'")
- Blist. <- vector("list", p1 + Rank)
+ Hlist. <- vector("list", p1 + Rank)
for (rr in 1:(p1+Rank))
- Blist.[[rr]] <- diag(M)
- names(Blist.) <- if (p1 == 1) c("(Intercept)", names(which)) else stop()
+ Hlist.[[rr]] <- diag(M)
+ names(Hlist.) <- if (p1 == 1) c("(Intercept)", names(which)) else stop()
if (MSratio == 2) {
for (r in 1:Rank)
- Blist.[[p1+r]] <- eijfun(1, M)
+ Hlist.[[p1+r]] <- eijfun(1, M)
}
- Blist.
+ Hlist.
}
@@ -595,10 +599,10 @@ callcaoc <- function(cmatrix,
which <- p1 + (1:Rank) # These columns are smoothed
nwhich <- names(which) <- mynames5
- origBlist <-
- Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
+ origHlist <-
+ Hlist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
which = which, p1 = p1) # For 1 species only
- ncolBlist. <- unlist(lapply(Blist. , ncol))
+ ncolHlist. <- unlist(lapply(Hlist. , ncol))
smooth.frame <- s.vam(x = nu1mat, zedd = NULL,
wz = NULL, smomat = NULL,
which = which,
@@ -606,24 +610,24 @@ callcaoc <- function(cmatrix,
bf.maxit = control$bf.maxit,
bf.epsilon = control$bf.epsilon,
trace = FALSE, se.fit = control$se.fit,
- X.vlm.save = bnumat, Blist = Blist. ,
- ncolBlist = ncolBlist. ,
+ X.vlm.save = bnumat, Hlist = Hlist. ,
+ ncolHlist = ncolHlist. ,
M = M. ,
qbig = NULL, Umat = NULL, # NULL ==> unneeded
all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
- ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
+ ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 11/7/02
dimw. <- M. # Smoothing one spp. at a time
dim1U. <- M.
wz. <- matrix(0, n, dimw. )
- if (names(Blist.)[1] != "(Intercept)")
+ if (names(Hlist.)[1] != "(Intercept)")
stop("something wrong here")
- Blist.[[1]] <- NULL
+ Hlist.[[1]] <- NULL
trivc <- rep(2 - M. , len = queue)
- ncbvec <- ncolBlist.[nwhich]
+ ncbvec <- ncolHlist.[nwhich]
ncolb <- max(ncbvec)
qbig. <- NOS * qbig # == NOS * Rank; holds all the smooths
@@ -683,7 +687,7 @@ callcaoc <- function(cmatrix,
which = as.integer(which),
smomat = as.double(matrix(0, n, qbig. )),
nu1mat = as.double(nu1mat),
- blist = as.double(unlist( Blist. )),
+ Hlist = as.double(unlist( Hlist. )),
as.integer(ncbvec),
smap = as.integer(1:(Rank+1)), #
trivc = as.integer(trivc),
@@ -700,7 +704,7 @@ callcaoc <- function(cmatrix,
bindex = as.integer(smooth.frame$bindex),
lindex = as.integer(smooth.frame$lindex),
nknots = as.integer(smooth.frame$nknots),
- kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM")
+ kindex = as.integer(smooth.frame$kindex))
flush.console()
@@ -719,7 +723,7 @@ flush.console()
returnans <- if (alldump) {
bindex <- ans1$bindex
- ncolBlist <- ncbvec
+ ncolHlist <- ncbvec
Bspline2 <- vector("list", NOS)
names(Bspline2) <- dimnames(ymat)[[2]]
Bspline <- vector("list", length(nwhich))
@@ -729,7 +733,7 @@ flush.console()
for (ii in 1:length(nwhich)) {
ind7 <- (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
ans <- ans1$bcoeff[ind9+ind7]
- ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
+ ans <- matrix(ans, ncol = ncolHlist[nwhich[ii]])
Bspline[[ii]] <-
new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
@@ -865,10 +869,10 @@ calldcaoc <- function(cmatrix,
which <- p1 + (1:Rank) # The first 1 is the intercept term
nwhich <- names(which) <- mynames5
- origBlist <- Blist. <-
+ origHlist <- Hlist. <-
create.cms(Rank = Rank, M = M., MSratio = MSratio,
which = which, p1 = p1) # For 1 species
- ncolBlist. <- unlist(lapply(Blist. , ncol))
+ ncolHlist. <- unlist(lapply(Hlist. , ncol))
nu1mat <- cbind("(Intercept)" = 1, latvar = numat)
dimnames(nu1mat) <- list(dimnames(xmat)[[1]],
c("(Intercept)", "latvar"))
@@ -879,16 +883,16 @@ calldcaoc <- function(cmatrix,
bf.maxit = control$bf.maxit,
bf.epsilon = control$bf.epsilon,
trace = FALSE, se.fit = control$se.fit,
- X.vlm.save = bnumat, Blist = Blist.,
- ncolBlist = ncolBlist. ,
+ X.vlm.save = bnumat, Hlist = Hlist.,
+ ncolHlist = ncolHlist. ,
M = M. , qbig = NULL,
Umat = U, # NULL value ==> not needed
all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
- ldk <- 4 * max(ncolBlist.[nwhich]) # was M; # Prior to 11/7/02
- ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
+ ldk <- 4 * max(ncolHlist.[nwhich]) # was M; # Prior to 11/7/02
+ ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 11/7/02
@@ -905,9 +909,9 @@ calldcaoc <- function(cmatrix,
- Blist.[[1]] <- NULL
+ Hlist.[[1]] <- NULL
trivc <- rep(2 - M. , len = queue)
- ncbvec <- ncolBlist.[nwhich]
+ ncbvec <- ncolHlist.[nwhich]
ncolb <- max(ncbvec)
@@ -944,7 +948,7 @@ warning("20100405; this is old:")
npetc <-
c(n = n, p = 1+Rank, length(which), se.fit = control$se.fit, 0,
maxitl = control$maxitl, qrank = 0, M = M. , n.M = n* M. ,
- pbig = sum( ncolBlist.),
+ pbig = sum( ncolHlist.),
qbig = qbig, dimw = dimw. , dim1U = dim1U. ,
ierror = 0, ldk = ldk)
@@ -986,7 +990,7 @@ warning("20100405; this is new:")
as.integer(which),
smomat = as.double(matrix(0, n, qbig. )),
nu1mat = as.double(nu1mat),
- as.double(unlist( Blist. )),
+ as.double(unlist( Hlist. )),
as.integer(ncbvec), smap = as.integer(1:(Rank+1)),
trivc = as.integer(trivc),
@@ -1002,7 +1006,7 @@ warning("20100405; this is new:")
bindex = as.integer(smooth.frame$bindex),
lindex = as.integer(smooth.frame$lindex),
nknots = as.integer(smooth.frame$nknots),
- kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM")
+ kindex = as.integer(smooth.frame$kindex))
flush.console()
assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAMenv)
@@ -1016,7 +1020,7 @@ warning("20100405; this is new:")
returnans <- if (alldump) {
bindex <- ans1$bindex
- ncolBlist <- ncbvec
+ ncolHlist <- ncbvec
Bspline2 <- vector("list", NOS)
names(Bspline2) <- dimnames(ymat)[[2]]
Bspline <- vector("list", length(nwhich))
@@ -1026,7 +1030,7 @@ warning("20100405; this is new:")
for (ii in 1:length(nwhich)) {
ind9 <- ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
ans <- ans1$bcoeff[ind9]
- ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
+ ans <- matrix(ans, ncol = ncolHlist[nwhich[ii]])
Bspline[[ii]] <-
new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
@@ -1180,10 +1184,10 @@ Coef.cao <- function(object,
if (is.na(indexSpecies))
stop("mismatch found in 'which.species'")
- while(griditer == 1 ||
- ((griditer <= maxgriditer) &&
- ((gridres1 > epsOptimum) ||
- (gridres2 > epsOptimum)))) {
+ while (griditer == 1 ||
+ ((griditer <= maxgriditer) &&
+ ((gridres1 > epsOptimum) ||
+ (gridres2 > epsOptimum)))) {
temp <- predictcao(object, grid = gridd, sppno = thisSpecies,
Rank = Rank, deriv = 0, MSratio = MSratio)
yvals <- temp$yvals # gridlen-vector
@@ -1915,7 +1919,7 @@ persp.cao <-
}
-if(!isGeneric("persp"))
+if (!isGeneric("persp"))
setGeneric("persp", function(x, ...) standardGeneric("persp"))
setMethod("persp", "cao", function(x, ...) persp.cao(x = x, ...))
@@ -1928,7 +1932,7 @@ latvar.cao <- function(object, ...) {
-if(!isGeneric("lv"))
+if (!isGeneric("lv"))
setGeneric("lv",
function(object, ...) {
.Deprecated("latvar")
@@ -2054,7 +2058,7 @@ setMethod("concoef", "Coef.cao", function(object, ...)
-if(!isGeneric("calibrate"))
+if (!isGeneric("calibrate"))
setGeneric("calibrate", function(object, ...)
standardGeneric("calibrate"))
@@ -2071,7 +2075,7 @@ Tol.cao <- function(object, ...) {
stop("The tolerance for a 'cao' object is undefined")
}
-if(!isGeneric("Tol"))
+if (!isGeneric("Tol"))
setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
setMethod("Tol", "cao", function(object, ...)
Tol.cao(object, ...))
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 89d6495..0935a86 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,20 +7,39 @@
-coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
+coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
+ colon = FALSE) {
ans <- object@coefficients
+
+ if (colon) {
+ if (matrix.out)
+ stop("cannot have 'matrix.out = TRUE' and 'colon = TRUE'")
+ if (!label)
+ stop("cannot have 'label = FALSE' and 'colon = TRUE'")
+
+ d1 <- object@misc$colnames.x
+ Hlist <- object@constraints
+ M <- object@misc$M
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ new.labs <- vlabel(xn = d1, ncolHlist, M = M, colon = colon)
+ names(ans) <- new.labs
+ return(ans)
+ }
+
+
if (!label)
names(ans) <- NULL
if (!matrix.out)
return(ans)
-
+
+
ncolx <- object@misc$p # = length(object@constraints)
M <- object@misc$M
- Blist <- object@constraints
- if (all(trivial.constraints(Blist) == 1)) {
+ Hlist <- object@constraints
+ if (all(trivial.constraints(Hlist) == 1)) {
Bmat <- matrix(ans, nrow = ncolx, ncol = M, byrow = TRUE)
} else {
Bmat <- matrix(as.numeric(NA), nrow = ncolx, ncol = M)
@@ -28,12 +47,12 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
if (!matrix.out)
return(ans)
- ncolBlist <- unlist(lapply(Blist, ncol))
- nasgn <- names(Blist)
- temp <- c(0, cumsum(ncolBlist))
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ nasgn <- names(Hlist)
+ temp <- c(0, cumsum(ncolHlist))
for (ii in 1:length(nasgn)) {
index <- (temp[ii] + 1):temp[ii + 1]
- cmat <- Blist[[nasgn[ii]]]
+ cmat <- Hlist[[nasgn[ii]]]
Bmat[ii,] <- cmat %*% ans[index]
}
}
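
For reference, a sketch of the new 'colon' labelling (hypothetical
model; the exact label text comes from vlabel()):

  fit <- vglm(y ~ x2, poissonff, data = pdat)
  coef(fit)               # default names
  coef(fit, colon = TRUE) # names rebuilt by vlabel(), with colons

Note that colon = TRUE cannot be combined with matrix.out = TRUE or
label = FALSE; coefvlm() stops with an error in either case.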
diff --git a/R/cqo.R b/R/cqo.R
index 59fccf2..bb7f8ee 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 05fb32c..1f5f9a5 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -23,7 +23,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
NOS <- ifelse(modelno %in% c(3, 5), M/2, M)
lenbeta <- pstar * ifelse(nice31, NOS, 1)
- if (itol <- control$ITolerances) {
+ if (I.tol <- control$I.tolerances) {
if (Rank > 1) {
numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
evnu <- eigen(var(numat))
@@ -66,23 +66,22 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
usethisbeta <- if (inited == 2)
getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
- othint <- c(Rank = Rank, control$EqualTol, pstar = pstar,
+ othint <- c(Rank = Rank, control$eq.tol, pstar = pstar,
dimw = 1, inited = inited, modelno = modelno,
maxitl = control$maxitl, actnits = 0, twice = 0,
p1star = p1star, p2star = p2star, nice31 = nice31,
- lenbeta = lenbeta, itol = itol, control$trace,
+ lenbeta = lenbeta, I.tol = I.tol, control$trace,
p1 = p1, p2 = p2, control$imethod)
bnumat <- if (nice31) matrix(0,nstar,pstar) else
cbind(matrix(0, nstar, p2star), X.vlm.1save)
- ans1 <- if (nice31)
- .C("cqo_1",
+ ans1 <- if (nice31) .C("cqo_1",
numat = as.double(numat), as.double(ymat),
- as.double(if (p1) xmat[,control$colx1.index] else 999),
+ as.double(if (p1) xmat[, control$colx1.index] else 999),
as.double(wvec), etamat = as.double(usethiseta),
- moff = double(if (itol) n else 1),
+ moff = double(if (I.tol) n else 1),
fv = double(NOS*n), z = double(n*M), wz = double(n*M),
U = double(M*n), bnumat = as.double(bnumat),
qr = double(nstar*pstar), qraux = double(pstar),
@@ -94,12 +93,12 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
othdbl = as.double(c(small = control$SmallNo,
epsilon = control$epsilon, .Machine$double.eps,
iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS))), PACKAGE = "VGAM") else
+ iShape = rep(control$iShape, len = NOS)))) else
.C("cqo_2",
numat = as.double(numat), as.double(ymat),
- as.double(if (p1) xmat[,control$colx1.index] else 999),
+ as.double(if (p1) xmat[, control$colx1.index] else 999),
as.double(wvec), etamat = as.double(usethiseta),
- moff = double(if (itol) n else 1),
+ moff = double(if (I.tol) n else 1),
fv = double(NOS*n), z = double(n*M), wz = double(n*M),
U = double(M*n), bnumat = as.double(bnumat),
qr = double(nstar*pstar), qraux = double(pstar),
@@ -111,7 +110,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
othdbl = as.double(c(small = control$SmallNo,
epsilon = control$epsilon, .Machine$double.eps,
iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS))), PACKAGE = "VGAM")
+ iShape = rep(control$iShape, len = NOS))))
@@ -142,6 +141,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
+
calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
X.vlm.1save, modelno, Control,
n, M, p1star, p2star, nice31, allofit = FALSE) {
@@ -159,7 +159,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
lenbeta <- pstar * ifelse(nice31, NOS, 1)
- if (itol <- control$ITolerances) {
+ if (I.tol <- control$I.tolerances) {
if (Rank > 1) {
numat <- xmat[, control$colx2.index, drop=FALSE] %*% cmatrix
evnu <- eigen(var(numat))
@@ -203,11 +203,11 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
usethisbeta <- if (inited == 2)
getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
- othint <- c(Rank, control$EqualTol, pstar, dimw = 1, inited = inited,
+ othint <- c(Rank, control$eq.tol, pstar, dimw = 1, inited = inited,
modelno, maxitl = control$maxitl, actnits = 0, twice = 0,
p1star = p1star, p2star = p2star,
nice31 = nice31, lenbeta,
- itol = itol, control$trace,
+ I.tol = I.tol, control$trace,
p1, p2, control$imethod) # other ints
bnumat <- if (nice31) matrix(0,nstar,pstar) else
cbind(matrix(0,nstar,p2star), X.vlm.1save)
@@ -218,7 +218,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
numat = as.double(numat), as.double(ymat),
as.double(if (p1) xmat[,control$colx1.index] else 999),
as.double(wvec), etamat = as.double(usethiseta),
- moff = double(if (itol) n else 1),
+ moff = double(if (I.tol) n else 1),
fv = double(NOS*n), z = double(n*M), wz = double(n*M),
U = double(M*n), bnumat = as.double(bnumat),
qr = double(nstar * pstar), qraux = double(pstar),
@@ -234,7 +234,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
xmat2 = as.double(xmat2),
cmat = as.double(cmatrix),
p2 = as.integer(p2), deriv = double(p2*Rank),
- hstep = as.double(control$Hstep), PACKAGE = "VGAM")
+ hstep = as.double(control$Hstep))
if (ans1$errcode[1] != 0) {
warning("error code in calldcqo = ", ans1$errcode[1])
@@ -245,45 +245,46 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
}
-checkCMCO <- function(Blist, control, modelno) {
+
+checkCMCO <- function(Hlist, control, modelno) {
p1 <- length(colx1.index <- control$colx1.index)
p2 <- length(colx2.index <- control$colx2.index)
- if (p1 + p2 != length(Blist))
- stop("'Blist' is the wrong length")
+ if (p1 + p2 != length(Hlist))
+ stop("'Hlist' is the wrong length")
if (p1 == 0 || p2 == 0)
stop("Some variables are needed in noRRR and non-noRRR arguments")
if (all(names(colx1.index) != "(Intercept)"))
stop("an intercept term must be in the argument 'noRRR' formula")
- Blist1 <- vector("list", p1)
- Blist2 <- vector("list", p2)
+ Hlist1 <- vector("list", p1)
+ Hlist2 <- vector("list", p2)
for (kk in 1:p1)
- Blist1[[kk]] <- Blist[[(colx1.index[kk])]]
+ Hlist1[[kk]] <- Hlist[[(colx1.index[kk])]]
for (kk in 1:p2)
- Blist2[[kk]] <- Blist[[(colx2.index[kk])]]
+ Hlist2[[kk]] <- Hlist[[(colx2.index[kk])]]
if (modelno == 3 || modelno == 5) {
if (p1 > 1)
for (kk in 2:p1)
- Blist1[[kk]] <- (Blist1[[kk]])[c(TRUE,FALSE),,drop = FALSE]
+ Hlist1[[kk]] <- (Hlist1[[kk]])[c(TRUE,FALSE),,drop = FALSE]
for (kk in 1:p2)
- Blist2[[kk]] <- (Blist2[[kk]])[c(TRUE,FALSE),,drop = FALSE]
+ Hlist2[[kk]] <- (Hlist2[[kk]])[c(TRUE,FALSE),,drop = FALSE]
}
- if (!all(trivial.constraints(Blist2) == 1))
+ if (!all(trivial.constraints(Hlist2) == 1))
stop("the constraint matrices for the non-noRRR terms ",
"are not trivial")
- if (!trivial.constraints(Blist1[[1]]))
+ if (!trivial.constraints(Hlist1[[1]]))
stop("the constraint matrices for intercept term is ",
"not trivial")
if (p1 > 1)
- for (kk in 2:p1)
- if (!trivial.constraints(list(Blist1[[kk]])))
- stop("the constraint matrices for some 'noRRR' ",
- "terms is not trivial")
+ for (kk in 2:p1)
+ if (!trivial.constraints(list(Hlist1[[kk]])))
+ stop("the constraint matrices for some 'noRRR' ",
+ "terms is not trivial")
nice31 <- if (control$Quadratic)
- (!control$EqualTol || control$ITolerances) else TRUE
+ (!control$eq.tol || control$I.tolerances) else TRUE
as.numeric(nice31)
}
@@ -320,6 +321,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
trace <- control$trace
orig.stepsize <- control$stepsize
+ny <- names(y)
n <- dim(x)[1]
@@ -439,11 +441,11 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
}
}
- if (rrcontrol$ITolerances) {
+ if (rrcontrol$I.tolerances) {
latvarmat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
latvarmatmeans <- t(latvarmat) %*% matrix(1/n, n, 1)
if (!all(abs(latvarmatmeans) < 4))
- warning("ITolerances = TRUE but the variables making up the ",
+ warning("I.tolerances = TRUE but the variables making up the ",
"latent variable(s) do not appear to be centered.")
}
if (modelno %in% c(3, 5))
@@ -455,23 +457,23 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
rrcontrol$Ainit <- control$Ainit <- Amat # Good for valt()
rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
- nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
- ncolBlist <- unlist(lapply(Blist, ncol))
- dimB <- sum(ncolBlist)
+ Hlist <- process.constraints(constraints, x, M, specialCM = specialCM)
+ nice31 <- checkCMCO(Hlist, control = control, modelno = modelno)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ dimB <- sum(ncolHlist)
X.vlm.save <- if (nice31) {
NULL
} else {
- tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+ tmp500 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist,
C = Cmat, control = control)
xsmall.qrr <- tmp500$new.latvar.model.matrix
- B.list <- tmp500$constraints
+ H.list <- tmp500$constraints
latvar.mat <- tmp500$latvar.mat
if (length(tmp500$offset)) {
offset <- tmp500$offset
}
- lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
+ lm2vlm.model.matrix(xsmall.qrr, H.list, xij = control$xij)
}
if (length(coefstart) && length(X.vlm.save)) {
@@ -498,7 +500,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
asgn <- attr(x, "assign")
coefs <- getfromVGAMenv("beta", prefix = ".VGAM.CQO.")
- if (control$ITolerances) {
+ if (control$I.tolerances) {
if (NOS == M) {
coefs <- c(t(matrix(coefs, ncol = M))) # Get into right order
} else {
@@ -533,7 +535,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
df.residual <- 55 - 8 - Rank*p2
fit <- list(assign = asgn,
coefficients = coefs,
- constraints = Blist,
+ constraints = Hlist,
df.residual = df.residual,
df.total = n*M,
fitted.values = mu,
@@ -659,7 +661,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
if (length(X2)) {
alt <- valt(x = cbind(X1, X2), z = etamat,
U = sqrt(t(wts)), Rank = effrank,
- Blist = NULL, Cinit = NULL, trace = FALSE,
+ Hlist = NULL, Cinit = NULL, trace = FALSE,
colx1.index = 1:ncol(X1), Criterion = "res.ss")
temp.control <- list(Rank = effrank, colx1.index = 1:ncol(X1),
Alpha = 0.5,
@@ -685,7 +687,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
} else {
xij <- NULL # temporary measure
U <- t(sqrt(wts))
- tmp <- vlm.wfit(xmat = X1, zmat = etamat, Blist = NULL, U = U,
+ tmp <- vlm.wfit(xmat = X1, zmat = etamat, Hlist = NULL, U = U,
matrix.out = TRUE,
is.vlmX = FALSE, res.ss = TRUE, qr = FALSE, xij = xij)
ans <- crow1C(as.matrix(tmp$resid),
@@ -736,7 +738,7 @@ cqo.init.derivative.expression <- expression({
constraints <- replace.constraints(constraints, diag(M),
rrcontrol$colx2.index)
- nice31 <- (!control$EqualTol || control$ITolerances) &&
+ nice31 <- (!control$eq.tol || control$I.tolerances) &&
all(trivial.constraints(constraints) == 1)
}
@@ -747,8 +749,8 @@ cqo.init.derivative.expression <- expression({
stop("cannot fit this model using fast algorithm")
p2star <- if (nice31)
- ifelse(control$IToleran, Rank, Rank + Rank*(Rank+1)/2) else
- (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol, 1, NOS))
+ ifelse(control$I.toleran, Rank, Rank + Rank*(Rank+1)/2) else
+ (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$eq.tol, 1, NOS))
p1star <- if (nice31) ifelse(modelno %in% c(3, 5), 1+p1, p1) else
(ncol(X.vlm.save) - p2star)
@@ -788,7 +790,7 @@ cqo.derivative.expression <- expression({
warning("solution does not correspond to .VGAM.CQO.cmatrix")
}
- alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
+ alt <- valt.1iter(x = x, z = z, U = U, Hlist = Hlist,
C = Cmat, nice31 = nice31,
control = rrcontrol, lp.names = predictors.names,
MSratio = M / NOS)
@@ -837,7 +839,7 @@ cqo.end.expression <- expression({
extra$Dmat <- Dmat # Not the latest iteration
extra$B1 <- B1.save # Not the latest iteration (not good)
} else {
- Blist <- replace.constraints(Blist.save, Amat, colx2.index)
+ Hlist <- replace.constraints(Hlist.save, Amat, colx2.index)
}
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index 08972de..a42fa1e 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,16 +7,49 @@
-deviance.vlm <- function(object, ...)
- object@criterion$deviance
+deviance.vlm <- function(object,
+ summation = TRUE,
+ ...) {
+ if (summation) {
+ object@criterion$deviance
+ } else {
+
+
+ Args <- formals(args(object@family@deviance))
+ if (length(Args$summation) == 0)
+ stop("there is no 'summation' argument for the function in the ",
+ "'deviance' slot of the object.")
+
+
+ object@family@deviance(mu = fitted(object),
+ y = depvar(object),
+ w = weights(object, type = "prior"),
+ residuals = FALSE,
+ eta = predict(object),
+ extra = object@extra,
+ summation = summation)
+ }
+}
+
-deviance.vglm <- function(object, ...)
+if (FALSE)
+deviance.vglm <- function(object,
+ summation = TRUE,
+ ...)
object@criterion$deviance
-if(!isGeneric("deviance"))
+
+
+
+
+
+
+
+
+if (!isGeneric("deviance"))
setGeneric("deviance", function(object, ...)
standardGeneric("deviance"))
@@ -25,6 +58,7 @@ setMethod("deviance", "vlm", function(object, ...)
deviance.vlm(object, ...))
+if (FALSE)
setMethod("deviance", "vglm", function(object, ...)
deviance.vglm(object, ...))
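
A sketch of the resulting behaviour (hypothetical model; the
non-summation branch needs the family's deviance slot to accept a
'summation' argument, otherwise deviance.vlm() stops):

  fit <- vglm(y ~ x2, poissonff, data = pdat)
  deviance(fit)                    # scalar, from @criterion$deviance
  deviance(fit, summation = FALSE) # per-observation contributions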
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index afe953c..55fe5d9 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -11,15 +11,18 @@ effects.vlm <- function(object, ...) {
invisible(NULL)
}
-if(!isGeneric("effects"))
- setGeneric("effects", function(object, ...) standardGeneric("effects"))
-if(is.R()) {
+if (!isGeneric("effects"))
+ setGeneric("effects", function(object, ...)
+ standardGeneric("effects"))
+
+
+if (is.R()) {
setMethod("effects", "vlm", function(object, ...)
- effects.vlm(object, ...))
+ effects.vlm(object, ...))
} else {
setMethod("effects", "vlm", function(object, ...)
- effects.vlm(object, ...))
+ effects.vlm(object, ...))
}
diff --git a/R/family.actuary.R b/R/family.actuary.R
index 434aae7..8d39058 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -99,8 +99,7 @@ rgumbelII <- function(n, shape, scale = 1) {
ishape = NULL, iscale = NULL,
probs.y = c(0.2, 0.5, 0.8),
perc.out = NULL, # 50,
- imethod = 1, zero = -2)
-{
+ imethod = 1, zero = -2) {
lshape <- as.list(substitute(lshape))
@@ -147,12 +146,13 @@ rgumbelII <- function(n, shape, scale = 1) {
"gamma(1 + 1/shape)^2)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
perc.out = .perc.out ,
zero = .zero )
}, list( .zero = zero,
@@ -173,10 +173,10 @@ rgumbelII <- function(n, shape, scale = 1) {
y <- temp5$y
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -186,12 +186,12 @@ rgumbelII <- function(n, shape, scale = 1) {
predictors.names <-
c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
namesof(mynames2, .lscale , .escale , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
- Shape.init <- matrix(if(length( .ishape )) .ishape else 0 + NA,
+ Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
n, ncoly, byrow = TRUE)
- Scale.init <- matrix(if(length( .iscale )) .iscale else 0 + NA,
+ Scale.init <- matrix(if (length( .iscale )) .iscale else 0 + NA,
n, ncoly, byrow = TRUE)
if (!length(etastart)) {
@@ -213,12 +213,12 @@ rgumbelII <- function(n, shape, scale = 1) {
if (!is.Numeric(Scale.init[, ilocal]))
Scale.init[, ilocal] <-
exp(fit0$coef["Intercept"] / Shape.init[, ilocal])
- } # ilocal
+ } # ilocal
etastart <-
cbind(theta2eta(Shape.init, .lshape , .eshape ),
theta2eta(Scale.init, .lscale , .escale ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}
}), list(
@@ -256,21 +256,21 @@ rgumbelII <- function(n, shape, scale = 1) {
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lshape , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eshape
- misc$earg[[Musual*ii ]] <- .escale
+ misc$earg[[M1*ii-1]] <- .eshape
+ misc$earg[[M1*ii ]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -284,19 +284,48 @@ rgumbelII <- function(n, shape, scale = 1) {
.perc.out = perc.out,
.imethod = imethod ) )),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * dgumbelII(x = y, shape = Shape,
- scale = Scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgumbelII(x = y, shape = Shape,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape
) )),
vfamily = c("gumbelII"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+ rgumbelII(nsim * length(Scale), shape = Shape, scale = Scale)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape
+ ) )),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
@@ -310,7 +339,7 @@ rgumbelII <- function(n, shape, scale = 1) {
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
cbind(dshape.deta, dscale.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape
) )),
@@ -326,8 +355,8 @@ rgumbelII <- function(n, shape, scale = 1) {
wz <- array(c(c(w) * ned2l.dshape2 * dshape.deta^2,
c(w) * ned2l.dscale2 * dscale.deta^2,
c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
}), list( .lscale = lscale, .lshape = lshape ))))
}
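
A sketch of how the new simslot above is reached (hypothetical data;
rgumbelII() is defined earlier in this file):

  gdat <- data.frame(x2 = runif(50))
  gdat <- transform(gdat, y = rgumbelII(50, shape = exp(1),
                                        scale = exp(1 + 0.5 * x2)))
  gfit <- vglm(y ~ x2, gumbelII, data = gdat)
  simulate(gfit, nsim = 3) # dispatches to the family's simslot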
@@ -507,24 +536,26 @@ dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
-alpha = shape; beta = scale;
+ alpha <- shape
+ beta <- scale
warning("does not integrate to unity")
- ret=ifelse(x<=0 | beta<=0,NaN,
- exp(alpha+beta*x)*(1+exp(alpha+rho))**(exp(-rho/beta))/
- (1+exp(alpha+rho+beta*x))**(1+exp(-rho/beta)))
+ ret <- ifelse(x <= 0 | beta <= 0, NaN,
+ exp(alpha+beta*x)*(1+exp(alpha+rho))**(exp(-rho/beta))/
+ (1+exp(alpha+rho+beta*x))**(1+exp(-rho/beta)))
ret
}
qbeard <- function(x, u = 0.5, alpha = 1, beta = 1,rho = 1) {
- ret <- ifelse(x<=0 | u<=0 | u>=1 |
- length(x)!=length(u) | beta<=0,
- NaN, (1/beta)*
- (log((u**(-beta*exp(rho)))*
- (1+exp(alpha+rho+beta*x))-1)-alpha-rho)-x)
+ ret <-
+ ifelse(x <= 0 | u <= 0 | u >= 1 |
+ length(x) != length(u) | beta <= 0,
+ NaN,
+ (1/beta) * (log((u**(-beta*exp(rho))) *
+ (1+exp(alpha+rho+beta*x))-1)-alpha-rho)-x)
return(ret)
}
@@ -667,12 +698,13 @@ perks.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -693,10 +725,10 @@ perks.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -704,7 +736,7 @@ perks.control <- function(save.weight = TRUE, ...) {
predictors.names <-
c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
namesof(mynames2, .lscale , .escale , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -744,13 +776,13 @@ perks.control <- function(save.weight = TRUE, ...) {
matH[, spp.] <- shape.grid[index.shape]
if (!length( .iscale ))
matC[, spp.] <- mymat[index.shape, 1]
- } # spp.
+ } # spp.
etastart <-
cbind(theta2eta(matH, .lshape , .eshape ),
theta2eta(matC, .lscale , .escale ))[,
- interleave.VGAM(M, M = Musual)]
- } # End of !length(etastart)
+ interleave.VGAM(M, M = M1)]
+ } # End of !length(etastart)
}), list( .lscale = lscale, .lshape = lshape,
.eshape = eshape, .escale = escale,
.gshape = gshape, .gscale = gscale,
@@ -768,19 +800,19 @@ perks.control <- function(save.weight = TRUE, ...) {
misc$link <-
c(rep( .lshape , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eshape
- misc$earg[[Musual*ii ]] <- .escale
+ misc$earg[[M1*ii-1]] <- .eshape
+ misc$earg[[M1*ii ]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -788,20 +820,51 @@ perks.control <- function(save.weight = TRUE, ...) {
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dperks(x = y, shape = Shape,
- scale = Scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dperks(x = y, shape = Shape,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
vfamily = c("perks"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+ rperks(nsim * length(Scale), shape = Shape, scale = Scale)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
@@ -821,19 +884,19 @@ perks.control <- function(save.weight = TRUE, ...) {
dthetas.detas <- cbind(dshape.deta, dscale.deta)
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
weight = eval(substitute(expression({
- NOS <- M / Musual
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ NOS <- M / M1
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
for (spp. in 1:NOS) {
@@ -894,26 +957,26 @@ if (ii < 3) {
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lscale = lscale,
.escale = escale,
.nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
-} # perks()
+} # perks()
@@ -1075,12 +1138,13 @@ makeham.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -1102,10 +1166,10 @@ makeham.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 3
+ M1 <- 3
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1115,7 +1179,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
namesof(mynames2, .lscale , .escale , tag = FALSE),
namesof(mynames3, .lepsil , .eepsil , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
@@ -1162,7 +1226,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
matH[, spp.] <- shape.grid[index.shape]
if (!length( .iscale ))
matC[, spp.] <- mymat[index.shape, 1]
- } # spp.
+ } # spp.
@@ -1188,14 +1252,14 @@ makeham.control <- function(save.weight = TRUE, ...) {
Scale = matC[1, spp.]))
matE[, spp.] <- Init.epsil
- } # spp.
+ } # spp.
etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
theta2eta(matC, .lscale , .escale ),
theta2eta(matE, .lepsil , .eepsil ))[,
- interleave.VGAM(M, M = Musual)]
- } # End of !length(etastart)
+ interleave.VGAM(M, M = M1)]
+ } # End of !length(etastart)
}), list(
.lshape = lshape, .lscale = lscale, .lepsil = lepsil,
.eshape = eshape, .escale = escale, .eepsil = eepsil,
@@ -1213,24 +1277,24 @@ makeham.control <- function(save.weight = TRUE, ...) {
.eshape = eshape, .escale = escale, .eepsil = eepsil
))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lshape , length = ncoly),
rep( .lscale , length = ncoly),
- rep( .lepsil , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ rep( .lepsil , length = ncoly))[interleave.VGAM(M, M = M1)]
temp.names <- c(mynames1, mynames2, mynames3)[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-2]] <- .eshape
- misc$earg[[Musual*ii-1]] <- .escale
- misc$earg[[Musual*ii ]] <- .eepsil
+ misc$earg[[M1*ii-2]] <- .eshape
+ misc$earg[[M1*ii-1]] <- .escale
+ misc$earg[[M1*ii ]] <- .eepsil
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -1239,23 +1303,54 @@ makeham.control <- function(save.weight = TRUE, ...) {
.eshape = eshape, .escale = escale, .eepsil = eepsil,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lepsil , .eepsil )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dmakeham(x = y, shape = shape, scale = scale,
- epsil = epsil, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dmakeham(x = y, shape = shape, scale = scale,
+ epsil = epsil, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
- }, list(
- .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
- .eshape = eshape, .escale = escale, .eepsil = eepsil
- ))),
+ }, list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
vfamily = c("makeham"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
+ .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+ .lscale , .escale )
+ epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
+ .lepsil , .eepsil )
+ rmakeham(nsim * length(Scale),
+ shape = c(shape), scale = c(Scale), epsilon = c(epsil))
+ }, list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
.lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
@@ -1281,7 +1376,7 @@ makeham.control <- function(save.weight = TRUE, ...) {
myderiv <- c(w) * cbind(dl.dshape,
dl.dscale,
dl.depsil) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list(
.lshape = lshape, .lscale = lscale, .lepsil = lepsil,
.eshape = eshape, .escale = escale, .eepsil = eepsil
@@ -1290,12 +1385,12 @@ makeham.control <- function(save.weight = TRUE, ...) {
weight = eval(substitute(expression({
- NOS <- M / Musual
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ NOS <- M / M1
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
wz <- matrix(0.0, n, M + M - 1 + M - 2) # wz has half-bw 3
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
for (spp. in 1:NOS) {
@@ -1371,27 +1466,27 @@ if (ii < 3) {
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list(
.lshape = lshape, .lscale = lscale, .lepsil = lepsil,
.eshape = eshape, .escale = escale, .eepsil = eepsil,
.nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
-} # makeham()
+} # makeham()
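
Each loglikelihood slot in this file gains a summation argument (default
TRUE, so fits and deviances are unchanged); with summation = FALSE the slot
returns the per-observation weighted log-likelihood terms ll.elts instead
of their sum. A sketch, assuming logLik() forwards the argument down to the
slot, as the accompanying changes to R/logLik.vlm.q suggest ('fit' stands
for any model fitted with one of these families):

  ll.total <- logLik(fit)                     # scalar, as before
  ll.parts <- logLik(fit, summation = FALSE)  # one term per observation
  all.equal(sum(ll.parts), as.numeric(ll.total))
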
@@ -1525,12 +1620,13 @@ gompertz.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -1551,10 +1647,10 @@ gompertz.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1562,7 +1658,7 @@ gompertz.control <- function(save.weight = TRUE, ...) {
predictors.names <-
c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
namesof(mynames2, .lscale , .escale , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -1605,12 +1701,12 @@ gompertz.control <- function(save.weight = TRUE, ...) {
matH[, spp.] <- shape.grid[index.shape]
if (!length( .iscale ))
matC[, spp.] <- mymat[index.shape, 1]
- } # spp.
+ } # spp.
etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
theta2eta(matC, .lscale , .escale ))[,
- interleave.VGAM(M, M = Musual)]
- } # End of !length(etastart)
+ interleave.VGAM(M, M = M1)]
+ } # End of !length(etastart)
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale,
.ishape = ishape, .iscale = iscale
@@ -1623,21 +1719,21 @@ gompertz.control <- function(save.weight = TRUE, ...) {
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lshape , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eshape
- misc$earg[[Musual*ii ]] <- .escale
+ misc$earg[[M1*ii-1]] <- .eshape
+ misc$earg[[M1*ii ]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -1645,20 +1741,48 @@ gompertz.control <- function(save.weight = TRUE, ...) {
.eshape = eshape, .escale = escale,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgompertz(x = y, shape = shape,
- scale = scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgompertz(x = y, shape = shape,
+ scale = scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
vfamily = c("gompertz"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+ rgompertz(nsim * length(Scale),
+ shape = c(Shape), scale = c(Scale))
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lshape ,
.eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lscale ,
@@ -1676,19 +1800,19 @@ gompertz.control <- function(save.weight = TRUE, ...) {
dthetas.detas <- cbind(dshape.deta, dscale.deta)
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
weight = eval(substitute(expression({
- NOS <- M / Musual
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ NOS <- M / M1
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
for (spp. in 1:NOS) {
@@ -1720,38 +1844,40 @@ if (ii < 3) {
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lscale = lscale,
.escale = escale,
.nsimEIM = nsimEIM ))))
-} # gompertz()
+} # gompertz()
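
gompertz(), like perks() above, is an M1 = 2 family: each response gets a
shape and a scale linear predictor, and interleave.VGAM() turns the
blockwise column order (all shapes, then all scales) into per-response
pairs. A small sketch of that ordering, assuming interleave.VGAM() keeps
its usual definition c(matrix(1:L, nrow = M, byrow = TRUE)) (it may need
the VGAM::: prefix if not exported):

  interleave.VGAM(4, M = 2)
  # [1] 1 3 2 4   i.e. (shape1, shape2, scale1, scale2)
  #               is reordered to (shape1, scale1, shape2, scale2)
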
-dmoe <- function (x, alpha = 1, lambda = 1, log = FALSE) {
+ dmoe <- function (x, alpha = 1, lambda = 1, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- LLL <- max(length(x), length(alpha), length(lambda))
+ LLL <- max(length(x),
+ length(alpha),
+ length(lambda))
if (length(x) != LLL) x <- rep(x, length.out = LLL)
if (length(alpha) != LLL) alpha <- rep(alpha, length.out = LLL)
if (length(lambda) != LLL) lambda <- rep(lambda, length.out = LLL)
@@ -1771,7 +1897,7 @@ dmoe <- function (x, alpha = 1, lambda = 1, log = FALSE) {
-pmoe <- function (q, alpha = 1, lambda = 1) {
+ pmoe <- function (q, alpha = 1, lambda = 1) {
ret <- ifelse(alpha <= 0 | lambda <= 0, NaN,
1 - 1 / (expm1(lambda * q) + alpha))
ret[q < log(2 - alpha) / lambda] <- 0
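
dmoe() above recycles x, alpha and lambda to the common length LLL before
evaluating, so scalar and vector arguments can be mixed freely. For example
(parameter values arbitrary):

  dmoe(x = c(0.5, 1, 2), alpha = 1.5, lambda = c(1, 2, 3))
  # alpha is recycled to length 3; each triple is evaluated elementwise
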
@@ -1858,12 +1984,13 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -1885,10 +2012,10 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("alpha", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1896,7 +2023,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
predictors.names <-
c(namesof(mynames1, .lalpha0 , .ealpha0 , tag = FALSE),
namesof(mynames2, .llambda , .elambda , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -1932,13 +2059,13 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
matA[, spp.] <- Alpha0.init
if (!length( .ilambda ))
matL[, spp.] <- Lambda.init
- } # spp.
+ } # spp.
etastart <- cbind(theta2eta(matA, .lalpha0, .ealpha0 ),
theta2eta(matL, .llambda, .elambda ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
mustart <- NULL # Since etastart has been computed.
- } # End of !length(etastart)
+ } # End of !length(etastart)
}), list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda,
.ialpha0 = ialpha0, .ilambda = ilambda,
@@ -1952,21 +2079,21 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
}, list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lalpha0 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .ealpha0
- misc$earg[[Musual*ii ]] <- .elambda
+ misc$earg[[M1*ii-1]] <- .ealpha0
+ misc$earg[[M1*ii ]] <- .elambda
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -1976,20 +2103,28 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
.nsimEIM = nsimEIM,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha0 <- eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * log(dmoe(x = y, alpha = alpha0,
- lambda = lambda)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * log(dmoe(x = y, alpha = alpha0,
+ lambda = lambda))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
vfamily = c("exponential.mo"),
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
alpha0 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
.ealpha0 )
lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
@@ -2005,19 +2140,19 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
dthetas.detas <- cbind(dalpha0.deta,
dlambda.deta)
myderiv <- c(w) * cbind(dl.dalpha0, dl.dlambda) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
weight = eval(substitute(expression({
- NOS <- M / Musual
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ NOS <- M / M1
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
for (spp. in 1:NOS) {
@@ -2048,27 +2183,27 @@ if (ii < 3) {
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .llambda = llambda,
.elambda = elambda,
.nsimEIM = nsimEIM ))))
-} # exponential.mo()
+} # exponential.mo()
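
All of these weight slots store the expected information in matrix-band
format: wz holds only the distinct entries of each symmetric M1 x M1 EIM
block, and iam() maps a (row, col) pair to the corresponding wz column,
diagonals first. A sketch for the M1 = 2 (tridiagonal) case, assuming
iam()'s usual convention (it may need the VGAM::: prefix if not exported):

  iam(1, 1, M = 2)  # 1: first diagonal entry
  iam(2, 2, M = 2)  # 2: second diagonal entry
  iam(1, 2, M = 2)  # 3: the off-diagonal, so ncol(wz) = M + M - 1 = 3 here
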
@@ -2132,10 +2267,10 @@ if (ii < 3) {
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale" , .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p , earg = .eshape2.p , tag = FALSE),
+ namesof("shape3.q", .lshape3.q , earg = .eshape3.q , tag = FALSE))
if (!length( .ishape1.a ) || !length( .iscale )) {
qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -2163,10 +2298,10 @@ if (ii < 3) {
etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ theta2eta(parg, .lshape2.p , earg = .eshape2.p ),
+ theta2eta(qq, .lshape3.q , earg = .eshape3.q ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
@@ -2175,10 +2310,10 @@ if (ii < 3) {
.ishape1.a = ishape1.a, .iscale = iscale,
.ishape2.p = ishape2.p, .ishape3.q = ishape3.q ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
+ qq <- eta2theta(eta[, 4], .lshape3.q , earg = .eshape3.q )
ans <- Scale * exp(lgamma(parg + 1/aa) +
lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
ans[parg + 1/aa <= 0] <- NA
@@ -2199,21 +2334,32 @@ if (ii < 3) {
misc$earg <- list(shape1.a = .eshape1.a , scale = .escale ,
shape2.p = .eshape2.p , shape3.q = .eshape3.q )
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * (log(aa) + (aa * parg - 1) * log(y) -
- aa * parg * log(scale) +
- - lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa)))
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
+ qq <- eta2theta(eta[, 4], .lshape3.q , earg = .eshape3.q )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(aa) + (aa * parg - 1) * log(y) -
+ aa * parg * log(scale) +
+ - lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
@@ -2221,10 +2367,10 @@ if (ii < 3) {
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
vfamily = c("genbetaII"),
deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
+ qq <- eta2theta(eta[, 4], .lshape3.q , earg = .eshape3.q )
temp1 <- log(y/scale)
temp2 <- (y/scale)^aa
@@ -2238,10 +2384,10 @@ if (ii < 3) {
dl.dp <- aa * temp1 + temp3 - temp3a - temp4
dl.dq <- temp3 - temp3b - temp4
- da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
- dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p , earg = .eshape2.p )
+ dq.deta <- dtheta.deta(qq, .lshape3.q , earg = .eshape3.q )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta,
@@ -2536,8 +2682,10 @@ ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL <- max(length(x), length(shape1.a), length(scale),
- length(shape2.p))
+ LLL <- max(length(x),
+ length(shape1.a),
+ length(scale),
+ length(shape2.p))
x <- rep(x, length.out = LLL)
shape1.a <- rep(shape1.a, length.out = LLL)
scale <- rep(scale, length.out = LLL)
@@ -2556,7 +2704,8 @@ ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
x.eq.0 <- (x == 0) & !is.na(x)
Loglik[x.eq.0] <- log(shape1.a[x.eq.0]) +
log(shape2.p[x.eq.0]) -
- shape1.a[x.eq.0] * shape2.p[x.eq.0] * log(scale[x.eq.0])
+ shape1.a[x.eq.0] * shape2.p[x.eq.0] *
+ log(scale[x.eq.0])
Loglik[is.na(x)] <- NA
Loglik[is.nan(x)] <- NaN
Loglik[x == Inf] <- log(0)
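
ddagum() computes the density on the log scale (the Loglik vector above)
and exponentiates only when log = FALSE, so log = TRUE is the numerically
safer choice inside loglikelihood slots. For example (parameter values
arbitrary):

  all.equal(ddagum(2, shape1.a = 3, scale = 1, shape2.p = 2, log = TRUE),
            log(ddagum(2, shape1.a = 3, scale = 1, shape2.p = 2)))
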
@@ -2621,9 +2770,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape3.q", .lshape3.q , earg = .eshape3.q , tag = FALSE))
parg <- 1
if (!length( .ishape1.a) || !length( .iscale )) {
@@ -2649,9 +2798,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(qq, .lshape3.q , earg = .eshape3.q ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape3.q = lshape3.q,
@@ -2661,10 +2810,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.ishape3.q = ishape3.q ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
- qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+ qq <- eta2theta(eta[, 3], .lshape3.q , earg = .eshape3.q )
ans <- Scale * exp(lgamma(parg + 1/aa) +
lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
@@ -2681,37 +2830,72 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
last = eval(substitute(expression({
misc$link <-
- c(shape1.a = .lshape1.a, scale = .lscale , shape3.q = .lshape3.q)
+ c(shape1.a = .lshape1.a ,
+ scale = .lscale ,
+ shape3.q = .lshape3.q )
misc$earg <-
- list(shape1.a = .eshape1.a, scale = .escale , shape3.q = .eshape3.q)
+ list(shape1.a = .eshape1.a ,
+ scale = .escale ,
+ shape3.q = .eshape3.q )
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
.eshape3.q = eshape3.q,
.lshape3.q = lshape3.q ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dsinmad(x = y, shape1.a = aa, scale = scale,
- shape3.q = qq, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dsinmad(x = y, shape1.a = aa, scale = scale,
+ shape3.q = qq, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape3.q = lshape3.q,
.eshape1.a = eshape1.a, .escale = escale,
.eshape3.q = eshape3.q ))),
vfamily = c("sinmad"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q )
+ rsinmad(nsim * length(qq), shape1.a = aa, scale = scale,
+ shape3.q = qq)
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q ))),
+
+
deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
- qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+ qq <- eta2theta(eta[, 3], .lshape3.q , earg = .eshape3.q )
temp1 <- log(y/scale)
temp2 <- (y/scale)^aa
@@ -2722,9 +2906,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale <- (aa / scale) * (-parg + (parg + qq) / (1 + 1 / temp2))
dl.dq <- digamma(parg + qq) - temp3b - log1p(temp2)
- da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
- dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dq.deta <- dtheta.deta(qq, .lshape3.q , earg = .eshape3.q )
c(w) * cbind(dl.da * da.deta,
dl.dscale * dscale.deta,
@@ -2806,9 +2990,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ncol.w.max = 1, ncol.y.max = 1)
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+ namesof("shape2.p", .lshape2.p , earg = .eshape2.p , tag = FALSE))
if (!length( .ishape1.a) || !length( .iscale )) {
qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -2836,9 +3020,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p , earg = .eshape2.p ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape2.p = lshape2.p,
@@ -2847,9 +3031,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.ishape1.a = ishape1.a, .iscale = iscale,
.ishape2.p = ishape2.p ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
qq <- 1
ans <- Scale * exp(lgamma(parg + 1/aa) +
@@ -2865,36 +3049,67 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.eshape2.p = eshape2.p,
.lshape2.p = lshape2.p ))),
last = eval(substitute(expression({
- misc$link <- c(shape1.a = .lshape1.a, scale = .lscale ,
+ misc$link <- c(shape1.a = .lshape1.a , scale = .lscale ,
p = .lshape2.p )
- misc$earg <- list(shape1.a = .eshape1.a, scale = .escale ,
+ misc$earg <- list(shape1.a = .eshape1.a , scale = .escale ,
p = .eshape2.p )
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape2.p = lshape2.p,
.eshape1.a = eshape1.a, .escale = escale,
.eshape2.p = eshape2.p ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
qq <- 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * ddagum(x = y, shape1.a = aa, scale = Scale,
- shape2.p = parg, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * ddagum(x = y, shape1.a = aa, scale = Scale,
+ shape2.p = parg, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape2.p = lshape2.p,
.eshape1.a = eshape1.a, .escale = escale,
.eshape2.p = eshape2.p ))),
vfamily = c("dagum"),
- deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p )
+ rdagum(nsim * length(parg), shape1.a = aa, scale = Scale,
+ shape2.p = parg)
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p ))),
+
+
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p , earg = .eshape2.p )
qq <- 1
temp1 <- log(y / Scale)
@@ -2906,9 +3121,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp <- aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
- da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
- dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p , earg = .eshape2.p )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta,
@@ -3019,7 +3234,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
cbind(theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p ),
theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
}), list( .lscale = lscale,
@@ -3032,7 +3247,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
linkinv = eval(substitute(function(eta, extra = NULL) {
aa <- 1
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p )
qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
ans <- Scale * exp(lgamma(parg + 1/aa) +
@@ -3057,19 +3272,29 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
shape3.q = .eshape3.q)
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list(
.lscale = lscale, .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
.escale = escale, .eshape2.p = eshape2.p, .eshape3.q = eshape3.q ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- 1
scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p )
qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale,
.escale = escale,
@@ -3079,7 +3304,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
deriv = eval(substitute(expression({
aa <- 1
scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p )
qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
temp1 <- log(y/scale)
@@ -3094,7 +3319,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dq <- temp3 - temp3b - temp4
dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p )
dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.dscale * dscale.deta,
@@ -3252,28 +3477,54 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}, list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q ))),
last = eval(substitute(expression({
- misc$link <- c(scale = .lscale , shape3.q = .lshape3.q)
+ misc$link <- c(scale = .lscale , shape3.q = .lshape3.q )
- misc$earg <- list(scale = .escale , shape3.q = .eshape3.q)
+ misc$earg <- list(scale = .escale , shape3.q = .eshape3.q )
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- 1
scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
parg <- 1
- qq <- eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dlomax(x = y, scale = scale,
- shape3.q = qq, log = TRUE))
+ qq <- eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlomax(x = y, scale = scale,
+ shape3.q = qq, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q ))),
vfamily = c("lomax"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ qq <- eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q )
+ rlomax(nsim * length(qq), scale = scale, shape3.q = qq)
+ }, list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
+
+
deriv = eval(substitute(expression({
aa <- 1
scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -3381,7 +3632,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.eshape1.a = eshape1.a, .escale = escale,
.ishape1.a = ishape1.a, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
qq <- 1
@@ -3401,23 +3652,49 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
misc$earg <- list(shape1.a = .eshape1.a , scale = .escale )
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- qq <- 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dfisk(x = y, shape1.a = aa, scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
vfamily = c("fisk"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ rfisk(nsim * length(aa), shape1.a = aa, scale = Scale)
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+
+
deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- qq <- 1
temp1 <- log(y/scale)
@@ -3494,8 +3771,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
predictors.names <-
- c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p , tag = FALSE))
qq <- aa <- 1
@@ -3517,14 +3794,14 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
cbind(theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p ))
}
}), list( .lscale = lscale, .lshape2.p = lshape2.p,
.escale = escale, .eshape2.p = eshape2.p,
.iscale = iscale, .ishape2.p = ishape2.p ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p )
NA * Scale
}, list( .lscale = lscale,
@@ -3537,28 +3814,54 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
misc$earg <- list(scale = .escale , shape2.p = .eshape2.p )
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lscale = lscale,
.escale = escale,
.eshape2.p = eshape2.p,
.lshape2.p = lshape2.p ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- 1
- scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p )
qq <- 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dinvlomax(x = y, scale = scale,
- shape2.p = parg, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dinvlomax(x = y, scale = Scale,
+ shape2.p = parg, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale, .lshape2.p = lshape2.p,
.escale = escale, .eshape2.p = eshape2.p ))),
vfamily = c("invlomax"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p )
+ rinvlomax(nsim * length(Scale), scale = Scale, shape2.p = parg)
+ }, list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))),
+
+
deriv = eval(substitute(expression({
aa <- qq <- 1
- scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- parg <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p)
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p )
temp1 <- log(y/scale)
temp2 <- (y/scale)^aa
@@ -3567,7 +3870,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dp <- aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta <- dtheta.deta(parg, .lshape2.p , earg = .eshape2.p)
+ dp.deta <- dtheta.deta(parg, .lshape2.p , earg = .eshape2.p )
c(w) * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta )
@@ -3629,8 +3932,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
parg <- 1
@@ -3662,8 +3965,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ))
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
+ theta2eta(scale, .lscale , earg = .escale ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
@@ -3685,29 +3988,57 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
last = eval(substitute(expression({
- misc$link <- c(shape1.a = .lshape1.a, scale = .lscale)
+ misc$link <- c(shape1.a = .lshape1.a , scale = .lscale )
- misc$earg <- list(shape1.a = .eshape1.a, scale = .escale )
+ misc$earg <- list(shape1.a = .eshape1.a , scale = .escale )
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
qq <- aa
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dparalogistic(x = y, shape1.a = aa,
- scale = scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dparalogistic(x = y, shape1.a = aa,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
vfamily = c("paralogistic"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ rparalogistic(nsim * length(Scale), shape1.a = aa, scale = Scale)
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+
+
+
deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- 1
qq <- aa
@@ -3720,8 +4051,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta)
@@ -3782,8 +4113,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a , tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
if (!length( .ishape1.a) || !length( .iscale )) {
qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -3814,15 +4145,15 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
etastart <-
- cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a),
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
theta2eta(scale, .lscale , earg = .escale ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
.ishape1.a = ishape1.a, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- aa
qq <- 1
@@ -3836,30 +4167,58 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
last = eval(substitute(expression({
- misc$link <- c(shape1.a = .lshape1.a, scale = .lscale )
+ misc$link <- c(shape1.a = .lshape1.a , scale = .lscale )
- misc$earg <- list(shape1.a = .eshape1.a, scale = .escale )
+ misc$earg <- list(shape1.a = .eshape1.a , scale = .escale )
misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- aa
qq <- 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dinvparalogistic(x = y, shape1.a = aa,
- scale = scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dinvparalogistic(x = y, shape1.a = aa,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale))),
vfamily = c("invparalogistic"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ rinvparalogistic(nsim * length(Scale), shape1.a = aa, scale = Scale)
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+
+
+
deriv = eval(substitute(expression({
- aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
parg <- aa
qq <- 1
@@ -3871,8 +4230,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta )
@@ -3889,7 +4248,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
parg * qq * (temp3a -temp3b)) / (scale *
(1 + parg + qq))
- wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # M==3 means 6=dimm(M)
wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
@@ -3944,57 +4303,67 @@ warning("20040402; does not work, possibly because first derivs are ",
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ stop("response must be a vector or a one-column matrix")
predictors.names <-
- c(namesof("loc", "identity", earg = list(), tag = FALSE),
+ c(namesof("loc", "identitylink", earg = list(), tag = FALSE),
namesof("sigma", .link.sigma, earg = .esigma, tag = FALSE),
namesof("r", .link.r, earg = .er, tag = FALSE))
if (!length( .init.sigma) || !length( .init.r)) {
init.r <- if (length( .init.r)) .init.r else 1
sigma.init <- (0.5 * sum(abs(log(y) -
- mean(log(y )))^init.r))^(1/init.r)
+ mean(log(y )))^init.r))^(1/init.r)
}
- if (any(y <= 0)) stop("y must be positive")
+ if (any(y <= 0))
+ stop("y must be positive")
if (!length(etastart)) {
- sigma.init <- rep(if (length( .init.sigma)) .init.sigma else
- sigma.init, length.out = n)
- r.init <- if (length( .init.r)) .init.r else init.r
- etastart <-
- cbind(mu = rep(log(median(y)), length.out = n),
- sigma = sigma.init,
- r = r.init)
- }
+ sigma.init <- rep(if (length( .init.sigma)) .init.sigma else
+ sigma.init, length.out = n)
+ r.init <- if (length( .init.r)) .init.r else init.r
+ etastart <-
+ cbind(mu = rep(log(median(y)), length.out = n),
+ sigma = sigma.init,
+ r = r.init)
+ }
}), list( .link.sigma = link.sigma, .link.r = link.r,
.init.sigma = init.sigma, .init.r = init.r ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], "identity", earg = list())
- sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
- r <- eta2theta(eta[, 3], .link.r, earg = .er)
- r
+ mymu <- eta2theta(eta[, 1], "identitylink" , earg = list())
+ sigma <- eta2theta(eta[, 2], .link.sigma , earg = .esigma )
+ rrrrr <- eta2theta(eta[, 3], .link.r , earg = .er )
+ rrrrr
}, list( .link.sigma = link.sigma, .link.r = link.r ))),
last = eval(substitute(expression({
- misc$link = c(loc = "identity",
+ misc$link = c(loc = "identitylink",
"sigma" = .link.sigma,
r = .link.r )
misc$expected = TRUE
}), list( .link.sigma = link.sigma, .link.r = link.r ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], "identity", earg = list())
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mymu <- eta2theta(eta[, 1], "identitylink", earg = list())
sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
r <- eta2theta(eta[, 3], .link.r, earg = .er)
temp89 <- (abs(log(y)-mymu)/sigma)^r
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link.sigma = link.sigma, .link.r = link.r ))),
vfamily = c("genlognormal3"),
deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[, 1], "identity", earg = list())
+ mymu <- eta2theta(eta[, 1], "identitylink", earg = list())
sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
r <- eta2theta(eta[, 3], .link.r, earg = .er)
@@ -4007,7 +4376,7 @@ warning("20040402; does not work, possibly because first derivs are ",
dl.dr <- (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
temp33r1 * log(temp33r1) / r
- dmymu.deta <- dtheta.deta(mymu, "identity", earg = list())
+ dmymu.deta <- dtheta.deta(mymu, "identitylink", earg = list())
dsigma.deta <- dtheta.deta(sigma, .link.sigma, earg = .esigma)
dr.deta <- dtheta.deta(r, .link.r, earg = .er)
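
The hunks above show the refactor that recurs throughout this import: every @loglikelihood slot gains a 'summation' argument and collects 'll.elts', the weighted per-observation log-likelihood contributions, summing them only when summation = TRUE. A minimal standalone sketch of that contract, with dnorm standing in for a family density (names here are illustrative, not VGAM's):

    ll.sketch <- function(y, w = 1, mu = 0, sigma = 1, summation = TRUE) {
      # weighted per-observation log-likelihood contributions
      ll.elts <- c(w) * dnorm(y, mean = mu, sd = sigma, log = TRUE)
      if (summation) sum(ll.elts) else ll.elts
    }
    ll.sketch(c(1, 2))                     # a scalar, the old behaviour
    ll.sketch(c(1, 2), summation = FALSE)  # length-2 vector of contributions
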
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 49663b2..a61482e 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -112,12 +112,13 @@ pkumar <- function(q, shape1, shape2) {
"shape2 * beta(1+1/shape1, shape2)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
lshape1 = .lshape1 ,
zero = .zero )
}, list( .zero = zero,
@@ -143,10 +144,10 @@ pkumar <- function(q, shape1, shape2) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape1", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -154,7 +155,7 @@ pkumar <- function(q, shape1, shape2) {
predictors.names <-
c(namesof(mynames1, .lshape1 , earg = .eshape1 , tag = FALSE),
namesof(mynames2, .lshape2 , earg = .eshape2 , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
@@ -192,51 +193,80 @@ pkumar <- function(q, shape1, shape2) {
etastart <- cbind(
theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.ishape1 = ishape1, .ishape2 = ishape2,
.eshape1 = eshape1, .eshape2 = eshape2,
.grid.shape1 = grid.shape1 ))),
- linkinv = eval(substitute(function(eta, extra = NULL){
+ linkinv = eval(substitute(function(eta, extra = NULL) {
shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
shape2 * (base::beta(1 + 1/shape1, shape2))
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lshape1 , length = ncoly),
- rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eshape1
- misc$earg[[Musual*ii ]] <- .eshape2
+ misc$earg[[M1*ii-1]] <- .eshape1
+ misc$earg[[M1*ii ]] <- .eshape2
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dkumar(x = y, shape1 = shape1,
- shape2 = shape2, log = TRUE))
+ ll.elts <- c(w) * dkumar(x = y, shape1 = shape1,
+ shape2 = shape2, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("kumar"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
+ rkumar(nsim * length(shape1),
+ shape1 = shape1, shape2 = shape2)
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+
+
+
+
+
deriv = eval(substitute(expression({
shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
@@ -250,7 +280,7 @@ pkumar <- function(q, shape1, shape2) {
myderiv <- c(w) * cbind(dl.dshape1 * dshape1.deta,
dl.dshape2 * dshape2.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
@@ -275,8 +305,8 @@ pkumar <- function(q, shape1, shape2) {
wz <- array(c(c(w) * ned2l.dshape11 * dshape1.deta^2,
c(w) * ned2l.dshape22 * dshape2.deta^2,
c(w) * ned2l.dshape12 * dshape1.deta * dshape2.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
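
The new simslot above is the hook that makes simulate() work on a fitted kumar() model; the added R/simulate.vglm.R appears to dispatch to it. A usage sketch, assuming VGAM 0.9-4 is attached (the data and object names are made up):

    library(VGAM)
    set.seed(1)
    kdata <- data.frame(y = rkumar(100, shape1 = 2, shape2 = 3))
    fit <- vglm(y ~ 1, kumar, data = kdata)
    simulate(fit, nsim = 5)   # responses regenerated via rkumar(), as coded above
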
@@ -384,7 +414,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
predictors.names <-
c(namesof("vee", .lvee, earg = .evee, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+ namesof("sigma", .lsigma , earg = .esigma, tag = FALSE))
@@ -407,14 +437,14 @@ riceff.control <- function(save.weight = TRUE, ...) {
sigma.init <- rep(sigma.init, length = length(y))
etastart <-
cbind(theta2eta(vee.init, .lvee, earg = .evee),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ theta2eta(sigma.init, .lsigma , earg = .esigma ))
}
}), list( .lvee = lvee, .lsigma = lsigma,
.ivee = ivee, .isigma = isigma,
.evee = evee, .esigma = esigma ))),
- linkinv = eval(substitute(function(eta, extra = NULL){
+ linkinv = eval(substitute(function(eta, extra = NULL) {
vee <- eta2theta(eta[, 1], link = .lvee, earg = .evee)
- sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
temp9 <- -vee^2 / (2*sigma^2)
@@ -426,7 +456,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
last = eval(substitute(expression({
misc$link <- c("vee" = .lvee, "sigma" = .lsigma)
- misc$earg <- list("vee" = .evee, "sigma" = .esigma)
+ misc$earg <- list("vee" = .evee, "sigma" = .esigma )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -434,22 +464,49 @@ riceff.control <- function(save.weight = TRUE, ...) {
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
vee <- eta2theta(eta[, 1], link = .lvee , earg = .evee )
sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * drice(x = y, vee = vee, sigma = sigma, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * drice(x = y, vee = vee, sigma = sigma, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma ))),
vfamily = c("riceff"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ vee <- eta2theta(eta[, 1], link = .lvee , earg = .evee )
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+ rrice(nsim * length(vee),
+ vee = vee, sigma = sigma)
+ }, list( .lvee = lvee, .lsigma = lsigma,
+ .evee = evee, .esigma = esigma ))),
+
+
+
deriv = eval(substitute(expression({
vee <- eta2theta(eta[, 1], link = .lvee, earg = .evee)
- sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
dvee.deta <- dtheta.deta(vee, link = .lvee, earg = .evee)
- dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma , earg = .esigma )
temp8 <- y * vee / sigma^2
dl.dvee <- -vee/sigma^2 + (y/sigma^2) *
@@ -632,7 +689,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
}), list( .lmu1 = lmu1, .lmu2 = lmu2,
.imu1 = imu1, .imu2 = imu2,
.emu1 = emu1, .emu2 = emu2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL){
+ linkinv = eval(substitute(function(eta, extra = NULL) {
mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
mu1 - mu2
@@ -649,29 +706,57 @@ skellam.control <- function(save.weight = TRUE, ...) {
.emu1 = emu1, .emu2 = emu2,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
- mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
- mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
-
-
-
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+ mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
if ( is.logical( .parallel ) &&
length( .parallel ) == 1 &&
.parallel )
- sum(c(w) * log(besselI(2*mu1, nu = y, expon = TRUE))) else
- sum(c(w) * (-mu1 - mu2 +
+ c(w) * log(besselI(2*mu1, nu = y, expon = TRUE)) else
+ c(w) * (-mu1 - mu2 +
0.5 * y * log(mu1) -
0.5 * y * log(mu2) +
2 * sqrt(mu1*mu2) + # Use this when expon = TRUE
- log(besselI(2 * sqrt(mu1*mu2), nu = y, expon = TRUE))))
- }
+ log(besselI(2 * sqrt(mu1*mu2), nu = y, expon = TRUE)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lmu1 = lmu1, .lmu2 = lmu2,
.emu1 = emu1, .emu2 = emu2,
.parallel = parallel ))),
vfamily = c("skellam"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+ mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
+ rskellam(nsim * length(mu1), mu1, mu2)
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2,
+ .parallel = parallel ))),
+
+
+
+
+
deriv = eval(substitute(expression({
mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
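
The skellam @loglikelihood above leans on exponentially scaled Bessel functions for stability: besselI(x, nu, expon.scaled = TRUE) returns exp(-x) * I_nu(x), so adding x back recovers log I_nu(x) without overflow (the code's 'expon =' partially matches 'expon.scaled'). A quick check of the identity:

    mu1 <- 400; mu2 <- 900
    x <- 2 * sqrt(mu1 * mu2)                          # 1200
    log(besselI(x, nu = 2))                           # Inf: I_2(1200) overflows
    x + log(besselI(x, nu = 2, expon.scaled = TRUE))  # finite log I_2(1200)
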
@@ -802,12 +887,13 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
"provided rho > 2"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -832,10 +918,10 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("rho", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -858,7 +944,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -868,7 +954,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$irho <- .irho
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -876,16 +962,47 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
.irho = irho ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
rho <- eta2theta(eta, .link , earg = .earg )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dyules(x = y, rho = rho, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dyules(x = y, rho = rho, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("yulesimon"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ rho <- eta2theta(eta, .link , earg = .earg )
+ ryules(nsim * length(rho), rho = rho)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
rho <- eta2theta(eta, .link , earg = .earg )
dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+y)
drho.deta <- dtheta.deta(rho, .link , earg = .earg )
@@ -950,11 +1067,16 @@ plind <- function(q, theta) {
rlind <- function(n, theta) {
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ length.arg = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
- ifelse(runif(n) < theta / (1 + theta),
- rexp(n, theta),
- rgamma(n, shape = 2, scale = 1 / theta))
+ ifelse(runif(use.n) < rep(theta / (1 + theta), length = use.n),
+ rexp(use.n, theta),
+ rgamma(use.n, shape = 2, scale = 1 / theta))
}
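
The rewritten rlind() above adopts the base-R convention that a vector 'n' means generate length(n) values, and validates scalar 'n' as a positive integer; the draw itself mixes an exponential and a gamma(2, scale = 1/theta) component with weight theta/(1+theta). For example:

    set.seed(1)
    rlind(3, theta = 2)          # three draws
    rlind(runif(3), theta = 2)   # also three draws, since length(n) > 1
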
@@ -985,16 +1107,17 @@ rlind <- function(n, theta) {
"Link: ",
namesof("theta", link, earg = earg), "\n\n",
"Mean: (theta + 2) / (theta * (theta + 1))\n",
- "Variance: (theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2"),
+ "Variance: (theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -1016,10 +1139,10 @@ rlind <- function(n, theta) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1041,7 +1164,7 @@ rlind <- function(n, theta) {
(theta + 2) / (theta * (theta + 1))
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -1051,23 +1174,48 @@ rlind <- function(n, theta) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$itheta <- .itheta
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg,
.itheta = itheta ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
theta <- eta2theta(eta, .link , earg = .earg )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dlind(x = y, theta = theta, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlind(x = y, theta = theta, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("lindley"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ theta <- eta2theta(eta, .link , earg = .earg )
+ rlind(nsim * length(theta), theta = theta)
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
theta <- eta2theta(eta, .link , earg = .earg )
dl.dtheta <- 2 / theta - 1 / (1 + theta) - y
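
The lindley score above, dl.dtheta = 2/theta - 1/(1+theta) - y, is the exact derivative in theta of log dlind(y; theta) = 2*log(theta) - log(1+theta) + log(1+y) - theta*y, which a central difference confirms:

    theta <- 1.5; y <- 2
    (dlind(y, theta + 1e-6, log = TRUE) -
     dlind(y, theta - 1e-6, log = TRUE)) / 2e-6   # about 2/theta - 1/(1+theta) - y
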
@@ -1167,12 +1315,13 @@ if (FALSE)
),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
nsimEIM = .nsimEIM,
zero = .zero )
}, list( .zero = zero,
@@ -1197,10 +1346,10 @@ if (FALSE)
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1226,7 +1375,7 @@ if (FALSE)
(theta + 2) / (theta * (theta + 1))
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -1236,7 +1385,7 @@ if (FALSE)
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$itheta <- .itheta
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -1244,16 +1393,24 @@ if (FALSE)
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
.itheta = itheta ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
theta = eta2theta(eta, .link , earg = .earg )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dpoislindley(x = y, theta = theta, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dpoislindley(x = y, theta = theta, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("poissonlindley"),
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
theta <- eta2theta(eta, .link , earg = .earg )
dl.dtheta <- 2 / theta + 1 / (y + 2 + theta) - (y + 3) / (theta + 1)
@@ -1305,35 +1462,67 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
if (length(sigma) != L) sigma <- rep(sigma, len = L)
zedd <- (x-mu)/sigma
- if (log.arg)
- ifelse(abs(zedd)<smallno, -log(2*sigma*sqrt(2*pi)),
- log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2)) else
- ifelse(abs(zedd)<smallno, 1/(2*sigma*sqrt(2*pi)),
- -expm1(-zedd^2/2)/(sqrt(2*pi)*sigma*zedd^2))
+ if (log.arg) {
+ ifelse(abs(zedd) < smallno,
+ -log(2*sigma*sqrt(2*pi)),
+ log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2))
+ } else {
+ ifelse(abs(zedd) < smallno,
+ 1/(2*sigma*sqrt(2*pi)),
+ -expm1(-zedd^2/2)/(sqrt(2*pi)*sigma*zedd^2))
+ }
}
-pslash <- function(q, mu = 0, sigma = 1) {
- if (!is.Numeric(sigma) || any(sigma <= 0))
- stop("argument 'sigma' must be positive")
+
+
+pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000) {
+ if (any(is.na(q)))
+ stop("argument 'q' must have non-missing values")
+ if (!is.Numeric(mu))
+ stop("argument 'mu' must have finite and non-missing values")
+ if (!is.Numeric(sigma, positive = TRUE))
+ stop("argument 'sigma' must have positive finite non-missing values")
+ if (!is.Numeric(very.negative, length.arg = 1) ||
+ (very.negative >= 0))
+ stop("argument 'very.negative' must be quite negative")
L <- max(length(q), length(mu), length(sigma))
if (length(q) != L) q <- rep(q, len = L)
if (length(mu) != L) mu <- rep(mu, len = L)
if (length(sigma) != L) sigma <- rep(sigma, len = L)
- ans <- q * NA
+ zedd <- (q - mu)/sigma
+ ans <- as.numeric(q * NA)
+ extreme.q <- FALSE
for (ii in 1:L) {
- temp <- integrate(dslash, lower = -Inf, upper = q[ii])
- if (temp$message != "OK") {
- warning("integrate() failed")
+ use.trick <- (-abs(zedd[ii]) <= very.negative)
+ if (use.trick) {
+ ans[ii] <- ifelse(zedd[ii] < 0, 0.0, 1.0)
+ extreme.q <- TRUE
+ } else
+ if ((zedd[ii] >= very.negative) &&
+ zedd[ii] <= 0.0) {
+ temp2 <- integrate(dslash, lower = q[ii], upper = mu[ii],
+ mu = mu[ii], sigma = sigma[ii])
+ if (temp2$message != "OK")
+ warning("integrate() failed on 'temp2'")
+ ans[ii] <- 0.5 - temp2$value
} else {
- ans[ii] <- temp$value
+ temp1 <- integrate(dslash, lower = mu[ii], upper = q[ii],
+ mu = mu[ii], sigma = sigma[ii])
+ if (temp1$message != "OK")
+ warning("integrate() failed")
+ ans[ii] <- 0.5 + temp1$value
}
}
+ if (extreme.q)
+ warning("returning 0 or 1 values for extreme values of argument 'q'")
ans
}
+
+
rslash <- function (n, mu = 0, sigma = 1) {
rnorm(n = n, mean = mu, sd = sigma) / runif(n = n)
}
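
The rewritten pslash() above integrates dslash() piecewise away from mu (adding to or subtracting from 0.5, exploiting symmetry) instead of integrating from -Inf, and short-circuits extreme standardized values through 'very.negative'. For instance, with the defaults mu = 0 and sigma = 1:

    pslash(c(-2, 0, 2))   # symmetric about 0.5, so pslash(-2) + pslash(2) is 1
    pslash(-1e6)          # 0, with a warning about extreme values of 'q'
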
@@ -1345,7 +1534,7 @@ slash.control <- function(save.weight = TRUE, ...) {
}
- slash <- function(lmu = "identity", lsigma = "loge",
+ slash <- function(lmu = "identitylink", lsigma = "loge",
imu = NULL, isigma = NULL,
iprobs = c(0.1, 0.9),
nsimEIM = 250, zero = NULL,
@@ -1410,8 +1599,8 @@ slash.control <- function(save.weight = TRUE, ...) {
predictors.names <- c(
- namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+ namesof("mu", .lmu , earg = .emu, tag = FALSE),
+ namesof("sigma", .lsigma , earg = .esigma, tag = FALSE))
if (!length(etastart)) {
@@ -1438,44 +1627,74 @@ slash.control <- function(save.weight = TRUE, ...) {
mu.init) / qnorm(0.75))
mu.init <- rep(mu.init, length = length(y))
etastart <- matrix(0, n, 2)
- etastart[, 1] <- theta2eta(mu.init, .lmu, earg = .emu)
- etastart[, 2] <- theta2eta(sigma.init, .lsigma, earg = .esigma)
+ etastart[, 1] <- theta2eta(mu.init, .lmu , earg = .emu )
+ etastart[, 2] <- theta2eta(sigma.init, .lsigma , earg = .esigma )
}
}), list( .lmu = lmu, .lsigma = lsigma,
.imu = imu, .isigma = isigma,
.emu = emu, .esigma = esigma,
.iprobs = iprobs, .smallno = smallno))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- NA * eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ NA * eta2theta(eta[, 1], link = .lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link <- c("mu" = .lmu, "sigma" = .lsigma)
+ misc$link <- c("mu" = .lmu , "sigma" = .lsigma)
- misc$earg <- list("mu" = .emu, "sigma" = .esigma)
+ misc$earg <- list("mu" = .emu, "sigma" = .esigma )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
- mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
zedd <- (y - mu) / sigma
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dslash(x = y, mu = mu, sigma = sigma, log = TRUE,
- smallno = .smallno))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dslash(x = y, mu = mu, sigma = sigma, log = TRUE,
+ smallno = .smallno)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma, .smallno = smallno ))),
vfamily = c("slash"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+ rslash(nsim * length(sigma), mu = mu, sigma = sigma)
+ }, list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .smallno = smallno ))),
+
+
+
+
deriv = eval(substitute(expression({
- mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
- dmu.deta <- dtheta.deta(mu, link = .lmu, earg = .emu)
- dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ dmu.deta <- dtheta.deta(mu, link = .lmu , earg = .emu )
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma , earg = .esigma )
zedd <- (y-mu)/sigma
d3 <- deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
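
The slash family being refitted above has no finite mean, which is why its @linkinv returns NA * eta: a slash variate is a standard normal divided by an independent uniform, exactly as rslash() is written. A quick illustration of the heavy tails:

    set.seed(1)
    x <- rslash(1e5)    # rnorm(1e5) / runif(1e5)
    mean(abs(x) > 10)   # clearly positive, far heavier-tailed than rnorm()
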
@@ -1619,11 +1838,19 @@ dnefghs <- function(x, tau, log = FALSE) {
}), list( .link = link, .earg = earg,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
tau <- eta2theta(eta, .link , earg = .earg )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dnefghs(x = y, tau = tau, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnefghs(x = y, tau = tau, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("nefghs"),
@@ -1752,17 +1979,37 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dlogF(x = y, shape1 = shape1,
- shape2 = shape2, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlogF(x = y, shape1 = shape1,
+ shape2 = shape2, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("logF"),
+
+
+
+
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
@@ -1958,12 +2205,13 @@ qbenf <- function(p, ndigits = 1) {
"")),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
upper.limit = .upper.limit ,
zero = .zero )
}, list( .zero = zero,
@@ -1985,10 +2233,10 @@ qbenf <- function(p, ndigits = 1) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$upper.limit <- matrix( .upper.limit , n, ncoly, byrow = TRUE)
if (any(y > extra$upper.limit))
@@ -2032,7 +2280,7 @@ qbenf <- function(p, ndigits = 1) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -2042,7 +2290,7 @@ qbenf <- function(p, ndigits = 1) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$multipleResponses <- TRUE
misc$expected <- .expected
misc$imethod <- .imethod
@@ -2052,13 +2300,21 @@ qbenf <- function(p, ndigits = 1) {
.upper.limit = upper.limit,
.expected = expected, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
prob <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- upper.limit <- extra$upper.limit
- sum(c(w) * (dgeom(x = y, prob = prob, log = TRUE) -
- log1p(-(1.0 - prob)^(1 + upper.limit))))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ upper.limit <- extra$upper.limit
+ ll.elts <- c(w) * (dgeom(x = y, prob = prob, log = TRUE) -
+ log1p(-(1.0 - prob)^(1 + upper.limit)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("truncgeometric"),
diff --git a/R/family.basics.R b/R/family.basics.R
index ce9fce1..6cedd07 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,6 +10,180 @@
+
+
+subsetc <-
+Select <-
+ function(
+ data = list(),
+ prefix = "y",
+
+ lhs = NULL,
+ rhs = NULL, # Can be "0" to suppress an intercept, else "".
+ rhs2 = NULL, # Can be "0" to suppress an intercept, else "".
+ rhs3 = NULL, # Can be "0" to suppress an intercept, else "".
+
+ as.character = FALSE,
+ as.formula.arg = FALSE,
+ tilde = TRUE,
+ exclude = NULL,
+ sort.arg = TRUE) {
+
+
+
+
+
+ if (is.character(exclude))
+ if (any(nchar(prefix) == 0))
+ stop("bad input for argument 'exclude'")
+ if (!is.logical(sort.arg) ||
+ length(sort.arg) != 1)
+ stop("bad input for argument 'sort.arg'")
+
+
+ col.names <- colnames(data)
+ if (is.logical(prefix)) {
+ index <- if (prefix) 1:length(col.names) else
+ stop("cannot have 'prefix = FALSE'")
+ } else {
+ index <- NULL
+ for (ii in 1:length(prefix)) {
+ small.col.names <- substr(col.names, 1, nchar(prefix[ii]))
+
+ index <- c(index, grep(prefix[ii], small.col.names))
+ }
+ }
+
+
+
+
+
+ temp.col.names <- col.names[index]
+
+
+ if (length(exclude)) {
+ exclude.index <- NULL
+ for (ii in 1:length(exclude)) {
+ exclude.index <- c(exclude.index,
+ (1:length(col.names))[exclude[ii] == col.names])
+ }
+ exclude.index <- unique(sort(exclude.index))
+ index <- setdiff(index, exclude.index)
+ temp.col.names <- col.names[index]
+ }
+
+
+
+
+ if (sort.arg) {
+ ooo <- order(temp.col.names)
+ index <- index[ooo]
+ temp.col.names <- temp.col.names[ooo]
+ }
+
+
+
+ ltcn.positive <- (length(temp.col.names) > 0)
+
+
+
+ if (as.formula.arg) {
+ form.string <- paste0(ifelse(length(lhs), lhs, ""),
+ ifelse(tilde, " ~ ", ""),
+ if (ltcn.positive)
+ paste(temp.col.names, collapse = " + ") else
+ "",
+ ifelse(ltcn.positive && length(rhs ), " + ", ""),
+ ifelse(length(rhs ), rhs, ""),
+ ifelse(length(rhs2), paste(" +", rhs2), ""),
+ ifelse(length(rhs3), paste(" +", rhs3), ""))
+
+ if (as.character) {
+ form.string
+ } else {
+ as.formula(form.string)
+ }
+ } else {
+ if (as.character) {
+ paste0("cbind(",
+ paste(temp.col.names, collapse = ", "),
+ ")")
+ } else {
+ ans <- if (is.matrix(data)) data[, index] else
+ if (is.list(data)) data[index] else
+ stop("argument 'data' is neither a list or a matrix")
+ if (length(ans)) {
+ as.matrix(ans)
+ } else {
+ NULL
+ }
+ }
+ }
+}
+
+
+
+
+
+
+
+
+if (FALSE)
+subsetc <-
+ function(x, select,
+ prefix = NULL,
+ subset = TRUE, drop = FALSE,
+ exclude = NULL,
+ sort.arg = !is.null(prefix),
+ as.character = FALSE) {
+
+ if (!is.null(prefix)) {
+ if (!missing(select))
+ warning("overwriting argument 'select' by something ",
+ "using 'prefix'")
+ select <- grepl(paste0("^", prefix), colnames(x))
+ }
+
+ if (missing(select)) {
+ vars <- TRUE
+ } else {
+ nl <- as.list(seq_along(x)) # as.list(1L:ncol(x))
+ names(nl) <- names(x) # colnames(x)
+ vars <- eval(substitute(select), nl, parent.frame())
+ }
+
+ ans <- x[subset & !is.na(subset), vars, drop = drop]
+ if (sort.arg) {
+ cna <- colnames(ans)
+ ooo <- order(cna)
+ ans <- ans[, ooo, drop = drop]
+ }
+
+ if (!is.null(exclude)) {
+ cna <- colnames(ans)
+ ooo <- match(exclude, cna)
+ ans <- ans[, -ooo, drop = drop]
+ }
+
+ if (as.character) {
+ cna <- colnames(ans)
+ paste0("cbind(", paste0(cna, collapse = ", "), ")")
+ } else {
+ ans
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
getind <- function(constraints, M, ncolx) {
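
Select() (aliased as subsetc) is new in this version: it extracts the columns of 'data' whose names begin with 'prefix' and returns them as a matrix, as a "cbind(...)" string, or as a formula. A toy illustration (the data frame and its names are made up):

    mydat <- data.frame(y1 = rnorm(5), y2 = rnorm(5), x2 = runif(5))
    Select(mydat, prefix = "y")                        # matrix of y1 and y2
    Select(mydat, prefix = "y", as.character = TRUE)   # "cbind(y1, y2)"
    Select(mydat, prefix = "y", as.formula.arg = TRUE,
           lhs = "mu")                                 # mu ~ y1 + y2
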
@@ -286,7 +460,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
return(temp)
constraints <- temp
- Blist <- vector("list", ncol(x))
+ Hlist <- vector("list", ncol(x))
for (ii in 1:length(asgn)) {
cols <- asgn[[ii]]
ictr <- 0
@@ -299,42 +473,42 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
} else {
constraints[[ii]]
}
- Blist[[jay]] <- cm
+ Hlist[[jay]] <- cm
}
}
- names(Blist) <- dimnames(x)[[2]]
- Blist
+ names(Hlist) <- dimnames(x)[[2]]
+ Hlist
}
- trivial.constraints <- function(Blist, target = diag(M)) {
+ trivial.constraints <- function(Hlist, target = diag(M)) {
- if (is.null(Blist))
+ if (is.null(Hlist))
return(1)
- if (is.matrix(Blist))
- Blist <- list(Blist)
- M <- dim(Blist[[1]])[1]
+ if (is.matrix(Hlist))
+ Hlist <- list(Hlist)
+ M <- dim(Hlist[[1]])[1]
if (!is.matrix(target))
stop("target is not a matrix")
dimtar <- dim(target)
- trivc <- rep(1, length(Blist))
- names(trivc) <- names(Blist)
- for (ii in 1:length(Blist)) {
- d <- dim(Blist[[ii]])
+ trivc <- rep(1, length(Hlist))
+ names(trivc) <- names(Hlist)
+ for (ii in 1:length(Hlist)) {
+ d <- dim(Hlist[[ii]])
if (d[1] != dimtar[1]) trivc[ii] <- 0
if (d[2] != dimtar[2]) trivc[ii] <- 0
if (d[1] != M) trivc[ii] <- 0
- if (length(Blist[[ii]]) != length(target))
+ if (length(Hlist[[ii]]) != length(target))
trivc[ii] <- 0
if (trivc[ii] == 0) next
- if (!all(c(Blist[[ii]]) == c(target)))
+ if (!all(c(Hlist[[ii]]) == c(target)))
trivc[ii] <- 0
if (trivc[ii] == 0) next
}
@@ -481,7 +655,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
as.integer(index$row-1),
as.integer(index$col-1),
as.integer(n), as.integer(M),
- as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(as.numeric(upper)), NAOK = TRUE)
dim(fred$ans) <- c(M, M, n)
alpn <- NULL
dimnames(fred$ans) <- list(alpn, alpn, dimnames(m)[[1]])
@@ -510,7 +684,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
as.integer(dimm.value),
as.integer(index$row-1),
as.integer(index$col-1),
- as.integer(n), as.integer(M), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(n), as.integer(M), NAOK = TRUE)
dim(fred$m) <- c(dimm.value,n)
fred$m <- t(fred$m)
@@ -583,7 +757,7 @@ if (!exists("is.R"))
if (any(slotNames(object) == "predictors"))
eta <- object@predictors
mt <- terms(object) # object@terms$terms; 20030811
- Blist <- constraints <- object@constraints
+ Hlist <- constraints <- object@constraints
new.coeffs <- object@coefficients
if (any(slotNames(object) == "iter"))
iter <- object@iter
@@ -781,7 +955,7 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
}
Bs <- mux22(t(wzold), deta, M = M,
upper = FALSE, as.matrix = TRUE) # n x M
- sBs <- c( (deta * Bs) %*% rep(1, M) ) # should have positive values
+ sBs <- c( (deta * Bs) %*% rep(1, M) ) # should have positive values
sy <- c( (dderiv * deta) %*% rep(1, M) )
wznew <- wzold
index <- iam(NA, NA, M = M, both = TRUE)
@@ -929,7 +1103,7 @@ lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
err = integer(L), as.integer(L),
as.double(x), as.double(s), as.double(v),
acc=as.double(tolerance), result=double(L),
- as.integer(iter), PACKAGE = "VGAM")
+ as.integer(iter))
ifelse(ans$err == 0 & xok , ans$result, NA)
}
@@ -953,11 +1127,11 @@ negzero.expression <- expression({
if (!is.Numeric(-negdotzero, positive = TRUE,
integer.valued = TRUE) ||
- max(-negdotzero) > Musual)
+ max(-negdotzero) > M1)
stop("bad input for argument 'zero'")
zneg.index <- rep(0:bigUniqInt, rep(length(negdotzero),
- 1 + bigUniqInt)) * Musual + abs(negdotzero)
+ 1 + bigUniqInt)) * M1 + abs(negdotzero)
sort(intersect(zneg.index, 1:M))
} else {
NULL
@@ -1013,7 +1187,7 @@ w.wz.merge <- function(w, wz, n, M, ndepy,
w <- matrix(w, n, ndepy)
w.rep <- matrix(0, n, ncol(wz))
- Musual <- M / ndepy
+ M1 <- M / ndepy
all.indices <- iam(NA, NA, M = M, both = TRUE)
@@ -1021,17 +1195,17 @@ w.wz.merge <- function(w, wz, n, M, ndepy,
if (FALSE)
for (ii in 1:ncol(wz)) {
- if ((ind1 <- ceiling(all.indices$row[ii] / Musual)) ==
- ceiling(all.indices$col[ii] / Musual)) {
+ if ((ind1 <- ceiling(all.indices$row[ii] / M1)) ==
+ ceiling(all.indices$col[ii] / M1)) {
w.rep[, ii] <- w[, ind1]
}
- } # ii
+ } # ii
- res.Ind1 <- ceiling(all.indices$row.index / Musual)
- Ind1 <- res.Ind1 == ceiling(all.indices$col.index / Musual)
+ res.Ind1 <- ceiling(all.indices$row.index / M1)
+ Ind1 <- res.Ind1 == ceiling(all.indices$col.index / M1)
LLLL <- min(ncol(wz), length(Ind1))
Ind1 <- Ind1[1:LLLL]
@@ -1040,7 +1214,7 @@ w.wz.merge <- function(w, wz, n, M, ndepy,
for (ii in 1:ndepy) {
sub.ind1 <- (1:LLLL)[Ind1 & (res.Ind1 == ii)]
w.rep[, sub.ind1] <- w[, ii]
- } # ii
+ } # ii
w.rep * wz
}
@@ -1140,7 +1314,7 @@ w.y.check <- function(w, y,
-arwz2wz <- function(arwz, M = 1, Musual = 1) {
+arwz2wz <- function(arwz, M = 1, M1 = 1) {
@@ -1155,14 +1329,14 @@ arwz2wz <- function(arwz, M = 1, Musual = 1) {
return(arwz)
}
- wz <- matrix(0.0, nrow = n, ncol = sum(M:(M-Musual+1)))
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ wz <- matrix(0.0, nrow = n, ncol = sum(M:(M-M1+1)))
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
len.ind1 <- dim.val # length(ind1$col.index)
for (ii in 1:ndepy) {
for (jlocal in 1:len.ind1) {
- wz[, iam(Musual * (ii - 1) + ind1$row[jlocal],
- Musual * (ii - 1) + ind1$col[jlocal],
+ wz[, iam(M1 * (ii - 1) + ind1$row[jlocal],
+ M1 * (ii - 1) + ind1$col[jlocal],
M = M)] <- arwz[, ii, jlocal]
}
}
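
The Musual -> M1 rename runs through all of the working-weight plumbing above, but the packing scheme itself is unchanged: iam() maps a symmetric (row, col) pair to the column of the n x dimm(M) matrix wz that stores it, diagonals first. A sketch, assuming iam() and dimm() are reachable from VGAM's namespace:

    library(VGAM)
    M <- 3
    dimm(M)           # 6 = M*(M+1)/2 columns hold a full symmetric 3 x 3 EIM
    iam(1, 1, M = M)  # 1: diagonal entries occupy columns 1..M
    iam(1, 2, M = M)  # 4: off-diagonal entries follow
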
diff --git a/R/family.binomial.R b/R/family.binomial.R
index b00fcf8..1d5beee 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -198,7 +198,8 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
.emu = emu, .erho = erho,
.nsimEIM = nsimEIM, .zero = zero ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
@@ -221,13 +222,48 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE ))
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
+ shape2 = shape2, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho ))),
vfamily = c("betabinomial"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ w <- pwts
+ eta <- predict(object)
+ extra <- object@extra
+
+ mymu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ rho <- eta2theta(eta[, 2], .lrho , earg = .erho )
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ rbetabinom(nsim * length(rho), size = nvec,
+ prob = mymu, rho = rho)
+ }, list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho ))),
+
+
+
+
+
deriv = eval(substitute(expression({
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
@@ -542,17 +578,19 @@ rbinom2.or <-
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.tol = tol ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- pmargin <- cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
- oratio <- mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
- cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1),
- theta2eta(pmargin[, 2], .lmu2 , earg = .emu2),
- theta2eta(oratio, .loratio, earg = .eoratio))
+ pmargin <- cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
+ oratio <- mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
+ cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1),
+ theta2eta(pmargin[, 2], .lmu2 , earg = .emu2),
+ theta2eta(oratio, .loratio, earg = .eoratio))
}, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
if ( .more.robust) {
vsmallno <- 1.0e4 * .Machine$double.xmin
mu[mu < vsmallno] <- vsmallno
@@ -568,13 +606,26 @@ rbinom2.or <-
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
-
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .more.robust = more.robust ))),
vfamily = c("binom2.or", "binom2"),
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
smallno <- 1.0e4 * .Machine$double.eps
mu.use <- mu
@@ -789,7 +840,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
}), list( .exchangeable = exchangeable, .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
multipleResponses = FALSE,
zero = .zero )
}, list( .zero = zero ))),
@@ -925,9 +976,11 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
.emu12 = emu12, .erho = erho ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w))
y * c(w) / extra$orig.w else
@@ -941,12 +994,24 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .erho = erho ))),
vfamily = c("binom2.rho", "binom2"),
+
+
+
+
+
+
deriv = eval(substitute(expression({
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
@@ -1117,7 +1182,7 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
answer <- .C("pnorm2",
ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
size = as.integer(LLL), singler = as.integer(singler),
- ans = as.double(ans), PACKAGE = "VGAM")$ans
+ ans = as.double(ans))$ans
if (any(answer < 0.0))
warning("some negative values returned")
answer
@@ -1174,7 +1239,7 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
answer <- .C("pnorm2",
ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
size = as.integer(LLL), singler = as.integer(singler),
- ans = as.double(ans), PACKAGE = "VGAM")$ans
+ ans = as.double(ans))$ans
if (any(answer < 0.0))
warning("some negative values returned")
answer
@@ -1241,14 +1306,22 @@ my.dbinom <- function(x,
theta2eta(nvec, .link)
}, list( .link = link ))),
loglikelihood = eval(substitute(
- function(mu, y, w, res = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
nvec <- mu / extra$temp2
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(c(w) * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
- y * log( .prob / (1- .prob )) +
- nvec * log1p(- .prob )))
+ ll.elts <-
+ c(w) * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
+ y * log( .prob / (1- .prob )) +
+ nvec * log1p(- .prob ))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .prob = prob ))),
vfamily = c("size.binomial"),
@@ -1438,20 +1511,20 @@ my.dbinom <- function(x,
dbetabinom <- function(x, size, prob, rho = 0, log = FALSE) {
- dbetabinom.ab(x = x, size = size, shape1 = prob*(1-rho)/rho,
+ dbetabinom.ab(x = x, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho, log = log,
.dontuse.prob = prob)
}
pbetabinom <- function(q, size, prob, rho, log.p = FALSE) {
- pbetabinom.ab(q = q, size = size, shape1 = prob*(1-rho)/rho,
+ pbetabinom.ab(q = q, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho, log.p = log.p)
}
rbetabinom <- function(n, size, prob, rho = 0) {
- rbetabinom.ab(n = n, size = size, shape1 = prob*(1-rho)/rho,
+ rbetabinom.ab(n = n, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho,
.dontuse.prob = prob)
}
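
The realigned wrappers above encode the mu/rho parameterization of the beta-binomial: shape1 = prob*(1-rho)/rho and shape2 = (1-prob)*(1-rho)/rho, so that prob = shape1/(shape1+shape2) and rho = 1/(shape1+shape2+1), the latter being exactly what @last computes. Hence the two calls agree:

    prob <- 0.4; rho <- 0.25
    dbetabinom(2, size = 10, prob = prob, rho = rho)
    dbetabinom.ab(2, size = 10, shape1 = prob * (1 - rho) / rho,
                  shape2 = (1 - prob) * (1 - rho) / rho)   # identical value
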
@@ -1551,8 +1624,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
if (length(mustart.orig))
mustart <- mustart.orig # Retain it if inputted
predictors.names <-
- c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
- namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
+ c(namesof("shape1", .lshape12 , earg = .earg, tag = FALSE),
+ namesof("shape2", .lshape12 , earg = .earg, tag = FALSE))
if (!length(etastart)) {
@@ -1579,16 +1652,16 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
warning("the response (as counts) does not appear to ",
"be integer-valued. Am rounding to integer values.")
ycounts <- round(ycounts) # Make sure it is an integer
- etastart <- cbind(theta2eta(shape1, .lshape12, earg = .earg),
- theta2eta(shape2, .lshape12, earg = .earg))
+ etastart <- cbind(theta2eta(shape1, .lshape12 , earg = .earg ),
+ theta2eta(shape2, .lshape12 , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
.nsimEIM = nsimEIM,
.imethod = imethod, .sinit = shrinkage.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
+ shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
shape1 / (shape1 + shape2)
}, list( .lshape12 = lshape12, .earg = earg ))),
last = eval(substitute(expression({
@@ -1596,8 +1669,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
misc$earg <- list("shape1" = .earg , "shape2" = .earg )
- shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
+ shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
misc$rho <- 1 / (shape1 + shape2 + 1)
misc$expected <- TRUE
@@ -1606,37 +1679,73 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
}), list( .lshape12 = lshape12, .earg = earg,
.nsimEIM = nsimEIM, .zero = zero ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
+ y * w # Convert proportions to counts
smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
+ shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE ))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
+ shape2 = shape2, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape12 = lshape12, .earg = earg ))),
vfamily = c("betabinomial.ab"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ w <- pwts
+ eta <- predict(object)
+ extra <- object@extra
+ shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
+ shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ rbetabinom.ab(nsim * length(shape1), size = nvec,
+ shape1 = shape1,
+ shape2 = shape2)
+ }, list( .lshape12 = lshape12, .earg = earg ))),
+
+
+
+
+
deriv = eval(substitute(expression({
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
+ shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
- dshape1.deta <- dtheta.deta(shape1, .lshape12, earg = .earg)
- dshape2.deta <- dtheta.deta(shape2, .lshape12, earg = .earg)
+ dshape1.deta <- dtheta.deta(shape1, .lshape12 , earg = .earg )
+ dshape2.deta <- dtheta.deta(shape2, .lshape12 , earg = .earg )
dl.dshape1 <- digamma(shape1+ycounts) -
digamma(shape1+shape2+nvec) -
@@ -1738,8 +1847,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
eval(geometric()@initialize)
predictors.names <-
- c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, short = FALSE))
+ c(namesof("prob", .lprob , earg = .eprob, tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape, short = FALSE))
if (length( .iprob ))
prob.init <- rep( .iprob , len = n)
@@ -1748,15 +1857,15 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
ncol(cbind(etastart)) != 2) {
shape.init <- rep( .ishape , len = n)
etastart <-
- cbind(theta2eta(prob.init, .lprob, earg = .eprob),
- theta2eta(shape.init, .lshape, earg = .eshape))
+ cbind(theta2eta(prob.init, .lprob , earg = .eprob ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
}
}), list( .iprob = iprob, .ishape = ishape, .lprob = lprob,
.eprob = eprob, .eshape = eshape,
.lshape = lshape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
mymu <- (1-prob) / (prob - shape)
ifelse(mymu >= 0, mymu, NA)
}, list( .lprob = lprob, .lshape = lshape,
@@ -1779,13 +1888,15 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
.tolerance = tolerance,
.moreSummation = moreSummation, .zero = zero ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
ans <- log(prob)
maxy <- max(y)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
for (ii in 1:maxy) {
index <- (ii <= y)
ans[index] <- ans[index] +
@@ -1797,17 +1908,44 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
- sum(w * ans)
+ ll.elts <- w * ans
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob = lprob, .lshape = lshape,
.eprob = eprob, .eshape = eshape ))),
vfamily = c("betageometric"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ rbetageom(nsim * length(shape),
+ shape1 = shape, shape2 = shape)
+ }, list( .lprob = lprob, .lshape = lshape,
+ .eprob = eprob, .eshape = eshape ))),
+
+
+
+
deriv = eval(substitute(expression({
- prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
shape1 <- prob / shape; shape2 <- (1 - prob) / shape;
- dprob.deta <- dtheta.deta(prob, .lprob, earg = .eprob)
- dshape.deta <- dtheta.deta(shape, .lshape, earg = .eshape)
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
dl.dprob <- 1 / prob
dl.dshape <- 0 * y
maxy <- max(y)
@@ -1966,7 +2104,8 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
.apply.parint = apply.parint,
.zero = zero ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
@@ -1978,13 +2117,19 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
mvector <- w
rvector <- w * y[, 1]
svector <- rvector * y[, 2]
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ans1 <-
- sum(dbinom(rvector, size = mvector, prob = prob1, log = TRUE) +
- dbinom(svector, size = rvector, prob = prob2, log = TRUE))
+ dbinom(rvector, size = mvector, prob = prob1, log = TRUE) +
+ dbinom(svector, size = rvector, prob = prob2, log = TRUE)
- ans1
+ ll.elts <- ans1
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
@@ -2129,9 +2274,12 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
}), list( .tol = tol, .addRidge = addRidge,
.lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
.emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
@@ -2143,10 +2291,17 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("zipebcom"),
deriv = eval(substitute(expression({
A1vec <- eta2theta(eta[, 1], .lmu12 , earg = .emu12 )
@@ -2282,7 +2437,8 @@ if (FALSE)
.erhopos = erhopos, .erhoneg = erhoneg,
.irhopos = irhopos, .irhoneg = irhoneg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
rhopos <- eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
rhoneg <- eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
pee2 <- (1 - rhoneg) / (rhopos - rhoneg)
@@ -2304,8 +2460,9 @@ if (FALSE)
pee2[pee2 >= 0.5] <- 0.44
}
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
nnn1 <- round(w * (y[, 1] + y[, 2]))
nnn2 <- round(w * (y[, 3] + y[, 4]))
index1 <- nnn1 > 0
@@ -2315,10 +2472,16 @@ if (FALSE)
prob = pee1[index1], log = TRUE), 18))
- sum(dbinom(round(w[index1] * y[index1, 1]), nnn1[index1],
+ ll.elts <-
+ (dbinom(round(w[index1] * y[index1, 1]), nnn1[index1],
prob = pee1[index1], log = TRUE)) +
- sum(dbinom(round(w[index2] * y[index2, 3]), nnn2[index2],
+ (dbinom(round(w[index2] * y[index2, 3]), nnn2[index2],
prob = pee2[index2], log = TRUE))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
.erhopos = erhopos, .erhoneg = erhoneg,
@@ -2452,9 +2615,11 @@ if (FALSE)
.emu12 = emu12,
.rho = rho, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -2465,9 +2630,15 @@ if (FALSE)
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .rho = rho ))),
vfamily = c("binom2.Rho", "binom2"),
@@ -2593,7 +2764,7 @@ if (FALSE)
}), list( .exchangeable = exchangeable, .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
multipleResponses = FALSE,
zero = .zero )
}, list( .zero = zero ))),
@@ -2628,7 +2799,7 @@ if (FALSE)
"10" = y[, 1] * (1 - y[, 2]),
"11" = y[, 1] * y[, 2])
} else {
- if(!all(rowSums(y) == 1))
+ if (!all(rowSums(y) == 1))
stop("response matrix must have two 0s and one 1 in each row")
y1vec <- 1 - y[, 1] # Not a 0 means a 1.
y2vec <- ifelse(y1vec == 1, y[, 3], 0)
@@ -2765,9 +2936,11 @@ if (FALSE)
.emu12 = emu12, .e.rho = e.rho ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- y # n x 3
nvec <- 1
@@ -2777,14 +2950,20 @@ if (FALSE)
p11 <- pmax(smallno, pbinorm(eta[, 1], eta[, 2], cov12 = rhovec))
p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
p0 <- pmax(smallno, pnorm(-eta[, 1]))
- sumprob <- p11 + p10 + p0
- p11 <- p11 / sumprob
- p10 <- p10 / sumprob
- p0 <- p0 / sumprob
+ sumprob <- p11 + p10 + p0
+ p11 <- p11 / sumprob
+ p10 <- p10 / sumprob
+ p0 <- p0 / sumprob
- sum(c(w) * dmultinomial(x = ycounts, size = nvec, prob = mu, # use.mu,
- log = TRUE, dochecking = FALSE))
+ ll.elts <-
+ c(w) * dmultinomial(x = ycounts, size = nvec, prob = mu, # use.mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .l.rho = l.rho, .e.rho = e.rho ))),
vfamily = c("binom2.rho.ss", "binom2"),
@@ -2839,7 +3018,7 @@ if (FALSE)
ans.deriv <- c(w) * cbind(dl.dprob1 * dprob1.deta,
dl.dprob2 * dprob2.deta,
dl.drho * drho...deta)
- } # else {
+ } # else {
eta1 <- eta[, 1] # dat1 %*% params[1:X1.d2]
eta2 <- eta[, 2] # dat2 %*% params[(X1.d2 + 1):(X1.d2 + X2.d2)]
corr.st <- eta[, 3] # params[(X1.d2 + X2.d2 + 1)]
@@ -2905,7 +3084,7 @@ if (FALSE)
wz[, iam(1, 2, M)] <- ned2l.dprob1prob2 * dprob1.deta * dprob2.deta
wz[, iam(1, 3, M)] <- ned2l.dprob1rho * dprob1.deta * drho...deta
wz[, iam(2, 3, M)] <- ned2l.dprob2rho * dprob2.deta * drho...deta
- } # else {
+ } # else {
ned2l.be1.be1 <- (A^2/p11 + A.c^2/p10 + 1/p0) * d.n1^2
ned2l.be2.be2 <- ( 1/p11 + 1/p10) * B^2 * d.n2^2
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 68fc84f..8045c8e 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -102,7 +102,7 @@ rbiclaytoncop <- function(n, alpha = 0) {
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
Yusual <- 2
eval(negzero.expression)
}), list( .zero = zero,
@@ -110,17 +110,18 @@ rbiclaytoncop <- function(n, alpha = 0) {
.parallel = parallel ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
- Yusual = 2,
- apply.parint = .apply.parint,
- parallel = .parallel,
- zero = .zero )
- }, list( .zero = zero,
- .apply.parint = apply.parint,
- .parallel = parallel ))),
+ list(M1 = 1,
+ Q1 = 2,
+ Yusual = 2,
+ apply.parint = .apply.parint,
+ parallel = .parallel,
+ zero = .zero )
+ }, list( .zero = zero,
+ .apply.parint = apply.parint,
+ .parallel = parallel ))),
initialize = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
Yusual <- 2
temp5 <-
@@ -139,10 +140,10 @@ rbiclaytoncop <- function(n, alpha = 0) {
ncoly <- ncol(y)
extra$ncoly <- ncoly
- extra$Musual <- Musual
+ extra$M1 <- M1
extra$Yusual <- Yusual
- M <- Musual * (ncoly / Yusual)
- mynames1 <- paste("alpha", if (M / Musual > 1) 1:(M / Musual) else "",
+ M <- M1 * (ncoly / Yusual)
+ mynames1 <- paste("alpha", if (M / M1 > 1) 1:(M / M1) else "",
sep = "")
predictors.names <- c(
namesof(mynames1, .lalpha , earg = .ealpha , short = TRUE))
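
Alongside the 'summation' change, the bookkeeping name Musual is renamed M1
across all families, and infos() now also reports Q1. A sketch of the arithmetic
these slots rely on, under the apparent semantics read off the diff (M1 linear
predictors per response, Q1 columns of y per response; both are assumptions):

    M1    <- 1                 # biclaytoncop: one alpha per bivariate response
    Q1    <- 2                 # columns of y per response (Yusual in old code)
    ncoly <- 6                 # e.g. three bivariate responses side by side
    M     <- M1 * (ncoly / Q1) # total number of linear predictors
    M                          # 3
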
@@ -155,10 +156,10 @@ rbiclaytoncop <- function(n, alpha = 0) {
if (!length(etastart)) {
alpha.init <- matrix(if (length( .ialpha )) .ialpha else 0 + NA,
- n, M / Musual, byrow = TRUE)
+ n, M / M1, byrow = TRUE)
if (!length( .ialpha ))
- for (spp. in 1:(M / Musual)) {
+ for (spp. in 1:(M / M1)) {
ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
@@ -192,20 +193,20 @@ rbiclaytoncop <- function(n, alpha = 0) {
.ialpha = ialpha ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta <- as.matrix(eta)
- fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
+ eta <- as.matrix(eta)
+ fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) <- list(extra$dimnamesy1,
- extra$dimnamesy2)
- fv.matrix
- } , list( .lalpha = lalpha,
- .ealpha = ealpha ))),
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) <- list(extra$dimnamesy1,
+ extra$dimnamesy2)
+ fv.matrix
+ } , list( .lalpha = lalpha,
+ .ealpha = ealpha ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
Yusual <- extra$Yusual
misc$link <- rep( .lalpha , length = M)
temp.names <- mynames1
@@ -217,7 +218,7 @@ rbiclaytoncop <- function(n, alpha = 0) {
misc$earg[[ii]] <- .ealpha
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$Yusual <- Yusual
misc$imethod <- .imethod
misc$expected <- TRUE
@@ -230,20 +231,49 @@ rbiclaytoncop <- function(n, alpha = 0) {
.lalpha = lalpha,
.ealpha = ealpha ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(c(w) * dbiclaytoncop(x1 = c(y[, c(TRUE, FALSE)]),
- x2 = c(y[, c(FALSE, TRUE)]),
- alpha = c(Alpha), log = TRUE))
+ ll.elts <-
+ c(w) * dbiclaytoncop(x1 = c(y[, c(TRUE, FALSE)]),
+ x2 = c(y[, c(FALSE, TRUE)]),
+ alpha = c(Alpha), log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
- } , list( .lalpha = lalpha,
- .ealpha = ealpha,
- .imethod = imethod ))),
+ }
+ } , list( .lalpha = lalpha,
+ .ealpha = ealpha,
+ .imethod = imethod ))),
vfamily = c("biclaytoncop"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+ rbiclaytoncop(nsim * length(Alpha),
+ alpha = c(Alpha))
+ } , list( .lalpha = lalpha,
+ .ealpha = ealpha ))),
+
+
+
+
deriv = eval(substitute(expression({
Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
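
The @simslot slot added above is the other recurring addition in this import:
each family gains a generator that back-transforms the fitted linear predictors
and draws nsim replicates per observation from the family's own random-number
function, warning if prior weights would be ignored. A sketch of the layout it
produces (assumes this version of VGAM is attached; the alpha values are
made-up stand-ins for fitted parameters):

    ## library(VGAM)
    Alpha <- c(0.5, 1.0, 2.0)   # pretend fitted alphas for n = 3 observations
    nsim  <- 4
    draws <- rbiclaytoncop(nsim * length(Alpha), alpha = c(Alpha))
    dim(draws)                  # (nsim * n) rows; presumably 2 columns per pair
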
@@ -406,7 +436,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
Yusual <- 2
eval(negzero.expression)
}), list( .zero = zero,
@@ -414,7 +444,8 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
.parallel = parallel ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 2,
Yusual = 2,
apply.parint = .apply.parint ,
parallel = .parallel ,
@@ -424,7 +455,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
.parallel = parallel ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
Yusual <- 2
temp5 <-
@@ -442,17 +473,17 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
ncoly <- ncol(y)
extra$ncoly <- ncoly
- extra$Musual <- Musual
+ extra$M1 <- M1
extra$Yusual <- Yusual
- M <- Musual * (ncoly / Yusual)
- mynames1 <- paste("df", if (M / Musual > 1) 1:(M / Musual) else "",
+ M <- M1 * (ncoly / Yusual)
+ mynames1 <- paste("df", if (M / M1 > 1) 1:(M / M1) else "",
sep = "")
- mynames2 <- paste("rho", if (M / Musual > 1) 1:(M / Musual) else "",
+ mynames2 <- paste("rho", if (M / M1 > 1) 1:(M / M1) else "",
sep = "")
predictors.names <- c(
namesof(mynames1, .ldof , earg = .edof , short = TRUE),
namesof(mynames2, .lrho , earg = .erho , short = TRUE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
extra$dimnamesy1 <- dimnames(y)[[1]]
@@ -462,13 +493,13 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
if (!length(etastart)) {
dof.init <- matrix(if (length( .idof )) .idof else 0 + NA,
- n, M / Musual, byrow = TRUE)
+ n, M / M1, byrow = TRUE)
rho.init <- matrix(if (length( .irho )) .irho else 0 + NA,
- n, M / Musual, byrow = TRUE)
+ n, M / M1, byrow = TRUE)
if (!length( .idof ) || !length( .irho ))
- for (spp. in 1:(M / Musual)) {
- ymatj <- y[, (Musual * spp. - 1):(Musual * spp.)]
+ for (spp. in 1:(M / M1)) {
+ ymatj <- y[, (M1 * spp. - 1):(M1 * spp.)]
dof.init0 <- if ( .imethod == 1) {
@@ -498,7 +529,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
cbind(theta2eta(dof.init, .ldof , earg = .edof ),
theta2eta(rho.init, .lrho , earg = .erho ))
- etastart <- etastart[, interleave.VGAM(M, M = Musual)]
+ etastart <- etastart[, interleave.VGAM(M, M = M1)]
}
}), list( .imethod = imethod,
@@ -520,23 +551,23 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
Yusual <- extra$Yusual
misc$link <-
- c(rep( .ldof , length = M / Musual),
- rep( .lrho , length = M / Musual))[
- interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ c(rep( .ldof , length = M / M1),
+ rep( .lrho , length = M / M1))[
+ interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
- for (ii in 1:(M / Musual)) {
- misc$earg[[Musual*ii-1]] <- .edof
- misc$earg[[Musual*ii ]] <- .erho
+ for (ii in 1:(M / M1)) {
+ misc$earg[[M1*ii-1]] <- .edof
+ misc$earg[[M1*ii ]] <- .erho
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$Yusual <- Yusual
misc$imethod <- .imethod
misc$expected <- TRUE
@@ -550,27 +581,36 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
.lrho = lrho, .ldof = ldof,
.erho = erho, .edof = edof ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.ldof , earg = .edof )
Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.lrho , earg = .erho )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
- sum(c(w) * dbistudentt(x1 = y[, Yindex1, drop = FALSE],
- x2 = y[, Yindex2, drop = FALSE],
- df = Dof,
- rho = Rho, log = TRUE))
+ ll.elts <-
+ c(w) * dbistudentt(x1 = y[, Yindex1, drop = FALSE],
+ x2 = y[, Yindex2, drop = FALSE],
+ df = Dof,
+ rho = Rho, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
- } , list( .lrho = lrho, .ldof = ldof,
- .erho = erho, .edof = edof,
- .imethod = imethod ))),
+ }, list( .lrho = lrho, .ldof = ldof,
+ .erho = erho, .edof = edof,
+ .imethod = imethod ))),
vfamily = c("bistudentt"),
deriv = eval(substitute(expression({
- Musual <- Yusual <- 2
+ M1 <- Yusual <- 2
Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.ldof , earg = .edof )
Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
@@ -627,7 +667,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
ans <- c(w) * cbind(dl.ddof * ddof.deta,
dl.drho * drho.deta)
- ans <- ans[, interleave.VGAM(M, M = Musual)]
+ ans <- ans[, interleave.VGAM(M, M = M1)]
ans
}), list( .lrho = lrho, .ldof = ldof,
.erho = erho, .edof = edof,
@@ -652,8 +692,8 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
wz <- array(c(c(w) * ned2l.ddof2 * ddof.deta^2,
c(w) * ned2l.drho2 * drho.deta^2,
c(w) * ned2l.ddofrho * ddof.deta * drho.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
}), list( .lrho = lrho, .ldof = ldof,
.erho = erho, .edof = edof,
@@ -752,7 +792,7 @@ rbinormcop <- function(n, rho = 0) {
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
Yusual <- 2
eval(negzero.expression)
}), list( .zero = zero,
@@ -760,7 +800,8 @@ rbinormcop <- function(n, rho = 0) {
.parallel = parallel ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 2,
Yusual = 2,
apply.parint = .apply.parint ,
parallel = .parallel ,
@@ -770,7 +811,7 @@ rbinormcop <- function(n, rho = 0) {
.parallel = parallel ))),
initialize = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
Yusual <- 2
temp5 <-
@@ -789,10 +830,10 @@ rbinormcop <- function(n, rho = 0) {
ncoly <- ncol(y)
extra$ncoly <- ncoly
- extra$Musual <- Musual
+ extra$M1 <- M1
extra$Yusual <- Yusual
- M <- Musual * (ncoly / Yusual)
- mynames1 <- paste("rho", if (M / Musual > 1) 1:(M / Musual) else "",
+ M <- M1 * (ncoly / Yusual)
+ mynames1 <- paste("rho", if (M / M1 > 1) 1:(M / M1) else "",
sep = "")
predictors.names <- c(
namesof(mynames1, .lrho , earg = .erho , short = TRUE))
@@ -805,10 +846,10 @@ rbinormcop <- function(n, rho = 0) {
if (!length(etastart)) {
rho.init <- matrix(if (length( .irho )) .irho else 0 + NA,
- n, M / Musual, byrow = TRUE)
+ n, M / M1, byrow = TRUE)
if (!length( .irho ))
- for (spp. in 1:(M / Musual)) {
+ for (spp. in 1:(M / M1)) {
ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
@@ -852,7 +893,7 @@ rbinormcop <- function(n, rho = 0) {
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
Yusual <- extra$Yusual
misc$link <- rep( .lrho , length = M)
temp.names <- mynames1
@@ -864,7 +905,7 @@ rbinormcop <- function(n, rho = 0) {
misc$earg[[ii]] <- .erho
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$Yusual <- Yusual
misc$imethod <- .imethod
misc$expected <- TRUE
@@ -878,21 +919,49 @@ rbinormcop <- function(n, rho = 0) {
.lrho = lrho,
.erho = erho ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Rho <- eta2theta(eta, .lrho , earg = .erho )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
- sum(c(w) * dbinormcop(x1 = y[, Yindex1, drop = FALSE],
- x2 = y[, Yindex2, drop = FALSE],
- rho = Rho, log = TRUE))
+ ll.elts <-
+ c(w) * dbinormcop(x1 = y[, Yindex1, drop = FALSE],
+ x2 = y[, Yindex2, drop = FALSE],
+ rho = Rho, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
} , list( .lrho = lrho,
.erho = erho,
.imethod = imethod ))),
vfamily = c("binormalcop"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Rho <- eta2theta(eta, .lrho , earg = .erho )
+ rbinormcop(nsim * length(Rho),
+ rho = c(Rho))
+ } , list( .lrho = lrho,
+ .erho = erho ))),
+
+
+
deriv = eval(substitute(expression({
Rho <- eta2theta(eta, .lrho , earg = .erho )
Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
@@ -933,7 +1002,7 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
}
- bilogistic4 <- function(llocation = "identity",
+ bilogistic4 <- function(llocation = "identitylink",
lscale = "loge",
iloc1 = NULL, iscale1 = NULL,
iloc2 = NULL, iscale2 = NULL,
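
Another global rename in this import: the link previously spelled "identity" is
now "identitylink", so defaults change in signatures such as bilogistic4() above
and binormal() and gumbelI() further down. A hypothetical call under the new
spelling (formula and data frame are placeholders, not from this release):

    ## library(VGAM)
    ## fit <- vglm(cbind(y1, y2) ~ x2,
    ##             bilogistic4(llocation = "identitylink"),
    ##             data = bdata)   # 'bdata' is a hypothetical data frame
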
@@ -1044,7 +1113,9 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
@@ -1053,14 +1124,45 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
zedd1 <- (y[, 1]-locat1) / Scale1
zedd2 <- (y[, 2]-locat2) / Scale2
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-zedd1 - zedd2 -
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (-zedd1 - zedd2 -
3 * log1p(exp(-zedd1) + exp(-zedd2)) -
- log(Scale1) - log(Scale2)))
+ log(Scale1) - log(Scale2))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
vfamily = c("bilogistic4"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
+ Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
+ locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
+ Scale2 <- eta2theta(eta[, 4], .lscale , .escale )
+ rbilogis4(nsim * length(locat1),
+ loc1 = locat1, scale1 = Scale1,
+ loc2 = locat2, scale2 = Scale2)
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+
+
+
+
deriv = eval(substitute(expression({
locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
@@ -1280,13 +1382,16 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta[, 1], .la, earg = .ea )
alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
beta <- eta2theta(eta[, 3], .lb, earg = .eb )
betap <- eta2theta(eta[, 4], .lbp, earg = .ebp )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
tmp88 <- extra$y1.lt.y2
ell1 <- log(alpha[tmp88]) + log(betap[tmp88]) -
betap[tmp88] * y[tmp88, 2] -
@@ -1294,7 +1399,16 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
ell2 <- log(beta[!tmp88]) + log(alphap[!tmp88]) -
alphap[!tmp88] * y[!tmp88, 1] -
(alpha+beta-alphap)[!tmp88] * y[!tmp88, 2]
- sum(w[tmp88] * ell1) + sum(w[!tmp88] * ell2) }
+ all.vec <- alpha
+ all.vec[ tmp88] <- ell1
+ all.vec[!tmp88] <- ell2
+ ll.elts <- c(w) * all.vec
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
vfamily = c("freund61"),
@@ -1446,14 +1560,14 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
ainit <- getMaxMin(a.grid, objfun = mcg2.loglik,
y = y, x = x, w = w, maximize = TRUE,
extraargs = extraargs)
- ainit <- rep(if(is.Numeric( .iscale )) .iscale else ainit,
+ ainit <- rep(if (is.Numeric( .iscale )) .iscale else ainit,
length.out = n)
pinit <- (1/ainit) * abs(momentsY[1]) + 0.01
qinit <- (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
- pinit <- rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit,
+ pinit <- rep(if (is.Numeric( .ishape1 )) .ishape1 else pinit,
length.out = n)
- qinit <- rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit,
+ qinit <- rep(if (is.Numeric( .ishape2 )) .ishape2 else qinit,
length.out = n)
etastart <-
@@ -1492,16 +1606,26 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
.iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
a <- eta2theta(eta[, 1], .lscale , .escale )
p <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
q <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
(p - 1)*log(y[, 1]) + (q - 1)*log(y[, 2]-y[, 1]) -
- y[, 2] / a))
+ y[, 2] / a)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
.escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("bigamma.mckay"),
@@ -1560,13 +1684,13 @@ rbifrankcop <- function(n, alpha) {
stop("bad input for argument 'n'") else n
if (!is.Numeric(alpha, positive = TRUE))
stop("bad input for argument 'alpha'")
- alpha <- rep(alpha, length.out = use.n)
+ if (length(alpha) != use.n) alpha <- rep(alpha, length.out = use.n)
U <- runif(use.n)
V <- runif(use.n)
T <- alpha^U + (alpha - alpha^U) * V
X <- U
- index <- abs(alpha - 1) < .Machine$double.eps
+ index <- (abs(alpha - 1) < .Machine$double.eps)
Y <- U
if (any(!index))
Y[!index] <- logb(T[!index] / (T[!index] +
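
The guard added to rbifrankcop() above is a small efficiency tweak: rep() is
skipped when alpha already holds one value per requested draw. The same idiom
in isolation:

    recycle.if.needed <- function(param, use.n) {
      if (length(param) != use.n)
        param <- rep(param, length.out = use.n)  # pad (or trim) to use.n values
      param
    }
    recycle.if.needed(2.5, use.n = 4)      # 2.5 2.5 2.5 2.5
    recycle.if.needed(c(1, 2, 3, 4), 4)    # already the right length; untouched
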
@@ -1723,15 +1847,41 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
apar <- eta2theta(eta, .lapar , earg = .eapar )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dbifrankcop(x1 = y[, 1], x2 = y[, 2],
- alpha = apar, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dbifrankcop(x1 = y[, 1], x2 = y[, 2],
+ alpha = apar, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lapar = lapar, .eapar = eapar ))),
vfamily = c("bifrankcop"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ apar <- eta2theta(eta, .lapar , earg = .eapar )
+ rbifrankcop(nsim * length(apar), alpha = c(apar))
+ }, list( .lapar = lapar, .eapar = eapar ))),
+
+
+
+
deriv = eval(substitute(expression({
apar <- eta2theta(eta, .lapar , earg = .eapar )
dapar.deta <- dtheta.deta(apar, .lapar , earg = .eapar )
@@ -1859,11 +2009,19 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
.etheta = etheta, .expected = expected ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
theta <- eta2theta(eta, .ltheta , .etheta )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * (-exp(-theta) * y[, 1] / theta - theta * y[, 2]))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (-exp(-theta) * y[, 1] / theta - theta * y[, 2])
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .ltheta = ltheta, .etheta = etheta ))),
vfamily = c("gammahyp"),
@@ -1970,16 +2128,24 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .lapar , earg = .earg )
alpha[abs(alpha) < .tola0 ] <- .tola0
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
denom <- (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
4*alpha*exp(-y[, 1] - y[, 2]))
- sum(c(w) * (-y[, 1] - y[, 2] + log(denom)))
+ ll.elts <- c(w) * (-y[, 1] - y[, 2] + log(denom))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
- }, list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
+ }, list( .lapar = lapar, .earg = earg, .tola0 = tola0 ))),
vfamily = c("morgenstern"),
deriv = eval(substitute(expression({
alpha <- eta2theta(eta, .lapar , earg = .earg )
@@ -2191,15 +2357,43 @@ pfgm <- function(q1, q2, alpha) {
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .lapar , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dfgm(x1 = y[, 1],
- x2 = y[, 2], alpha = alpha, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dfgm(x1 = y[, 1],
+ x2 = y[, 2], alpha = alpha, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lapar = lapar, .earg = earg ))),
vfamily = c("fgm"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ alpha <- eta2theta(eta, .lapar , earg = .earg )
+ rfgm(nsim * length(alpha), alpha = c(alpha))
+ }, list( .lapar = lapar, .earg = earg ))),
+
+
+
+
+
deriv = eval(substitute(expression({
alpha <- eta2theta(eta, .lapar , earg = .earg )
@@ -2229,7 +2423,7 @@ pfgm <- function(q1, q2, alpha) {
- bigumbelI <- function(lapar = "identity", iapar = NULL, imethod = 1) {
+ bigumbelI <- function(lapar = "identitylink", iapar = NULL, imethod = 1) {
lapar <- as.list(substitute(lapar))
earg <- link2list(lapar)
@@ -2295,10 +2489,13 @@ pfgm <- function(q1, q2, alpha) {
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .lapar , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
denom <- (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
mytolerance <- .Machine$double.xmin
bad <- (denom <= mytolerance) # Range violation
@@ -2306,9 +2503,17 @@ pfgm <- function(q1, q2, alpha) {
cat("There are some range violations in @deriv\n")
flush.console()
}
+
+
+
+
+ if (summation) {
sum(bad) * (-1.0e10) +
sum(w[!bad] * (-y[!bad, 1] - y[!bad, 2] +
alpha[!bad] * y[!bad, 1] * y[!bad, 2] + log(denom[!bad])))
+ } else {
+ stop("argument 'summation = FALSE' does not work yet")
+ }
}
}, list( .lapar = lapar, .earg = earg ))),
vfamily = c("bigumbelI"),
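
bigumbelI is one of the few families where summation = FALSE is refused rather
than implemented: its log-likelihood adds the penalty sum(bad) * (-1.0e10) for
range violations, and that penalty belongs to the total rather than to any one
observation, so there is no natural vector of per-observation elements.
Schematically (a sketch with arbitrary numbers):

    bad   <- c(FALSE, TRUE, FALSE)              # rows violating the range
    ll.ok <- c(-0.31, -0.70)                    # log-lik terms for the good rows
    total <- sum(bad) * (-1.0e10) + sum(ll.ok)  # what summation = TRUE returns
    total
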
@@ -2512,15 +2717,39 @@ plackett.control <- function(save.weight = TRUE, ...) {
}), list( .link = link, .earg = earg,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
oratio <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dplack(x1 = y[, 1], x2 = y[, 2],
- oratio = oratio, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dplack(x1 = y[, 1], x2 = y[, 2],
+ oratio = oratio, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("plackett"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ oratio <- eta2theta(eta, .link , earg = .earg )
+ rplack(nsim * length(oratio), oratio = c(oratio))
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
oratio <- eta2theta(eta, .link , earg = .earg )
doratio.deta <- dtheta.deta(oratio, .link , earg = .earg )
@@ -2545,7 +2774,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
name = "oratio", hessian = FALSE)
run.var <- 0
for (ii in 1:( .nsimEIM )) {
- ysim <- rplack(n, oratio=oratio)
+ ysim <- rplack(n, oratio = oratio)
y1sim <- ysim[, 1]
y2sim <- ysim[, 2]   # second coordinate of the simulated pair
eval.sd3 <- eval(sd3)
@@ -2724,15 +2953,40 @@ amh.control <- function(save.weight = TRUE, ...) {
}), list( .lalpha = lalpha,
.ealpha = ealpha, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * damh(x1 = y[, 1], x2 = y[, 2],
- alpha = alpha, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * damh(x1 = y[, 1], x2 = y[, 2],
+ alpha = alpha, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lalpha = lalpha, .ealpha = ealpha ))),
vfamily = c("amh"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+ ramh(nsim * length(alpha), alpha = c(alpha))
+ }, list( .lalpha = lalpha, .ealpha = ealpha ))),
+
+
+
deriv = eval(substitute(expression({
alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
@@ -2835,8 +3089,8 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
- binormal <- function(lmean1 = "identity",
- lmean2 = "identity",
+ binormal <- function(lmean1 = "identitylink",
+ lmean2 = "identitylink",
lsd1 = "loge",
lsd2 = "loge",
lrho = "rhobit",
@@ -2990,19 +3244,29 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
.lsd1 = lsd1 , .lsd2 = lsd2 , .lrho = lrho,
.esd1 = esd1 , .esd2 = esd2 , .erho = erho ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dbinorm(x1 = y[, 1], x2 = y[, 2],
- mean1 = mean1, mean2 = mean2,
- var1 = sd1^2, var2 = sd2^2, cov12 = Rho *sd1*sd2,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dbinorm(x1 = y[, 1], x2 = y[, 2],
+ mean1 = mean1, mean2 = mean2,
+ var1 = sd1^2, var2 = sd2^2,
+ cov12 = Rho * sd1 * sd2,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
} , list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
@@ -3010,6 +3274,33 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
.esd1 = esd1 , .esd2 = esd2 , .erho = erho,
.imethod = imethod ))),
vfamily = c("binormal"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
+ mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
+ sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
+ sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
+ Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
+ rbinorm(nsim * length(sd1),
+ mean1 = mean1, mean2 = mean2,
+ var1 = sd1^2, var2 = sd2^2, cov12 = Rho * sd1 * sd2)
+ } , list( .lmean1 = lmean1, .lmean2 = lmean2,
+ .emean1 = emean1, .emean2 = emean2,
+ .lsd1 = lsd1 , .lsd2 = lsd2 , .lrho = lrho,
+ .esd1 = esd1 , .esd2 = esd2 , .erho = erho ))),
+
+
+
+
deriv = eval(substitute(expression({
mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
@@ -3089,7 +3380,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
gumbelI <-
- function(la = "identity", earg = list(), ia = NULL, imethod = 1) {
+ function(la = "identitylink", earg = list(), ia = NULL, imethod = 1) {
la <- as.list(substitute(la))
earg <- link2list(la)
@@ -3143,20 +3434,28 @@ gumbelI <-
misc$pooled.weight <- pooled.weight
}), list( .la = la, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .la, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- denom <- (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
- mytolerance <- .Machine$double.xmin
- bad <- (denom <= mytolerance) # Range violation
- if (any(bad)) {
- cat("There are some range violations in @deriv\n")
- flush.console()
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ denom <- (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
+ mytolerance <- .Machine$double.xmin
+ bad <- (denom <= mytolerance) # Range violation
+ if (any(bad)) {
+ cat("There are some range violations in @deriv\n")
+ flush.console()
denom[bad] <- 2 * mytolerance
}
- sum(w * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom)))
+ ll.elts <- c(w) * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .la = la, .earg = earg ))),
vfamily = c("gumbelI"),
deriv = eval(substitute(expression({
@@ -3208,7 +3507,7 @@ kendall.tau <- function(x, y, exact = FALSE, max.n = 3000) {
c( .C("VGAM_C_kend_tau",
as.double(x), as.double(y),
as.integer(NN), ans = double(3),
- NAOK = TRUE, PACKAGE = "VGAM")$ans)
+ NAOK = TRUE)$ans)
con <- ans3[1] + ans3[2] / 2 # Ties put half and half
dis <- ans3[3] + ans3[2] / 2
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 797e3bc..2032185 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -49,10 +49,10 @@ process.categorical.data.vgam <- expression({
stop("the response must be non-negative counts (integers)")
if (!exists("delete.zero.colns") ||
- (exists("delete.zero.colns") && delete.zero.colns)) {
+ (exists("delete.zero.colns") && delete.zero.colns)) {
sumy2 <- colSums(y)
if (any(index <- sumy2 == 0)) {
- y <- y[,!index, drop = FALSE]
+ y <- y[, !index, drop = FALSE]
sumy2 <- sumy2[!index]
if (all(index) || ncol(y) <= 1)
stop("'y' matrix has 0 or 1 columns")
@@ -91,7 +91,8 @@ process.categorical.data.vgam <- expression({
Deviance.categorical.data.vgam <-
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
@@ -109,7 +110,7 @@ Deviance.categorical.data.vgam <-
nonz <- (y != 0)
devy[nonz] <- y[nonz] * log(y[nonz])
- devmu <- 0 * y # filler; y*log(mu) gives a warning (fixed up anyway).
+ devmu <- 0 * y # filler; y*log(mu) gives a warning (fixed up anyway).
if (any(smallmu <- (mu * (1 - mu) < double.eps))) {
warning("fitted values close to 0 or 1")
smu <- mu[smallmu]
@@ -131,7 +132,12 @@ Deviance.categorical.data.vgam <-
devi <- devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
} else {
- sum(w * devi)
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
}
}
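
Deviance.categorical.data.vgam() receives the same treatment as the
log-likelihood slots: with summation = FALSE it returns the weighted per-row
deviance elements rather than their sum. The switch in isolation (a sketch,
with arbitrary numbers):

    dev.sketch <- function(devi, w, summation = TRUE) {
      dev.elts <- c(w) * devi   # per-row deviance contributions
      if (summation) sum(dev.elts) else dev.elts
    }
    dev.sketch(devi = c(0.2, 1.1, 0.4), w = rep(1, 3))                     # 1.7
    dev.sketch(devi = c(0.2, 1.1, 0.4), w = rep(1, 3), summation = FALSE)  # elements
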
@@ -297,9 +303,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -310,9 +318,15 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
},
vfamily = c("sratio", "vcategorical"),
deriv = eval(substitute(expression({
@@ -469,24 +483,32 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
-
- smallno <- 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts <- round(ycounts)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ smallno <- 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("cratio", "vcategorical"),
deriv = eval(substitute(expression({
@@ -557,11 +579,12 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
vglm.vcategorical.control <-
- function(maxit = 30, trace = FALSE,
+ function(maxit = 30,
+ trace = FALSE,
panic = TRUE, ...) {
if (maxit < 1) {
warning("bad value of maxit; using 200 instead")
- maxit = 200
+ maxit <- 200
}
list(maxit = maxit,
trace = as.logical(trace)[1],
@@ -578,6 +601,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
nointercept = NULL, refLevel = "last",
whitespace = FALSE) {
+
+
if (length(refLevel) != 1)
stop("the length of 'refLevel' must be one")
@@ -589,6 +614,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
if (is.factor(refLevel)) {
if (is.ordered(refLevel))
warning("'refLevel' is from an ordered factor")
+
refLevel <- as.character(refLevel) == levels(refLevel)
refLevel <- (1:length(refLevel))[refLevel]
if (!is.Numeric(refLevel, length.arg = 1,
@@ -639,8 +665,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
constraints <- cm.vgam(matrix(1, M, 1), x = x,
bool = .parallel,
- constraints = constraints,
- apply.int = FALSE)
+ apply.int = TRUE,
+ constraints = constraints)
constraints <- cm.zero.vgam(constraints, x, .zero, M)
constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
}), list( .parallel = parallel, .zero = zero,
@@ -652,7 +678,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
infos = eval(substitute(function(...) {
list(parallel = .parallel ,
refLevel = .refLevel ,
- Musual = -1,
+ M1 = -1,
multipleResponses = FALSE,
zero = .zero )
}, list( .zero = zero,
@@ -661,6 +687,12 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
))),
initialize = eval(substitute(expression({
+
+ if (is.factor(y) && is.ordered(y))
+ warning("response should be nominal, not ordinal")
+
+
+
delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
@@ -725,9 +757,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}), list( .refLevel = refLevel )),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -738,10 +772,17 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("multinomial", "vcategorical"),
deriv = eval(substitute(expression({
if ( .refLevel < 0) {
@@ -836,14 +877,14 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
Hk.matrix <- kronecker(diag(NOS), matrix(1,Llevels-1,1))
constraints <- cm.vgam(Hk.matrix, x = x,
bool = .parallel ,
- constraints = constraints,
- apply.int = .apply.parint )
+ apply.int = .apply.parint ,
+ constraints = constraints)
}
} else {
constraints <- cm.vgam(matrix(1, M, 1), x = x,
bool = .parallel ,
- constraints = constraints,
- apply.int = .apply.parint )
+ apply.int = .apply.parint ,
+ constraints = constraints)
}
}), list( .parallel = parallel, .mv = mv,
.apply.parint = apply.parint ))),
@@ -856,20 +897,23 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
NOS <- extra$NOS
Llevels <- extra$Llevels
for (iii in 1:NOS) {
- cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex <- (iii-1)*(Llevels) + 1:(Llevels)
- totdev <- totdev + Deviance.categorical.data.vgam(
- mu = mu[, aindex, drop = FALSE],
- y = y[, aindex, drop = FALSE], w = w,
- residuals = residuals,
- eta = eta[, cindex, drop = FALSE],
- extra = extra)
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels)
+ totdev <- totdev +
+ Deviance.categorical.data.vgam(
+ mu = mu[, aindex, drop = FALSE],
+ y = y[, aindex, drop = FALSE], w = w,
+ residuals = residuals,
+ eta = eta[, cindex, drop = FALSE],
+ extra = extra,
+ summation = TRUE)
}
totdev
} else {
Deviance.categorical.data.vgam(mu = mu, y = y, w = w,
residuals = residuals,
- eta = eta, extra = extra)
+ eta = eta, extra = extra,
+ summation = TRUE)
}
answer
}, list( .earg = earg, .link = link, .mv = mv ) )),
@@ -1050,9 +1094,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
.link = link, .earg = earg,
.reverse = reverse, .mv = mv ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1063,10 +1109,17 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("cumulative", "vcategorical"),
deriv = eval(substitute(expression({
mu.use <- pmax(mu, .Machine$double.eps * 1.0e-0)
@@ -1148,7 +1201,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
propodds <- function(reverse = TRUE, whitespace = FALSE) {
- if (!is.logical(reverse) || length(reverse) != 1)
+ if (!is.logical(reverse) ||
+ length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
cumulative(parallel = TRUE, reverse = reverse,
@@ -1265,9 +1319,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
.link , earg = .earg )
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1278,10 +1334,17 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("acat", "vcategorical"),
deriv = eval(substitute(expression({
zeta <- eta2theta(eta, .link , earg = .earg ) # May be zetar
@@ -1425,9 +1488,11 @@ acat.deriv <- function(zeta, reverse, M, n) {
}), list( .refgp = refgp, .refvalue = refvalue ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1438,10 +1503,17 @@ acat.deriv <- function(zeta, reverse, M, n) {
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("brat"),
deriv = eval(substitute(expression({
ans <- NULL
@@ -1591,12 +1663,20 @@ acat.deriv <- function(zeta, reverse, M, n) {
misc$alpha0 <- alpha0
}), list( .refgp = refgp, .refvalue = refvalue ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * (y * log(mu) +
- 0.5 * extra$ties * log(attr(mu, "probtie"))))
- },
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (y * log(mu) +
+ 0.5 * extra$ties * log(attr(mu, "probtie")))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("bratt"),
deriv = eval(substitute(expression({
ans <- NULL
@@ -1876,8 +1956,8 @@ tapplymat1 <- function(mat,
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x = x,
bool = .parallel ,
- constraints = constraints,
- apply.int = TRUE)
+ apply.int = TRUE,
+ constraints = constraints)
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -1964,53 +2044,59 @@ print("y.names")
misc$true.mu = FALSE # $fitted is not a true mu
}), list( .link = link, .countdata = countdata, .earg = earg ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- probs <- ordpoissonProbs(extra, mu)
- index0 <- y == 0
- probs[index0] <- 1
- pindex0 <- probs == 0
- probs[pindex0] <- 1
- sum(pindex0) * (-1.0e+10) + sum(w * y * log(probs))
- }
- },
- vfamily = c("ordpoisson", "vcategorical"),
- deriv = eval(substitute(expression({
- probs <- ordpoissonProbs(extra, mu)
- probs.use <- pmax(probs, .Machine$double.eps * 1.0e-0)
-
- cp.vector <- extra$cutpoints
- NOS <- extra$NOS
- Levels <- extra$Levels
- resmat <- matrix(0, n, M)
- dl.dprob <- y / probs.use
- dmu.deta <- dtheta.deta(mu, .link , earg = .earg )
- dprob.dmu <- ordpoissonProbs(extra, mu, deriv = 1)
- cptr <- 1
- for (iii in 1:NOS) {
- for (kkk in 1:Levels[iii]) {
- resmat[,iii] <- resmat[,iii] +
- dl.dprob[,cptr] * dprob.dmu[,cptr]
- cptr <- cptr + 1
- }
- }
- resmat <- c(w) * resmat * dmu.deta
- resmat
- }), list( .link = link, .earg = earg, .countdata=countdata ))),
- weight = eval(substitute(expression({
- d2l.dmu2 <- matrix(0, n, M) # Diagonal matrix
- cptr <- 1
- for (iii in 1:NOS) {
- for (kkk in 1:Levels[iii]) {
- d2l.dmu2[,iii] <- d2l.dmu2[,iii] +
- dprob.dmu[,cptr]^2 / probs.use[,cptr]
- cptr <- cptr + 1
- }
- }
- wz <- c(w) * d2l.dmu2 * dmu.deta^2
- wz
- }), list( .earg = earg, .link = link, .countdata = countdata ))))
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ probs <- ordpoissonProbs(extra, mu)
+ index0 <- y == 0
+ probs[index0] <- 1
+ pindex0 <- probs == 0
+ probs[pindex0] <- 1
+ if (summation) {
+ sum(pindex0) * (-1.0e+10) + sum(w * y * log(probs))
+ } else {
+ stop("20140311; 'summation=F' not done yet")
+ }
+ }
+ },
+ vfamily = c("ordpoisson", "vcategorical"),
+ deriv = eval(substitute(expression({
+ probs <- ordpoissonProbs(extra, mu)
+ probs.use <- pmax(probs, .Machine$double.eps * 1.0e-0)
+
+ cp.vector <- extra$cutpoints
+ NOS <- extra$NOS
+ Levels <- extra$Levels
+ resmat <- matrix(0, n, M)
+ dl.dprob <- y / probs.use
+ dmu.deta <- dtheta.deta(mu, .link , earg = .earg )
+ dprob.dmu <- ordpoissonProbs(extra, mu, deriv = 1)
+ cptr <- 1
+ for (iii in 1:NOS) {
+ for (kkk in 1:Levels[iii]) {
+ resmat[,iii] <- resmat[,iii] +
+ dl.dprob[,cptr] * dprob.dmu[,cptr]
+ cptr <- cptr + 1
+ }
+ }
+ resmat <- c(w) * resmat * dmu.deta
+ resmat
+ }), list( .link = link, .earg = earg, .countdata=countdata ))),
+ weight = eval(substitute(expression({
+ d2l.dmu2 <- matrix(0, n, M) # Diagonal matrix
+ cptr <- 1
+ for (iii in 1:NOS) {
+ for (kkk in 1:Levels[iii]) {
+ d2l.dmu2[,iii] <- d2l.dmu2[,iii] +
+ dprob.dmu[,cptr]^2 / probs.use[,cptr]
+ cptr <- cptr + 1
+ }
+ }
+ wz <- c(w) * d2l.dmu2 * dmu.deta^2
+ wz
+ }), list( .earg = earg, .link = link, .countdata = countdata ))))
}
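
The re-indented ordpoisson @deriv and @weight loops above accumulate over
cutpoint probabilities; the underlying construction is that the ordinal class
probabilities are successive differences of the Poisson CDF at the cutpoints.
For example (arbitrary values):

    lambda <- 4                      # Poisson mean
    cutpts <- c(2, 5, Inf)           # cutpoints defining three ordinal classes
    cump   <- ppois(cutpts, lambda)  # cumulative probabilities at the cutpoints
    probs  <- c(cump[1], diff(cump)) # P(class 1), P(class 2), P(class 3)
    sum(probs)                       # 1
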
@@ -2032,7 +2118,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
probs[,cptr] <- ppois(q = cp.vector[cptr], lambda = mu[,iii])
}
cptr <- cptr + 1
- while(is.finite(cp.vector[cptr])) {
+ while (is.finite(cp.vector[cptr])) {
if (deriv == 1) {
dprob.dmu[,cptr] <-
dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
@@ -2095,34 +2181,36 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
namesof("scale_j", lscale, escale)),
constraints = eval(substitute(expression({
J <- M / 2
- constraints <- cm.vgam(matrix(1,J,1), x = x,
+ constraints <- cm.vgam(matrix(1, J, 1), x = x,
bool = .parallel ,
- constraints = constraints,
- apply.int = FALSE)
+ apply.int = FALSE,
+ constraints = constraints)
constraints[["(Intercept)"]] = rbind(constraints[["(Intercept)"]],
matrix(0, J, ncol(constraints[["(Intercept)"]])))
- cm2 <- cm.vgam(matrix(1,J,1), x = x,
+ cm2 <- cm.vgam(matrix(1, J, 1), x = x,
bool = .sparallel ,
- constraints = NULL,
- apply.int = FALSE)
+ apply.int = FALSE,
+ constraints = NULL)
for (ii in 2:length(constraints))
- constraints[[ii]] =
+ constraints[[ii]] <-
cbind(rbind(constraints[[ii]],
matrix(0, J, ncol(constraints[[ii]]))),
rbind(matrix(0, J, ncol(cm2[[ii]])), cm2[[ii]]))
for (ii in 1:length(constraints))
- constraints[[ii]] =
+ constraints[[ii]] <-
(constraints[[ii]])[interleave.VGAM(M, M = 2),, drop = FALSE]
}), list( .parallel = parallel, .sparallel=sparallel ))),
deviance = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- answer =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ answer <-
Deviance.categorical.data.vgam(mu = mu,
y = y, w = w, residuals = residuals,
- eta = eta, extra = extra)
+ eta = eta, extra = extra,
+ summation = summation)
answer
}, list( .earg = earg, .link = link ) )),
initialize = eval(substitute(expression({
@@ -2139,10 +2227,10 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
eval(process.categorical.data.vgam)
- M = 2*(ncol(y)-1)
- J = M / 2
- extra$J = J
- mynames = if ( .reverse )
+ M <- 2*(ncol(y)-1)
+ J <- M / 2
+ extra$J <- J
+ mynames <- if ( .reverse )
paste("P[Y>=", 2:(1+J), "]", sep = "") else
paste("P[Y<=", 1:J, "]", sep = "")
predictors.names <- c(
@@ -2151,109 +2239,118 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
.lscale, short = TRUE, earg = .escale ))
- y.names = paste("mu", 1:(J+1), sep = "")
+ y.names <- paste("mu", 1:(J+1), sep = "")
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
}), list( .link = link, .lscale = lscale, .reverse = reverse,
.earg = earg, .escale = escale ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- J = extra$J
- M = 2*J
- etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
- etamat2 = eta[, 2*(1:J), drop = FALSE]
- scalemat = eta2theta(etamat2, .lscale, earg = .escale )
- fv.matrix =
+ J <- extra$J
+ M <- 2*J
+ etamat1 <- eta[, 2*(1:J)-1, drop = FALSE]
+ etamat2 <- eta[, 2*(1:J), drop = FALSE]
+ scalemat <- eta2theta(etamat2, .lscale, earg = .escale )
+ fv.matrix <-
if ( .reverse ) {
- ccump = cbind(1,
+ ccump <- cbind(1,
eta2theta(etamat1 / scalemat,
.link , earg = .earg ))
cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
} else {
- cump = cbind(eta2theta(etamat1 / scalemat,
+ cump <- cbind(eta2theta(etamat1 / scalemat,
.link , earg = .earg ),
1)
cbind(cump[, 1], tapplymat1(cump, "diff"))
}
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
+ dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
extra$dimnamesy2)
fv.matrix
}, list( .link = link, .lscale = lscale, .reverse = reverse,
.earg = earg, .escale = escale ))),
last = eval(substitute(expression({
- J = extra$J
- misc$link =
+ J <- extra$J
+ misc$link <-
c(rep( .link , length = J),
rep( .lscale, length = J))[interleave.VGAM(M, M = 2)]
- names(misc$link) = predictors.names
+ names(misc$link) <- predictors.names
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for (ii in 1:J) misc$earg[[2*ii-1]] <- .earg
for (ii in 1:J) misc$earg[[2*ii ]] <- .escale
- misc$parameters = mynames
- misc$reverse = .reverse
- misc$parallel = .parallel
- misc$sparallel = .sparallel
+ misc$parameters <- mynames
+ misc$reverse <- .reverse
+ misc$parallel <- .parallel
+ misc$sparallel <- .sparallel
}), list( .link = link, .lscale = lscale,
.reverse = reverse, .parallel = parallel,
.sparallel = sparallel,
.earg = earg, .escale = escale ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- cump = tapplymat1(as.matrix(mu), "cumsum")
- J = ncol(as.matrix(mu)) - 1
- M = 2 * J
- answer = cbind(
+ cump <- tapplymat1(as.matrix(mu), "cumsum")
+ J <- ncol(as.matrix(mu)) - 1
+ M <- 2 * J
+ answer <- cbind(
theta2eta(if ( .reverse ) 1-cump[, 1:J] else cump[, 1:J],
.link ,
earg = .earg ),
matrix(theta2eta( .iscale, .lscale , earg = .escale ),
nrow(as.matrix(mu)), J, byrow = TRUE))
- answer = answer[,interleave.VGAM(M, M = 2)]
+ answer <- answer[,interleave.VGAM(M, M = 2)]
answer
}, list( .link = link, .lscale = lscale, .reverse = reverse,
.iscale = iscale, .earg = earg, .escale = escale ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
vfamily = c("scumulative", "vcategorical"),
deriv = eval(substitute(expression({
- ooz = iter %% 2
+ ooz <- iter %% 2
- J = extra$J
+ J <- extra$J
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
- etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
- etamat2 = eta[, 2*(1:J) , drop = FALSE]
- scalemat = eta2theta(etamat2, .lscale, earg = .escale )
+ etamat1 <- eta[, 2*(1:J)-1, drop = FALSE]
+ etamat2 <- eta[, 2*(1:J) , drop = FALSE]
+ scalemat <- eta2theta(etamat2, .lscale, earg = .escale )
- cump = eta2theta(etamat1 / scalemat, .link , earg = .earg )
- dcump.deta = dtheta.deta(cump, .link , earg = .earg )
- dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale )
- dl.dcump = (if ( .reverse ) -w else w) *
+ cump <- eta2theta(etamat1 / scalemat, .link , earg = .earg )
+ dcump.deta <- dtheta.deta(cump, .link , earg = .earg )
+ dscale.deta <- dtheta.deta(scalemat, .lscale, earg = .escale )
+ dl.dcump <- (if ( .reverse ) -w else w) *
(y[, 1:J]/mu.use[, 1:J] - y[, -1]/mu.use[, -1])
- dcump.dscale = -dcump.deta * etamat1 / scalemat^2
- ans = cbind(dl.dcump * dcump.deta / scalemat,
+ dcump.dscale <- -dcump.deta * etamat1 / scalemat^2
+ ans <- cbind(dl.dcump * dcump.deta / scalemat,
dl.dcump * dcump.dscale * dscale.deta)
- ans = ans[,interleave.VGAM(M, M = 2)]
+ ans <- ans[,interleave.VGAM(M, M = 2)]
if (ooz) ans[, c(TRUE, FALSE)] = 0 else
ans[, c(FALSE, TRUE)] = 0
ans
@@ -2261,48 +2358,48 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
.earg = earg, .escale = escale ))),
weight = eval(substitute(expression({
- wz = matrix(0, n, 2*(2*M-3))
+ wz <- matrix(0, n, 2*(2*M-3))
- wz[, 2*(1:J)-1] = if (ooz) c(w) * (dcump.deta / scalemat)^2 *
+ wz[, 2*(1:J)-1] <- if (ooz) c(w) * (dcump.deta / scalemat)^2 *
(1/mu.use[, 1:J] + 1/mu.use[, -1]) else 1
- wz[, 2*(1:J)] = if (ooz) 1 else
+ wz[, 2*(1:J)] <- if (ooz) 1 else
c(w) * (dcump.dscale * dscale.deta)^2 *
(1/mu.use[, 1:J] + 1/mu.use[, -1])
- wz0 = c(w) * (dcump.deta / scalemat) *
+ wz0 <- c(w) * (dcump.deta / scalemat) *
(dcump.dscale * dscale.deta) *
(1/mu.use[, 1:J] + 1/mu.use[, -1])
- wz0 = as.matrix(wz0)
+ wz0 <- as.matrix(wz0)
for (ii in 1:J)
- wz[,iam(2*ii-1,2*ii,M = M)] = if (ooz) wz0[, ii] else 0
+ wz[,iam(2*ii-1,2*ii,M = M)] <- if (ooz) wz0[, ii] else 0
if (J > 1) {
- wz0 = -c(w) *
+ wz0 <- -c(w) *
(dcump.deta[, -J] / scalemat[, -J]) *
(dcump.deta[, -1] / scalemat[, -1]) / mu.use[, 2:J]
- wz0 = as.matrix(wz0) # Just in case J=2
+ wz0 <- as.matrix(wz0) # Just in case J=2
for (ii in 1:(J-1))
- wz[, iam(2*ii-1, 2*ii+1, M = M)] = if (ooz) wz0[, ii] else 0
- wz0 = -c(w) * (dcump.dscale[, -1] * dscale.deta[, -1]) *
+ wz[, iam(2*ii-1, 2*ii+1, M = M)] <- if (ooz) wz0[, ii] else 0
+ wz0 <- -c(w) * (dcump.dscale[, -1] * dscale.deta[, -1]) *
(dcump.dscale[, -J] *
dscale.deta[, -J]) / mu.use[, 2:J]
- wz0 = as.matrix(wz0)
+ wz0 <- as.matrix(wz0)
for (ii in 1:(J-1))
- wz[,iam(2*ii,2*ii+2,M = M)] = if (ooz) wz0[, ii] else 0
+ wz[,iam(2*ii,2*ii+2,M = M)] <- if (ooz) wz0[, ii] else 0
- wz0 = -c(w) * (dcump.deta[, -J] / scalemat[, -J]) *
+ wz0 <- -c(w) * (dcump.deta[, -J] / scalemat[, -J]) *
(dcump.dscale[, -1] *
dscale.deta[, -1]) / mu.use[, 2:J]
- wz0 = as.matrix(wz0)
+ wz0 <- as.matrix(wz0)
for (ii in 1:(J-1))
- wz[,iam(2*ii-1,2*ii+2,M = M)] = if (ooz) wz0[, ii] else 0
- wz0 = -c(w) * (dcump.deta[, -1] / scalemat[, -1]) *
+ wz[,iam(2*ii-1,2*ii+2,M = M)] <- if (ooz) wz0[, ii] else 0
+ wz0 <- -c(w) * (dcump.deta[, -1] / scalemat[, -1]) *
(dcump.dscale[, -J] *
dscale.deta[, -J]) / mu.use[, 2:J]
- wz0 = as.matrix(wz0)
+ wz0 <- as.matrix(wz0)
for (ii in 1:(J-1))
- wz[,iam(2*ii,2*ii+1,M = M)] = if (ooz) wz0[, ii] else 0
+ wz[,iam(2*ii,2*ii+1,M = M)] <- if (ooz) wz0[, ii] else 0
}
wz
}), list( .link = link, .lscale = lscale, .earg = earg,
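
Throughout the scumulative slots above, eta carries the J cutpoint predictors and the J
scale predictors, and interleave.VGAM(M, M = 2) reorders cbind(loc, scale) so that each
response's pair of columns sits together. A sketch of the reordering (the index shown is
what interleave.VGAM appears to compute; treat it as illustrative):

    M <- 4                                      # J = 2 responses, two parameters each
    idx <- c(matrix(1:M, nrow = 2, byrow = TRUE))
    idx                                         # 1 3 2 4
    # so cbind(loc1, loc2, sca1, sca2)[, idx] becomes (loc1, sca1, loc2, sca2)
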
diff --git a/R/family.censored.R b/R/family.censored.R
index dfdba1b..7284e0d 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -334,7 +334,7 @@ if (FALSE)
cennormal1 <-
- cennormal <- function(lmu = "identity", lsd = "loge",
+ cennormal <- function(lmu = "identitylink", lsd = "loge",
imethod = 1, zero = 2) {
@@ -681,12 +681,13 @@ if (FALSE)
"gamma(1 + 1/shape)^2)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero
))),
@@ -705,10 +706,10 @@ if (FALSE)
y <- temp5$y
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
if (is.SurvS4(y))
@@ -721,12 +722,12 @@ if (FALSE)
predictors.names <-
c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
- Shape.init <- matrix(if(length( .ishape )) .ishape else 0 + NA,
+ Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
n, ncoly, byrow = TRUE)
- Scale.init <- matrix(if(length( .iscale )) .iscale else 0 + NA,
+ Scale.init <- matrix(if (length( .iscale )) .iscale else 0 + NA,
n, ncoly, byrow = TRUE)
if (!length(etastart)) {
@@ -747,12 +748,12 @@ if (FALSE)
Shape.init[, ilocal] <- 1 / fit0$coef["X"]
if (!is.Numeric(Scale.init[, ilocal]))
Scale.init[, ilocal] <- exp(fit0$coef["Intercept"])
- } # ilocal
+ } # ilocal
etastart <-
cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
theta2eta(Scale.init, .lscale , earg = .escale ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}
}), list( .lscale = lscale, .lshape = lshape,
@@ -786,21 +787,21 @@ if (FALSE)
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lshape , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eshape
- misc$earg[[Musual*ii ]] <- .escale
+ misc$earg[[M1*ii-1]] <- .eshape
+ misc$earg[[M1*ii ]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -825,7 +826,7 @@ if (FALSE)
.escale = escale, .eshape = eshape ) )),
vfamily = c("weibull"),
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
@@ -838,7 +839,7 @@ if (FALSE)
myderiv <- c(w) * cbind(dl.dshape * dshape.deta,
dl.dscale * dscale.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ) )),
weight = eval(substitute(expression({
@@ -852,8 +853,8 @@ if (FALSE)
wz <- array(c(c(w) * ned2l.dshape * dshape.deta^2,
c(w) * ned2l.dscale * dscale.deta^2,
c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
@@ -1138,12 +1139,13 @@ pgamma.deriv.unscaled <- function(q, shape) {
""),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
lower.limit = .lower.limit ,
zero = .zero )
}, list( .zero = zero,
@@ -1164,10 +1166,10 @@ pgamma.deriv.unscaled <- function(q, shape) {
y <- temp5$y
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$lower.limit <- matrix( .lower.limit , n, ncoly, byrow = TRUE)
@@ -1187,7 +1189,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
predictors.names <-
c(namesof(mynames1, .lAlpha , earg = .eAlpha , tag = FALSE),
namesof(mynames2, .lBetaa , earg = .eBetaa , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
Alpha.init <- matrix(if (length( .iAlpha )) .iAlpha else 0 + NA,
@@ -1214,7 +1216,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
Betaa.init[, ilocal] <- aaa.init
if (!is.Numeric(Alpha.init[, ilocal]))
Alpha.init[, ilocal] <- (1 / bbb.init)^aaa.init
- } # ilocal
+ } # ilocal
} else {
Alpha.init <- rep( .iAlpha , length = n)
Betaa.init <- rep( .iBetaa , length = n)
@@ -1223,7 +1225,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
etastart <-
cbind(theta2eta(Alpha.init, .lAlpha , earg = .eAlpha ),
theta2eta(Betaa.init, .lBetaa , earg = .eBetaa ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
.eBetaa = eBetaa, .eAlpha = eAlpha,
@@ -1267,21 +1269,21 @@ pgamma.deriv.unscaled <- function(q, shape) {
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lAlpha , length = ncoly),
- rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .eAlpha
- misc$earg[[Musual*ii ]] <- .eBetaa
+ misc$earg[[M1*ii-1]] <- .eAlpha
+ misc$earg[[M1*ii ]] <- .eBetaa
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -1320,7 +1322,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
vfamily = c("truncweibull"),
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
Alpha <- eta2theta(eta[, c(TRUE, FALSE)], .lAlpha , earg = .eAlpha )
Betaa <- eta2theta(eta[, c(FALSE, TRUE)], .lBetaa , earg = .eBetaa )
@@ -1337,7 +1339,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
myderiv <- c(w) * cbind(dl.dAlpha * dAlpha.deta,
dl.dBetaa * dBetaa.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
.eBetaa = eBetaa, .eAlpha = eAlpha,
.lower.limit = lower.limit ) )),
@@ -1373,8 +1375,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
wz <- array(c(c(w) * ned2l.daa * dAlpha.deta^2,
c(w) * ned2l.dbb * dBetaa.deta^2,
c(w) * ned2l.dab * dBetaa.deta * dAlpha.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
}), list( .nrfs = nrfs ))))
}
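
The Musual-to-M1 rename runs through every family in this file: M1 is the number of
linear predictors per response, so with ncoly responses M = M1 * ncoly, and infos() now
also reports Q1, the number of response columns each response consumes. A sketch of how
the alternating logical masks in the deriv slots then pick out each parameter's columns:

    M1 <- 2; ncoly <- 3; M <- M1 * ncoly        # six linear predictors in total
    eta <- matrix(rnorm(5 * M), nrow = 5)
    eta[, c(TRUE, FALSE)]                       # columns 1, 3, 5: the shapes
    eta[, c(FALSE, TRUE)]                       # columns 2, 4, 6: the scales
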
diff --git a/R/family.circular.R b/R/family.circular.R
index ab0b98e..b735fe6 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -197,16 +197,39 @@ cardioid.control <- function(save.weight = TRUE, ...) {
}), list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dcard(x = y, mu = mu, rho = rho, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dcard(x = y, mu = mu, rho = rho, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
- }, list( .lmu = lmu, .lrho=lrho,
- .emu = emu, .erho=erho ))),
+ }, list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho ))),
vfamily = c("cardioid"),
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
@@ -284,7 +307,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
zero = .zero ,
parameterNames = c("location", "scale"))
}, list( .zero = zero ))),
@@ -335,14 +358,21 @@ cardioid.control <- function(save.weight = TRUE, ...) {
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
Scale <- eta2theta(eta[, 2], .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (Scale * cos(y - locat) -
- log(mbesselI0(x = Scale ))))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (Scale * cos(y - locat) - log(mbesselI0(x = Scale)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .escale = escale, .lscale = lscale,
.llocat = llocat, .elocat = elocat ))),
vfamily = c("vonmises"),
diff --git a/R/family.exp.R b/R/family.exp.R
index 292054f..4962f00 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -344,7 +344,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
koenker <- function(percentile = 50,
- llocation = "identity", lscale = "loge",
+ llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
zero = 2) {
@@ -456,14 +456,20 @@ rkoenker <- function(n, location = 0, scale = 1) {
.elocat = elocat, .escale = escale,
.imethod = imethod, .percentile = percentile ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * dkoenker(x = y, location = locat, scale = Scale,
- log = TRUE))
+ ll.elts <- c(w) * dkoenker(x = y, location = locat, scale = Scale,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
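
Another systematic rename, visible in koenker() above and repeatedly in
family.extremes.R below: the link previously called "identity" is now "identitylink",
presumably to avoid clashing with base R's identity(). Only the string changes in user
code; a sketch, assuming a data frame d with columns y and x:

    fit <- vglm(y ~ x, koenker(llocation = "identitylink"), data = d)
    # pre-0.9-4 equivalent: koenker(llocation = "identity")
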
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 383030f..f66867c 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -178,7 +178,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
gev <- function(
- llocation = "identity",
+ llocation = "identitylink",
lscale = "loge",
lshape = logoff(offset = 0.5),
percentiles = c(95, 99),
@@ -248,17 +248,17 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
multipleResponses = FALSE,
zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- extra$Musual <- 3
+ M1 <- extra$M1 <- 3
ncoly <- ncol(y)
extra$ncoly <- ncoly
- extra$Musual <- Musual
+ extra$M1 <- M1
mynames1 <- "location"
@@ -418,7 +418,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
misc$link <- c( .llocat , .lscale , .lshape )
names(misc$link) <- c(mynames1, mynames2, mynames3)
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- FALSE
@@ -485,7 +485,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
.giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
vfamily = c("gev", "vextremes"),
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
r.vec <- rowSums(cbind(!is.na(y)))
Locat <- eta2theta(eta[, 1], .llocat , .elocat )
@@ -628,7 +628,7 @@ dgammadx <- function(x, deriv.arg = 1) {
- egev <- function(llocation = "identity",
+ egev <- function(llocation = "identitylink",
lscale = "loge",
lshape = logoff(offset = 0.5),
percentiles = c(95, 99),
@@ -824,18 +824,25 @@ dgammadx <- function(x, deriv.arg = 1) {
.elocat = elocat, .escale = escale, .eshape = eshape,
.tolshape0 = tolshape0, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
mmu <- eta2theta(eta[, 1], .llocat , earg = .elocat )
sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgev(x=y, location=mmu, scale=sigma, shape=xi,
- tolshape0 = .tolshape0,
- log = TRUE, oobounds.log = -1.0e04,
- giveWarning= .giveWarning))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgev(x = y, location = mmu, scale = sigma,
+ shape = xi, tolshape0 = .tolshape0,
+ log = TRUE, oobounds.log = -1.0e04,
+ giveWarning = .giveWarning)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
.giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
@@ -961,7 +968,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
}
- gumbel <- function(llocation = "identity",
+ gumbel <- function(llocation = "identitylink",
lscale = "loge",
iscale = NULL,
R = NA, percentiles = c(95, 99),
@@ -1095,7 +1102,8 @@ pgumbel <- function(q, location = 0, scale = 1) {
.mpv = mpv, .R = R ))),
vfamily = c("gumbel", "vextremes"),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
@@ -1107,12 +1115,18 @@ pgumbel <- function(q, location = 0, scale = 1) {
index <- (jay <= r.vec)
ans[index] <- ans[index] - (y[index,jay]-loc[index]) / sigma[index]
}
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * ans)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * ans
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
deriv = eval(substitute(expression({
@@ -1387,12 +1401,13 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
namesof("shape", link = lshape, earg = eshape )),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero
))),
@@ -1413,10 +1428,10 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
y.names <- dimnames(y)[[2]]
if (length(y.names) != ncoly)
y.names <- paste("Y", 1:ncoly, sep = "")
@@ -1438,7 +1453,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
predictors.names <-
c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -1475,7 +1490,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
etastart <-
cbind(theta2eta(init.sig, .lscale , earg = .escale ),
theta2eta(init.xii, .lshape , earg = .eshape ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
@@ -1495,10 +1510,10 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
shape <- as.matrix(shape)
- Musual <- 2
+ M1 <- 2
pcent <- .percentiles
LP <- length(pcent) # NULL means LP == 0 and the mean is returned
- ncoly <- ncol(eta) / Musual
+ ncoly <- ncol(eta) / M1
if (!length(y.names <- extra$y.names))
y.names <- paste("Y", 1:ncoly, sep = "")
@@ -1575,21 +1590,21 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .escale
- misc$earg[[Musual*ii ]] <- .eshape
+ misc$earg[[M1*ii-1]] <- .escale
+ misc$earg[[M1*ii ]] <- .eshape
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -1603,23 +1618,32 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
.threshold = threshold,
.tolshape0 = tolshape0, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
Threshold <- extra$threshold
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgpd(x = y, location = Threshold, scale = sigma,
- shape = Shape, tolshape0 = .tolshape0,
- giveWarning = .giveWarning,
- log = TRUE, oobounds.log = -1.0e04))
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dgpd(x = y, location = Threshold, scale = sigma,
+ shape = Shape, tolshape0 = .tolshape0,
+ giveWarning = .giveWarning,
+ log = TRUE, oobounds.log = -1.0e04)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .tolshape0 = tolshape0, .giveWarning= giveWarning,
.escale = escale, .eshape = eshape,
.lscale = lscale, .lshape = lshape ))),
vfamily = c("gpd", "vextremes"),
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
@@ -1654,7 +1678,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
myderiv <-
c(w) * cbind(dl.dsigma * dsigma.deta,
dl.dShape * dShape.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .tolshape0 = tolshape0,
.lscale = lscale, .escale = escale,
.lshape = lshape, .eshape = eshape ))),
@@ -1665,15 +1689,13 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
ned2l.dshape2 <- 2 / ((1+2*Shape) * (1+Shape))
ned2l.dshapescale <- 1 / ((1+2*Shape) * (1+Shape) * sigma) # > 0 !
- NOS <- M / Musual
-
+ S <- M / M1
wz <- array(c(c(w) * ned2l.dscale2 * dsigma.deta^2,
c(w) * ned2l.dshape2 * dShape.deta^2,
c(w) * ned2l.dshapescale * dsigma.deta * dShape.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
-
+ dim = c(n, S, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
}), list( .lscale = lscale ))))
@@ -1687,6 +1709,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
+
meplot.default <- function(y, main = "Mean Excess Plot",
xlab = "Threshold", ylab = "Mean Excess", lty = c(2, 1:2),
conf = 0.95, col = c("blue", "black", "blue"), type = "l", ...) {
@@ -1791,7 +1814,7 @@ setMethod("guplot", "vlm",
- egumbel <- function(llocation = "identity",
+ egumbel <- function(llocation = "identitylink",
lscale = "loge",
iscale = NULL,
R = NA, percentiles = c(95, 99),
@@ -1903,12 +1926,20 @@ setMethod("guplot", "vlm",
.elocat = elocat, .escale = escale,
.R = R, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
sca <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgumbel(x = y, location = loc, scale = sca, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) *
+ dgumbel(x = y, location = loc, scale = sca, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
@@ -1944,7 +1975,7 @@ setMethod("guplot", "vlm",
- cgumbel <- function(llocation = "identity",
+ cgumbel <- function(llocation = "identitylink",
lscale = "loge",
iscale = NULL,
mean = TRUE, percentiles = NULL, zero = 2) {
@@ -2045,7 +2076,8 @@ setMethod("guplot", "vlm",
.elocat = elocat, .escale = escale ,
.percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
zedd <- (y-loc) / sc
@@ -2321,14 +2353,23 @@ frechet2.control <- function(save.weight = TRUE, ...) {
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
loctn <- extra$location
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * dfrechet(x = y, location = loctn, scale = Scale,
- shape = shape, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) *
+ dfrechet(x = y, location = loctn, scale = Scale,
+ shape = shape, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
vfamily = c("frechet2", "vextremes"),
@@ -2546,15 +2587,22 @@ if (FALSE)
.ediffr = ediffr, .escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
loctn <- extra$LHSanchor -
- eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
+ eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dfrechet(x = y, location = loctn, scale = Scale,
- shape = shape, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dfrechet(x = y, location = loctn, scale = Scale,
+ shape = shape, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
@@ -2638,7 +2686,7 @@ recnormal.control <- function(save.weight = TRUE, ...) {
}
- recnormal <- function(lmean = "identity", lsd = "loge",
+ recnormal <- function(lmean = "identitylink", lsd = "loge",
imean = NULL, isd = NULL, imethod = 1,
zero = NULL) {
lmean <- as.list(substitute(lmean))
@@ -2715,15 +2763,21 @@ recnormal.control <- function(save.weight = TRUE, ...) {
}), list( .lmean = lmean, .lsdev = lsdev,
.emean = emean, .esdev = esdev ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- sdev <- eta2theta(eta[, 2], .lsdev)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- zedd <- (y - mu) / sdev
- NN <- nrow(eta)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ sdev <- eta2theta(eta[, 2], .lsdev )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ zedd <- (y - mu) / sdev
+ NN <- nrow(eta)
+ if (summation) {
sum(w * (-log(sdev) - 0.5 * zedd^2)) -
sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
+ } else {
+ stop("cannot handle 'summation = FALSE' yet")
}
+ }
}, list( .lsdev = lsdev, .esdev = esdev ))),
vfamily = c("recnormal"),
deriv = eval(substitute(expression({
@@ -2831,13 +2885,19 @@ recexp1.control <- function(save.weight = TRUE, ...) {
misc$expected = TRUE
}), list( .lrate = lrate, .erate = erate ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- rate = eta2theta(eta, .lrate , .erate )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- NN <- length(eta)
- y <- cbind(y)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ rate <- eta2theta(eta, .lrate , .erate )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ NN <- length(eta)
+ y <- cbind(y)
+ if (summation) {
sum(w * log(rate)) - w[NN] * rate[NN] * y[NN, 1]
+ } else {
+ stop("cannot handle 'summation = FALSE' yet")
+ }
}
}, list( .lrate = lrate, .erate = erate ))),
vfamily = c("recexp1"),
@@ -3001,13 +3061,21 @@ dpois.points <- function(x, lambda, ostatistic,
.ostatistic = ostatistic,
.dimension = dimension ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
density <- eta2theta(eta, .link, earg = .earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dpois.points(y, lambda = density,
- ostatistic = .ostatistic ,
- dimension = .dimension , log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dpois.points(y, lambda = density,
+ ostatistic = .ostatistic ,
+ dimension = .dimension , log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg,
.ostatistic = ostatistic,
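
Note also the consistent switch from w to c(w) in these slots. Prior weights can arrive
as an n x 1 matrix, and multiplying an n x 1 matrix by an n x J matrix of log-density
values is a non-conformable-arrays error in R; c(w) drops the dim attribute so ordinary
vector recycling applies. A two-line demonstration:

    w <- matrix(1:3, 3, 1); ll <- matrix(0, 3, 2)
    # w * ll                # error: non-conformable arrays
    c(w) * ll               # fine: w recycled down each column
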
diff --git a/R/family.functions.R b/R/family.functions.R
index 1cf75f5..9e0a873 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -103,18 +103,19 @@ tapplymat1 <-
function.arg <- match.arg(function.arg,
c("cumsum", "diff", "cumprod"))[1]
- type <- switch(function.arg,
- cumsum = 1,
- diff = 2,
- cumprod = 3,
- stop("function.arg not matched"))
+ type <-
+ switch(function.arg,
+ cumsum = 1,
+ diff = 2,
+ cumprod = 3,
+ stop("function.arg not matched"))
if (!is.matrix(mat))
mat <- as.matrix(mat)
nr <- nrow(mat)
nc <- ncol(mat)
- fred <- .C("tapplymat1", mat = as.double(mat),
- as.integer(nr), as.integer(nc), as.integer(type))
+ fred <- .C("tapply_mat1", mat = as.double(mat),
+ as.integer(nr), as.integer(nc), as.integer(type))
dim(fred$mat) <- c(nr, nc)
dimnames(fred$mat) <- dimnames(mat)
@@ -230,7 +231,7 @@ veigen <- function(x, M) {
wk = double(M*M),
as.integer(index$row), as.integer(index$col),
as.integer(dimm.value),
- error.code = integer(1), PACKAGE = "VGAM")
+ error.code = integer(1))
if (z$error.code)
stop("eigen algorithm (rs) returned error code ", z$error.code)
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 5029962..dbd7927 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -156,7 +156,7 @@
blurb = c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
"Links: ",
namesof("pA", link, earg = earg), ", ",
- namesof("f", "identity", tag = FALSE)),
+ namesof("f", "identitylink", tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
mustart.orig <- mustart
@@ -177,7 +177,7 @@
predictors.names <-
c(namesof("pA", .link , earg = .earg , tag = FALSE),
- namesof("f", "identity", earg = list(), tag = FALSE))
+ namesof("f", "identitylink", earg = list(), tag = FALSE))
if (is.null(etastart)) {
pA <- if (is.numeric( .ipA )) rep( .ipA , len = n) else
@@ -187,21 +187,21 @@
if (any(pA <= 0) || any(pA >= 1))
stop("bad initial value for 'pA'")
etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
- theta2eta(f, "identity"))
+ theta2eta(f, "identitylink"))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .ipA = ipA, .iF = iF, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
- f <- eta2theta(eta[, 2], link = "identity", earg = list())
+ f <- eta2theta(eta[, 2], link = "identitylink", earg = list())
cbind(AA = pA^2+pA*(1-pA)*f,
Aa = 2*pA*(1-pA)*(1-f),
aa = (1-pA)^2 + pA*(1-pA)*f)
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link <- c(pA = .link , f = "identity")
+ misc$link <- c(pA = .link , f = "identitylink")
misc$earg <- list(pA = .earg , f = list() )
@@ -218,7 +218,7 @@
vfamily = c("AAaa.nohw", "vgenetic"),
deriv = eval(substitute(expression({
pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
- f <- eta2theta(eta[, 2], link = "identity")
+ f <- eta2theta(eta[, 2], link = "identitylink")
dP1 <- cbind(f + 2*pA*(1-f),
2*(1-f)*(1-2*pA),
-2*(1-pA) +f*(1-2*pA))
@@ -236,7 +236,7 @@
weight = eval(substitute(expression({
dPP <- array(c(dP1, dP2), c(n, 3, 2))
dPP.deta <- cbind(dtheta.deta(pA, link = .link , earg = .earg ),
- dtheta.deta(f, link = "identity"))
+ dtheta.deta(f, link = "identitylink"))
wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
for (i1 in 1:M)
for (i2 in i1:M) {
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index f7dd023..c3f96aa 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -60,7 +60,8 @@
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
bred = .bred ,
zero = .zero )
}, list( .zero = zero,
@@ -89,9 +90,10 @@
w <- temp5$w
y <- temp5$y
+
M <- ncol(y)
if (!all(y == 0 | y == 1))
- stop("response must contain 0's and 1's only")
+ stop("response must contain 0s and 1s only")
@@ -105,14 +107,14 @@
namesof(if (M > 1) dn2 else
"mu", .link , earg = .earg , short = TRUE)
- if (!length(mustart) && !length(etastart))
- mustart <- matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
- byrow = TRUE)
+ if (!length(mustart) && !length(etastart))
+ mustart <- matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
+ byrow = TRUE)
- if (!all(w == 1))
- extra$orig.w <- w
+ if (!all(w == 1))
+ extra$orig.w <- w
- extra$mv <- TRUE
+ extra$mv <- TRUE
} else {
@@ -123,37 +125,38 @@
NCOL <- function (x) if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y <- (y != levels(y)[1])
- nvec <- rep(1, n)
- y[w == 0] <- 0
- if (!all(y == 0 | y == 1))
- stop("response values 'y' must be 0 or 1")
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + w * y) / (1 + w)
+ if (is.factor(y))
+ y <- (y != levels(y)[1])
+ nvec <- rep(1, n)
+ y[w == 0] <- 0
+ if (!all(y == 0 | y == 1))
+ stop("response values 'y' must be 0 or 1")
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + w * y) / (1 + w)
- no.successes <- y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
+ no.successes <- y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y <- round(y)
- nvec <- y[, 1] + y[, 2]
- y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
- w <- w * nvec
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + nvec * y) / (1 + nvec)
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else {
- stop("for the binomialff family, response 'y' must be a ",
- "vector of 0 and 1's\n",
- "or a factor (first level = fail, other levels = success),\n",
- "or a 2-column matrix where col 1 is the no. of ",
- "successes and col 2 is the no. of failures")
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor (first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
}
predictors.names <-
namesof("mu", .link , earg = .earg , short = TRUE)
@@ -182,14 +185,15 @@
last = eval(substitute(expression({
if (exists("CQO.FastAlgorithm", envir = VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAMenv)
+ rm("CQO.FastAlgorithm", envir = VGAMenv)
if (exists("modelno", envir = VGAMenv))
- rm("modelno", envir = VGAMenv)
+ rm("modelno", envir = VGAMenv)
dpar <- .dispersion
if (!dpar) {
- temp87 <- (y-mu)^2 * wz / (dtheta.deta(mu, link = .link ,
- earg = .earg )^2) # w cancel
+ temp87 <- (y-mu)^2 * wz / (
+ dtheta.deta(mu, link = .link ,
+ earg = .earg )^2) # w cancel
if (.mv && ! .onedpar) {
dpar <- rep(as.numeric(NA), len = M)
temp87 <- cbind(temp87)
@@ -229,35 +233,89 @@
}, list( .link = link, .earg = earg))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) {
- c(w) * (y / mu - (1-y) / (1-mu))
- } else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ c(w) * (y / mu - (1-y) / (1-mu))
+ } else {
- ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno <- 1.0e6 * .Machine$double.eps
- smallno <- sqrt(.Machine$double.eps)
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts <- round(ycounts)
+ smallno <- 1.0e6 * .Machine$double.eps
+ smallno <- sqrt(.Machine$double.eps)
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
- if ( .mv ) {
- sum((ycounts * log(mu) +
- (1 - ycounts) * log1p(-mu)) * w)
- } else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbinom(x = ycounts, size = nvec, prob = mu, log = TRUE))
- }
+ ll.elts <- if ( .mv ) {
+ c(w) * ( ycounts * log( mu) +
+ (1 - ycounts) * log1p(-mu))
+ } else {
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbinom(x = ycounts, size = nvec, prob = mu, log = TRUE)
+ }
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .mv = mv ))),
vfamily = c("binomialff", "vcategorical"),
+
+
+ simslot = function (object, nsim) {
+
+ ftd <- fitted(object)
+
+
+ if (ncol(ftd) > 1)
+ stop("simulate() does not work with more than one response")
+
+
+
+ n <- length(ftd)
+ ntot <- n * nsim
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+
+
+
+ if (any(pwts %% 1 != 0))
+ stop("cannot simulate from non-integer prior.weights")
+ if (length(m <- object@model) > 0) {
+ y <- model.response(m)
+ if (is.factor(y)) {
+ yy <- factor(1 + rbinom(ntot, size = 1, prob = ftd),
+ labels = levels(y))
+ split(yy, rep(seq_len(nsim), each = n))
+ } else if (is.matrix(y) && ncol(y) == 2) {
+ yy <- vector("list", nsim)
+ for (i in seq_len(nsim)) {
+ Y <- rbinom(n, size = pwts, prob = ftd)
+ YY <- cbind(Y, pwts - Y)
+ colnames(YY) <- colnames(y)
+ yy[[i]] <- YY
+ }
+ yy
+ } else {
+ rbinom(ntot, size = pwts, prob = ftd)/pwts
+ }
+ } else {
+
+ rbinom(ntot, size = c(pwts), prob = c(ftd))/c(pwts)
+ }
+ },
+
+
+
+
deriv = eval(substitute(expression({
yBRED <- if ( .bred ) {
Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
@@ -317,11 +375,13 @@
if (!mv)
ans@deviance <-
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu),
y = cbind(y, 1-y),
w = w, residuals = residuals,
- eta = eta, extra = extra)
+ eta = eta, extra = extra,
+ summation = summation)
}
ans
@@ -342,14 +402,24 @@
blurb = c("Gamma distribution\n\n",
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu^2 / k"),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
+ deviance =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ devi <- -2 * c(w) * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
- } else sum(w * devi)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
},
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
dispersion = .dispersion )
}, list( .dispersion = dispersion ))),
initialize = eval(substitute(expression({
@@ -388,13 +458,13 @@
dpar <- .dispersion
if (!dpar) {
if (M == 1) {
- temp <- w * dmu.deta^2
- dpar <- sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
+ temp <- c(w) * dmu.deta^2
+ dpar <- sum(c(w) * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
} else {
dpar <- rep(0, len = M)
for (spp in 1:M) {
- temp <- w * dmu.deta[,spp]^2
- dpar[spp] <- sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) / (
+ temp <- c(w) * dmu.deta[, spp]^2
+ dpar[spp] <- sum(c(w) * (y[,spp]-mu[, spp])^2 * wz[, spp]/temp) / (
length(mu[,spp]) - ncol(x))
}
}
@@ -421,7 +491,7 @@
}, list( .link = link, .earg = earg))),
vfamily = "gammaff",
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
ncoly <- ncol(as.matrix(y))
dl.dmu <- (y-mu) / mu^2
@@ -453,17 +523,29 @@
blurb = c("Inverse Gaussian distribution\n\n",
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu^3 / k"),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+
+ deviance =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
pow <- 3 # Use Quasi()$deviance with pow==3
devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
devi <- 2 * (devy - devmu)
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
- } else sum(w * devi)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
},
+
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
dispersion = .dispersion )
}, list( .earg = earg , .dispersion = dispersion ))),
initialize = eval(substitute(expression({
@@ -502,8 +584,8 @@
last = eval(substitute(expression({
dpar <- .dispersion
if (!dpar) {
- temp <- w * dmu.deta^2
- dpar <- sum( w * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
+ temp <- c(w) * dmu.deta^2
+ dpar <- sum( c(w) * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
}
misc$dispersion <- dpar
misc$default.dispersion <- 0
@@ -527,7 +609,7 @@
}, list( .link = link, .earg = earg ))),
vfamily = "inverse.gaussianff",
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
ncoly <- ncol(as.matrix(y))
dl.dmu <- (y - mu) / mu^3
@@ -672,7 +754,8 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -690,10 +773,10 @@ rinv.gaussian <- function(n, mu, lambda) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
@@ -702,7 +785,7 @@ rinv.gaussian <- function(n, mu, lambda) {
predictors.names <-
c(namesof(mynames1, .lmu , earg = .emu , short = TRUE),
namesof(mynames2, .llambda , earg = .elambda , short = TRUE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -728,7 +811,7 @@ rinv.gaussian <- function(n, mu, lambda) {
etastart <- cbind(
theta2eta(init.mu, link = .lmu , earg = .emu ),
theta2eta(init.la, link = .llambda , earg = .elambda ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
@@ -740,21 +823,21 @@ rinv.gaussian <- function(n, mu, lambda) {
}, list( .lmu = lmu, .emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lmu , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .emu
- misc$earg[[Musual*ii ]] <- .elambda
+ misc$earg[[M1*ii-1]] <- .emu
+ misc$earg[[M1*ii ]] <- .elambda
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$shrinkage.init <- .sinit
misc$expected <- TRUE
@@ -768,15 +851,22 @@ rinv.gaussian <- function(n, mu, lambda) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
mymu <- eta2theta(eta[, c(TRUE, FALSE)],
link = .lmu , earg = .emu )
lambda <- eta2theta(eta[, c(FALSE, TRUE)],
link = .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dinv.gaussian(x = y, mu = mymu,
- lambda = lambda, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dinv.gaussian(x = y, mu = mymu,
+ lambda = lambda, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
@@ -784,7 +874,7 @@ rinv.gaussian <- function(n, mu, lambda) {
vfamily = "inv.gaussianff",
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
mymu <- eta2theta(eta[, c(TRUE, FALSE)],
link = .lmu , earg = .emu )
lambda <- eta2theta(eta[, c(FALSE, TRUE)],
@@ -798,7 +888,7 @@ rinv.gaussian <- function(n, mu, lambda) {
dl.dlambda <- 0.5 / lambda - (y - mymu)^2 / (2 * mymu^2 * y)
myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dlambda * dlambda.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
@@ -809,9 +899,9 @@ rinv.gaussian <- function(n, mu, lambda) {
wz <- cbind(dmu.deta^2 * ned2l.dmu2,
dlambda.deta^2 * ned2l.dlambda2)[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))))
}
@@ -828,8 +918,8 @@ rinv.gaussian <- function(n, mu, lambda) {
- if (!is.logical(bred) || length(bred) > 1)
- stop("argument 'bred' must be a single logical")
+ if (!is.logical(bred) || length(bred) > 1)
+ stop("argument 'bred' must be a single logical")
estimated.dispersion <- (dispersion == 0)
@@ -864,15 +954,28 @@ rinv.gaussian <- function(n, mu, lambda) {
constraints = constraints)
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+
+ deviance =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
nz <- (y > 0)
devi <- -(y - mu)
devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * c(w)) else
- 2 * sum(c(w) * devi)
+ if (residuals) {
+ sign(y - mu) * sqrt(2 * abs(devi) * c(w))
+ } else {
+ dev.elts <- 2 * c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
},
+
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
bred = .bred ,
zero = .zero )
}, list( .zero = zero,
@@ -983,16 +1086,42 @@ rinv.gaussian <- function(n, mu, lambda) {
}, list( .link = link, .earg = earg))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) c(w) * (y / mu - 1) else {
- sum(c(w) * dpois(x = y, lambda = mu, log = TRUE))
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ c(w) * (y / mu - 1)
+ } else {
+ ll.elts <- c(w) * dpois(x = y, lambda = mu, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
},
vfamily = "poissonff",
+
+
+
+
+ simslot =
+ function(object, nsim) {
+
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ ftd <- fitted(object)
+ rpois(nsim * length(ftd), ftd)
+ },
+
+
+
deriv = eval(substitute(expression({
yBRED <- if ( .bred ) {
Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
- diagWm = c(t(w * mu))) # Handles M>1
+ diagWm = c(t(c(w) * mu))) # Handles M>1
varY <- mu # Is a matrix if M>1.
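
The simslot functions added to binomialff (earlier in this file) and poissonff above are
what hook these families into the new simulate() support in this release: poissonff
ignores non-unit prior weights with a warning, while binomialff refuses non-integer
ones. A usage sketch, assuming a one-response Poisson fit to a hypothetical data frame d:

    fit <- vglm(y ~ x, poissonff, data = d)
    sims <- simulate(fit, nsim = 10)            # 10 simulated response sets
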
@@ -1108,7 +1237,7 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
lmean = .lmean ,
zero = .zero )
}, list( .lmean = lmean ))),
@@ -1156,16 +1285,23 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta[, 1], link = .lmean,
earg = .emean )
Disper <- eta2theta(eta[, 2], link = .ldisp,
earg = .edisp )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * (0.5*log(Disper) +
- Disper*(y-lambda) + Disper*y*log(lambda)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (0.5 * log(Disper) +
+ Disper*(y-lambda) + Disper*y*log(lambda))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
vfamily = "double.exppoisson",
@@ -1240,20 +1376,20 @@ rinv.gaussian <- function(n, mu, lambda) {
if (NCOL(y) == 1) {
- if (is.factor(y)) y <- (y != levels(y)[1])
- nvec <- rep(1, n)
- y[w == 0] <- 0
- if (!all(y == 0 | y == 1))
- stop("response values 'y' must be 0 or 1")
- init.mu =
- mustart <- (0.5 + w * y) / (1 + w)
+ if (is.factor(y)) y <- (y != levels(y)[1])
+ nvec <- rep(1, n)
+ y[w == 0] <- 0
+ if (!all(y == 0 | y == 1))
+ stop("response values 'y' must be 0 or 1")
+ init.mu <-
+ mustart <- (0.5 + w * y) / (1 + w)
- no.successes <- y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
+ no.successes <- y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
if (min(y) < 0)
stop("Negative data not allowed!")
@@ -1305,19 +1441,29 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
prob <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
Disper <- eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
temp1 <- y * log(ifelse(y > 0, y, 1)) # y*log(y)
temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
- sum(0.5 * log(Disper) + w * (y * Disper * log(prob) +
+
+
+ ll.elts <-
+ (0.5 * log(Disper) + w * (y * Disper * log(prob) +
(1-y) * Disper * log1p(-prob) +
temp1 * (1-Disper) + temp2 * (1 - Disper)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
@@ -1461,8 +1607,11 @@ rinv.gaussian <- function(n, mu, lambda) {
matrix(temp, extra$n, extra$M)
}, list( .link = link, .earg = earg))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ w * (y / mu - (1-y) / (1-mu))
+ } else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
@@ -1474,10 +1623,15 @@ rinv.gaussian <- function(n, mu, lambda) {
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbinom(x = ycounts, size = nvec, prob = mu,
- log = TRUE))
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbinom(x = ycounts, size = nvec, prob = mu, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
},
vfamily = c("matched.binomial", "vcategorical"),
deriv = eval(substitute(expression({
@@ -1607,43 +1761,51 @@ mypool <- function(x, index) {
extra$index9 = temp9
predictors.names <-
namesof("mu", .link , earg = .earg , short = TRUE)
- }), list( .link = link, .earg = earg, .mvar = mvar ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- denominator <- exp(eta)
- numerator <- mypool(denominator, extra$mvar)
- numerator / denominator
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c(mu = .link )
-
- misc$earg <- list(mu = .earg )
-
- misc$expected <- TRUE
- }), list( .link = link, .earg = earg))),
- loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w*(y/mu - (1-y)/(1-mu)) else {
- sum(w*(y*log(mu) + (1-y)*log1p(-mu)))
- }
- },
- vfamily = c("mbin", "vcategorical"),
- deriv = eval(substitute(expression({
- answer =
- if ( .link == "logit") {
- w * (y - mu)
- } else stop("can only handle the logit link")
+ }), list( .link = link, .earg = earg, .mvar = mvar ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ denominator <- exp(eta)
+ numerator <- mypool(denominator, extra$mvar)
+ numerator / denominator
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu = .link )
+
+ misc$earg <- list(mu = .earg )
+
+ misc$expected <- TRUE
+ }), list( .link = link, .earg = earg))),
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ c(w) * (y/mu - (1-y)/(1-mu))
+ } else {
+ ll.elts <- c(w) * (y*log(mu) + (1-y)*log1p(-mu))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ },
+ vfamily = c("mbin", "vcategorical"),
+ deriv = eval(substitute(expression({
+ answer <-
+ if ( .link == "logit") {
+ w * (y - mu)
+ } else stop("can only handle the logit link")
answer
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- tmp100 = mu*(1-mu)
- answer = if ( .link == "logit") {
- cbind(w * tmp100)
- } else stop("can only handle the logit link")
-
- result = matrix( .smallno, n, M)
- result[cbind(1:n, extra$index9)] = answer
- result
- }), list( .link = link, .earg = earg, .smallno = smallno ))))
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ tmp100 <- mu*(1-mu)
+ answer <- if ( .link == "logit") {
+ cbind(w * tmp100)
+ } else stop("can only handle the logit link")
+
+ result <- matrix( .smallno, n, M)
+ result[cbind(1:n, extra$index9)] <- answer
+ result
+ }), list( .link = link, .earg = earg, .smallno = smallno ))))
}
@@ -1682,22 +1844,24 @@ mypool <- function(x, index) {
namesof("mu.2[,j]", link, earg = earg),
"\n",
"Variance: mu*(1-mu)"),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y=cbind(y, 1-y),
w = w, residuals = residuals,
- eta = eta, extra = extra)
+ eta = eta, extra = extra,
+ summation = summation)
},
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
parallel = .parallel)
}, list( .parallel = parallel ))),
initialize = eval(substitute(expression({
- Musual = 2
+ M1 = 2
if ( .mv ) {
y = as.matrix(y)
- M = Musual * ncol(y)
+ M = M1 * ncol(y)
if (!all(y == 0 | y == 1))
stop("response must contain 0's and 1's only")
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
@@ -1711,9 +1875,9 @@ mypool <- function(x, index) {
"mu.1", .link , earg = .earg , short = TRUE),
namesof(if (M > 1) dn2 else
"mu.2", .link , earg = .earg , short = TRUE))
- NOS = M / Musual
+ NOS = M / M1
predictors.names <-
- predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
if (!length(mustart) && !length(etastart))
@@ -1721,7 +1885,7 @@ mypool <- function(x, index) {
} else {
dn2 = c("mu1.", "mu2.")
- M = Musual
+ M = M1
@@ -1796,34 +1960,41 @@ mypool <- function(x, index) {
kronecker(usualanswer, matrix(1, 1, 2))
}, list( .link = link, .earg = earg))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
-
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ c(w) * (y / mu - (1-y) / (1-mu))
+ } else {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * c(w) # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e6 * .Machine$double.eps
- smallno = sqrt(.Machine$double.eps)
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ smallno <- 1.0e6 * .Machine$double.eps
+ smallno <- sqrt(.Machine$double.eps)
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbinom(x = ycounts, size = nvec, prob = mu,
- log = TRUE))
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbinom(x = ycounts, size = nvec, prob = mu, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
},
vfamily = c("augbinomial", "vcategorical"),
deriv = eval(substitute(expression({
- Musual = 2
- Mdiv2 = M / 2
+ M1 <- 2
+ Mdiv2 <- M / 2
- NOS = M / Musual
+ NOS <- M / M1
- Konst1 = 1 # Works with this
- deriv1 = Konst1 * w *
+ Konst1 <- 1 # Works with this
+ deriv1 <- Konst1 * w *
if ( .link == "logit") {
y * (1 - mu)
} else {
@@ -1841,27 +2012,27 @@ mypool <- function(x, index) {
}
myderiv = (cbind(deriv1,
- deriv2))[, interleave.VGAM(Musual * NOS,
- M = Musual)]
+ deriv2))[, interleave.VGAM(M1 * NOS,
+ M = M1)]
myderiv
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- tmp100 = mu * (1.0 - mu)
+ tmp100 <- mu * (1.0 - mu)
- tmp200 = if ( .link == "logit") {
+ tmp200 <- if ( .link == "logit") {
cbind(w * tmp100)
} else {
cbind(w * dtheta.deta(mu, link = .link , earg = .earg )^2 / tmp100)
}
- wk.wt1 = (Konst1^2) * tmp200 * (1 - mu)
- wk.wt2 = (Konst1^2) * tmp200 * mu
+ wk.wt1 <- (Konst1^2) * tmp200 * (1 - mu)
+ wk.wt2 <- (Konst1^2) * tmp200 * mu
- my.wk.wt = cbind(wk.wt1, wk.wt2)
- my.wk.wt = my.wk.wt[, interleave.VGAM(Musual * NOS, M = Musual)]
+ my.wk.wt <- cbind(wk.wt1, wk.wt2)
+ my.wk.wt <- my.wk.wt[, interleave.VGAM(M1 * NOS, M = M1)]
my.wk.wt
}), list( .link = link, .earg = earg))))
}
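One change recurs through every family in the hunks above: the
loglikelihood and deviance slots gain a summation = TRUE argument, so
they return the usual scalar by default but the per-observation
contributions when summation = FALSE. The shared shape, as a minimal
sketch (dnorm() is only a placeholder density, not any particular
family):

    ## Sketch of the new slot signature used throughout this commit.
    loglik.slot <- function(mu, y, w, residuals = FALSE, eta,
                            extra = NULL, summation = TRUE) {
      if (residuals) {
        stop("loglikelihood residuals not implemented yet")
      } else {
        ll.elts <- c(w) * dnorm(y, mean = mu, log = TRUE)  # placeholder
        if (summation) sum(ll.elts) else ll.elts
      }
    }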
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 6d96144..582e8cf 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -70,8 +70,8 @@
"11" = exp(u1+u2+u12) / denom)
},
last = expression({
- misc$link <- c("u1" = "identity", "u2" = "identity",
- "u12" = "identity")
+ misc$link <- c("u1" = "identitylink", "u2" = "identitylink",
+ "u12" = "identitylink")
misc$earg <- list("u1" = list(), "u2" = list(),
"u12" = list())
@@ -85,15 +85,25 @@
u12 <- log(mu[,4]) - u0 - u1 - u2
cbind(u1, u2, u12)
},
- loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
u1 <- eta[,1]
u2 <- eta[,2]
u12 <- eta[,3]
denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
u0 <- -log(denom)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else
- sum(c(w) *(u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2]))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2])
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
},
vfamily = c("loglinb2"),
deriv = expression({
@@ -226,7 +236,7 @@
"111" = exp(u1+u2+u3+u12+u13+u23)) / denom
},
last = expression({
- misc$link <- rep("identity", length = M)
+ misc$link <- rep("identitylink", length = M)
names(misc$link) <- predictors.names
misc$earg <- list(u1 = list(), u2 = list(), u3 = list(),
@@ -246,47 +256,58 @@
u12 <- log(mu[,7]) - u0 - u1 - u2
cbind(u1, u2, u3, u12, u13, u23)
},
- loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- u1 <- eta[, 1]
- u2 <- eta[, 2]
- u3 <- eta[, 3]
- u12 <- eta[, 4]
- u13 <- eta[, 5]
- u23 <- eta[, 6]
- denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
- exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
- exp(u1 + u2 + u3 + u12 + u13 + u23)
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
u0 <- -log(denom)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else
- sum(c(w) *(u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
- u13*y[,1]*y[,3] + u23*y[,2]*y[,3]))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
+ u13*y[,1]*y[,3] + u23*y[,2]*y[,3])
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
},
vfamily = c("loglinb3"),
deriv = expression({
- u1 <- eta[, 1]
- u2 <- eta[, 2]
- u3 <- eta[, 3]
- u12 <- eta[, 4]
- u13 <- eta[, 5]
- u23 <- eta[, 6]
- denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
- exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
- exp(u1 + u2 + u3 + u12 + u13 + u23)
-
-
-
- allterms <- exp(u1+u2+u3+u12+u13+u23)
- A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
- allterms
- A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
- allterms
- A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
- allterms
- A12 <- exp(u1 + u2 + u12) + allterms
- A13 <- exp(u1 + u3 + u13) + allterms
- A23 <- exp(u2 + u3 + u23) + allterms
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+
+
+
+ allterms <- exp(u1+u2+u3+u12+u13+u23)
+ A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
+ allterms
+ A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
+ allterms
+ A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
+ allterms
+ A12 <- exp(u1 + u2 + u12) + allterms
+ A13 <- exp(u1 + u3 + u13) + allterms
+ A23 <- exp(u2 + u3 + u23) + allterms
c(w) * cbind(-A1/denom + y[,1],
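Spelled out, the loglinb2 slot above computes, per observation, the
bivariate log-linear likelihood with normalising constant u0. A direct
transcription of the code (function name illustrative):

    ## One loglinb2 log-likelihood element, exactly as in the slot:
    ll.loglinb2 <- function(y1, y2, u1, u2, u12) {
      u0 <- -log(1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12))
      u0 + u1 * y1 + u2 * y2 + u12 * y1 * y2
    }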
diff --git a/R/family.math.R b/R/family.math.R
index 08ded5a..762b67a 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -111,7 +111,7 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
as.double(psip1), as.double(psidp), as.double(psidp1),
ifault = integer(nnn),
tmax = as.double(tmax),
- as.integer(nnn), PACKAGE = "VGAM")
+ as.integer(nnn))
answer <- matrix(fred$d, nnn, 6, byrow = TRUE)
dimnames(answer) <- list(names(q),
c("q", "q^2", "shape", "shape^2",
@@ -137,7 +137,7 @@ expint <- function(x) {
answer <- .C("sf_C_expint",
x = as.double(x),
size = as.integer(LLL),
- ans = double(LLL), PACKAGE = "VGAM")$ans
+ ans = double(LLL))$ans
answer[x < 0] <- NA
answer[x == 0] <- NA
@@ -156,7 +156,7 @@ expexpint <- function(x) {
answer <- .C("sf_C_expexpint",
x = as.double(x),
size = as.integer(LLL),
- ans = double(LLL), PACKAGE = "VGAM")$ans
+ ans = double(LLL))$ans
answer[x < 0] <- NA
answer[x == 0] <- NA
@@ -178,7 +178,7 @@ expint.E1 <- function(x) {
answer <- .C("sf_C_expint_e1",
x = as.double(x),
size = as.integer(LLL),
- ans = double(LLL), PACKAGE = "VGAM")$ans
+ ans = double(LLL))$ans
answer[x < 0] <- NA
answer[x == 0] <- NA
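The family.math.R hunks drop PACKAGE = "VGAM" from the .C() calls (the
native routines are resolved through registration instead), while the
domain guards still mask non-positive arguments. A small usage sketch,
assuming the package and its compiled code are loaded:

    library(VGAM)
    expint(c(-1, 0, 1))  # NA, NA, then Ei(1): x <= 0 is masked above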
diff --git a/R/family.mixture.R b/R/family.mixture.R
index fec81b2..c77253b 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,7 +20,7 @@ mix2normal.control <- function(trace = TRUE, ...) {
mix2normal <-
function(lphi = "logit",
- lmu = "identity",
+ lmu = "identitylink",
lsd = "loge",
iphi = 0.5,
imu1 = NULL, imu2 = NULL,
@@ -116,10 +116,10 @@ mix2normal.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
- qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.mu1 <- rep(if(length(.imu1)) .imu1 else qy[1], length = n)
- init.mu2 <- rep(if(length(.imu2)) .imu2 else qy[2], length = n)
+ qy <- quantile(y, prob = .qmu )
+ init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
+ init.mu1 <- rep(if (length(.imu1)) .imu1 else qy[1], length = n)
+ init.mu2 <- rep(if (length(.imu2)) .imu2 else qy[2], length = n)
ind.1 <- if (init.mu1[1] < init.mu2[1])
1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
@@ -127,12 +127,13 @@ mix2normal.control <- function(trace = TRUE, ...) {
round(n* init.phi[1]):n else
1:round(n* init.phi[1])
sorty <- sort(y)
- init.sd1 <- rep(if(length( .isd1 )) .isd1 else sd(sorty[ind.1]),
+ init.sd1 <- rep(if (length( .isd1 )) .isd1 else sd(sorty[ind.1]),
len = n)
- init.sd2 <- rep(if(length( .isd2 )) .isd2 else sd(sorty[ind.2]),
+ init.sd2 <- rep(if (length( .isd2 )) .isd2 else sd(sorty[ind.2]),
len = n)
if ( .eq.sd ) {
- init.sd1 <- init.sd2 = (init.sd1 + init.sd2)/2
+ init.sd1 <-
+ init.sd2 <- (init.sd1 + init.sd2) / 2
if (!all.equal( .esd1, .esd2 ))
stop("'esd1' and 'esd2' must be equal if 'eq.sd = TRUE'")
}
@@ -172,7 +173,9 @@ mix2normal.control <- function(trace = TRUE, ...) {
.esd1 = esd1, .esd2 = esd2,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
sd1 <- eta2theta(eta[, 3], link = .lsd, earg = .esd1)
@@ -180,9 +183,16 @@ mix2normal.control <- function(trace = TRUE, ...) {
sd2 <- eta2theta(eta[, 5], link = .lsd, earg = .esd2)
f1 <- dnorm(y, mean=mu1, sd=sd1)
f2 <- dnorm(y, mean=mu2, sd=sd2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1 - phi)*f2))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * log(phi*f1 + (1 - phi)*f2)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list(.lphi = lphi, .lmu = lmu,
.ephi = ephi, .emu1 = emu1, .emu2 = emu2,
.esd1 = esd1, .esd2 = esd2,
@@ -199,8 +209,8 @@ mix2normal.control <- function(trace = TRUE, ...) {
dmu2.deta <- dtheta.deta(mu2, link = .lmu, earg = .emu2)
dsd1.deta <- dtheta.deta(sd1, link = .lsd, earg = .esd1)
dsd2.deta <- dtheta.deta(sd2, link = .lsd, earg = .esd2)
- f1 <- dnorm(y, mean=mu1, sd=sd1)
- f2 <- dnorm(y, mean=mu2, sd=sd2)
+ f1 <- dnorm(y, mean = mu1, sd = sd1)
+ f2 <- dnorm(y, mean = mu2, sd = sd2)
pdf <- phi*f1 + (1 - phi)*f2
z1 <- (y-mu1) / sd1
z2 <- (y-mu2) / sd2
@@ -239,8 +249,8 @@ mix2normal.control <- function(trace = TRUE, ...) {
temp3 <- matrix(0, n, dimm(M))
for (ss in 1:M)
- for (tt in ss:M)
- temp3[,iam(ss,tt, M)] <- -d2l.dthetas2[,ss,tt]
+ for (tt in ss:M)
+ temp3[,iam(ss,tt, M)] <- -d2l.dthetas2[, ss, tt]
run.mean <- ((ii-1) * run.mean + temp3) / ii
}
@@ -338,9 +348,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 <- rep(if(length(.il1)) .il1 else qy[1], length = n)
- init.lambda2 <- rep(if(length(.il2)) .il2 else qy[2], length = n)
+ init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 <- rep(if (length(.il1)) .il1 else qy[1], length = n)
+ init.lambda2 <- rep(if (length(.il2)) .il2 else qy[2], length = n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
@@ -372,15 +382,24 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
f1 <- dpois(y, lam = lambda1)
f2 <- dpois(y, lam = lambda2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1 - phi)*f2))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * log(phi*f1 + (1 - phi)*f2)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2poisson"),
@@ -543,9 +562,9 @@ mix2exp.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 <- rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
- init.lambda2 <- rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
+ init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 <- rep(if (length(.il1)) .il1 else 1/qy[1], length = n)
+ init.lambda2 <- rep(if (length(.il2)) .il2 else 1/qy[2], length = n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.phi, .lphi, earg = .ephi),
theta2eta(init.lambda1, .llambda, earg = .el1),
@@ -575,16 +594,25 @@ mix2exp.control <- function(trace = TRUE, ...) {
}), list(.lphi = lphi, .llambda = llambda, .nsimEIM = nsimEIM,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
f1 <- dexp(y, rate=lambda1)
f2 <- dexp(y, rate=lambda2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1 - phi)*f2))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * log(phi*f1 + (1 - phi)*f2)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2exp"),
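mix2normal, mix2poisson and mix2exp above all share one two-component
mixture kernel, log(phi*f1 + (1-phi)*f2), now available elementwise via
summation = FALSE. Factored out as a sketch (f1, f2 are the component
densities evaluated at y):

    ## Common mixture log-likelihood element for the mix2* families.
    mix2.ll.elts <- function(w, phi, f1, f2) {
      c(w) * log(phi * f1 + (1 - phi) * f2)
    }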
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index a56b1fa..32f70c0 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -34,7 +34,7 @@ subset.lohi <- function(xvec, yvec,
min.q <- quantile(xvec, probs = probs.x[1] )
max.q <- quantile(xvec, probs = probs.x[2] )
- if(mode(type) != "character" && mode(type) != "name")
+ if (mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
type <- match.arg(type, c("median", "wtmean", "unwtmean"))[1]
@@ -83,7 +83,7 @@ micmen.control <- function(save.weight = TRUE, ...) {
init1 = NULL, init2 = NULL,
imethod = 1,
oim = TRUE,
- link1 = "identity", link2 = "identity",
+ link1 = "identitylink", link2 = "identitylink",
firstDeriv = c("nsimEIM", "rpar"),
probs.x = c(0.15, 0.85),
nsimEIM = 500,
@@ -364,7 +364,7 @@ skira.control <- function(save.weight = TRUE, ...) {
skira <- function(rpar = 0.1, divisor = 10,
init1 = NULL, init2 = NULL,
- link1 = "identity", link2 = "identity",
+ link1 = "identitylink", link2 = "identitylink",
earg1 = list(),
earg2 = list(),
imethod = 1,
@@ -416,7 +416,9 @@ skira.control <- function(save.weight = TRUE, ...) {
ncol(y) else 1
if (residuals) {
if (M > 1) NULL else (y - mu) * sqrt(w)
- } else ResSS.vgam(y - mu, w, M = M)
+ } else {
+ ResSS.vgam(y - mu, w, M = M)
+ }
},
initialize = eval(substitute(expression({
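In the micmen/skira deviance above, ResSS.vgam() supplies the weighted
residual sum of squares. For a single response this plausibly reduces
to the following (a hedged reading, not the actual internals, which
also handle matrix working weights):

    ## Assumed M = 1 behaviour of ResSS.vgam(y - mu, w, M = 1):
    ResSS.sketch <- function(resid, w) {
      sum(c(w) * resid^2)
    }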
diff --git a/R/family.normal.R b/R/family.normal.R
index 9bd8173..fc6a7ce 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -48,7 +48,7 @@ VGAM.weights.function <- function(w, M, n) {
new("vglmff",
blurb = c("Vector linear/additive model\n",
- "Links: identity for Y1,...,YM"),
+ "Links: identitylink for Y1,...,YM"),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x = x,
bool = .parallel ,
@@ -72,7 +72,8 @@ VGAM.weights.function <- function(w, M, n) {
},
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero)
}, list( .zero = zero ))),
@@ -121,7 +122,7 @@ VGAM.weights.function <- function(w, M, n) {
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link <- rep("identity", length = M)
+ misc$link <- rep("identitylink", length = M)
names(misc$link) <- predictors.names
misc$earg <- vector("list", M)
@@ -143,39 +144,57 @@ VGAM.weights.function <- function(w, M, n) {
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion ))),
loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
M <- if (is.matrix(y)) ncol(y) else 1
n <- if (is.matrix(y)) nrow(y) else length(y)
wz <- VGAM.weights.function(w = w, M = M, n = n)
- temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
-
- if (M == 1 || ncol(wz) == M) {
- -0.5 * temp1 + 0.5 * sum(log(wz)) - n * (M / 2) * log(2*pi)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
} else {
- if (all(wz[1, ] == apply(wz, 2, min)) &&
- all(wz[1, ] == apply(wz, 2, max))) {
- onewz <- m2adefault(wz[1, , drop = FALSE], M = M)
- onewz <- onewz[,, 1] # M x M
+ temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
- logdet <- determinant(onewz)$modulus
- logretval <- -0.5 * temp1 + 0.5 * n * logdet -
- n * (M / 2) * log(2*pi)
- distval <- stop("variable 'distval' not computed yet")
- logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
- logretval
- } else {
- logretval <- -0.5 * temp1 - n * (M / 2) * log(2*pi)
- for (ii in 1:n) {
- onewz <- m2adefault(wz[ii, , drop = FALSE], M = M)
- onewz <- onewz[,, 1] # M x M
- logdet <- determinant(onewz)$modulus
- logretval <- logretval + 0.5 * logdet
- }
+
+
+ ll.elts <-
+ if (M == 1 || ncol(wz) == M) {
+
+ -0.5 * temp1 + 0.5 * (log(wz)) - n * (M / 2) * log(2*pi)
+ } else {
+ if (all(wz[1, ] == apply(wz, 2, min)) &&
+ all(wz[1, ] == apply(wz, 2, max))) {
+ onewz <- m2adefault(wz[1, , drop = FALSE], M = M)
+ onewz <- onewz[,, 1] # M x M
+
+
+ logdet <- determinant(onewz)$modulus
+ logretval <- -0.5 * temp1 + 0.5 * n * logdet -
+ n * (M / 2) * log(2*pi)
+
+ distval <- stop("variable 'distval' not computed yet")
+ logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
logretval
+ } else {
+ logretval <- -0.5 * temp1 - n * (M / 2) * log(2*pi)
+ for (ii in 1:n) {
+ onewz <- m2adefault(wz[ii, , drop = FALSE], M = M)
+ onewz <- onewz[,, 1] # M x M
+ logdet <- determinant(onewz)$modulus
+ logretval <- logretval + 0.5 * logdet
+ }
+ logretval
+ }
+ }
+
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
}
},
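The restructured branch above evaluates the M-variate normal
log-density with the working weights acting as per-observation
precision matrices: for residual r and precision W, one element is
-(M/2)*log(2*pi) + log(det(W))/2 - (r' W r)/2, with the log-determinant
accumulated once when all rows of wz agree and row by row otherwise. A
direct sketch of a single element:

    ## One multivariate-normal log-density element; W plays the role
    ## of the working-weight (precision) matrix for that observation.
    ll.mvn.elt <- function(r, W) {
      M <- length(r)
      logdet <- as.numeric(determinant(W)$modulus)  # log|det(W)|
      -0.5 * (M * log(2 * pi) - logdet + sum(r * (W %*% r)))
    }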
@@ -250,7 +269,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
- posnormal <- function(lmean = "identity", lsd = "loge",
+ posnormal <- function(lmean = "identitylink", lsd = "loge",
imean = NULL, isd = NULL,
nsimEIM = 100, zero = NULL) {
warning("this VGAM family function is not working properly yet")
@@ -290,7 +309,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
par.names = c("mean", "sd"),
zero = .zero )
}, list( .zero = zero
@@ -313,8 +332,8 @@ rposnorm <- function(n, mean = 0, sd = 1) {
predictors.names <-
- c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
- namesof("sd", .lsd, earg = .esd, tag = FALSE))
+ c(namesof("mean", .lmean , earg = .emean, tag = FALSE),
+ namesof("sd", .lsd , earg = .esd, tag = FALSE))
if (!length(etastart)) {
init.me <- if (length( .imean)) rep( .imean, len = n) else NULL
@@ -323,16 +342,16 @@ rposnorm <- function(n, mean = 0, sd = 1) {
init.me <- rep(quantile(y, probs = 0.40), len = n)
if (!length(init.sd))
init.sd <- rep(sd(c(y)) * 1.2, len = n)
- etastart <- cbind(theta2eta(init.me, .lmean, earg = .emean ),
- theta2eta(init.sd, .lsd, earg = .esd ))
+ etastart <- cbind(theta2eta(init.me, .lmean , earg = .emean ),
+ theta2eta(init.sd, .lsd , earg = .esd ))
}
}), list( .lmean = lmean, .lsd = lsd,
.imean = imean, .isd = isd,
.emean = emean, .esd = esd
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd
@@ -348,20 +367,51 @@ rposnorm <- function(n, mean = 0, sd = 1) {
.emean = emean, .esd = esd,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(c(w) * dposnorm(x = y, m = mymu, sd = mysd, log = TRUE))
+ ll.elts <- c(w) * dposnorm(x = y, m = mymu, sd = mysd, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd ))),
vfamily = c("posnormal"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
+ rposnorm(nsim * length(mymu), mean = mymu, sd = mysd)
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd ))),
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
zedd <- (y-mymu) / mysd
temp7 <- dnorm(-mymu/mysd)
@@ -370,8 +420,8 @@ rposnorm <- function(n, mean = 0, sd = 1) {
dl.dmu <- zedd / mysd^2 - temp7 / temp8
dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
- dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
- dsd.deta <- dtheta.deta(mysd, .lsd, earg = .esd )
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+ dsd.deta <- dtheta.deta(mysd, .lsd , earg = .esd )
dthetas.detas <- cbind(dmu.deta, dsd.deta)
c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
}), list( .lmean = lmean, .lsd = lsd,
@@ -604,7 +654,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
- tikuv <- function(d, lmean = "identity", lsigma = "loge",
+ tikuv <- function(d, lmean = "identitylink", lsigma = "loge",
isigma = NULL, zero = 2) {
@@ -640,7 +690,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
zero = .zero)
}, list( .zero = zero ))),
@@ -650,7 +700,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
predictors.names <-
- c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ c(namesof("mean", .lmean , earg = .emean, tag = FALSE),
namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
@@ -663,14 +713,14 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}
mean.init <- rep(weighted.mean(y, w), len = n)
etastart <-
- cbind(theta2eta(mean.init, .lmean, earg = .emean ),
+ cbind(theta2eta(mean.init, .lmean , earg = .emean ),
theta2eta(sigma.init, .lsigma, earg = .esigma))
}
}),list( .lmean = lmean, .lsigma = lsigma,
.isigma = isigma, .d = d,
.emean = emean, .esigma = esigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmean, earg = .emean )
+ eta2theta(eta[, 1], .lmean , earg = .emean )
}, list( .lmean = lmean,
.emean = emean, .esigma = esigma ))),
last = eval(substitute(expression({
@@ -683,22 +733,45 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}), list( .lmean = lmean, .lsigma = lsigma, .d = d,
.emean = emean, .esigma = esigma ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dtikuv(x = y, d = .d , mean = mymu,
- sigma = sigma, log = TRUE))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dtikuv(x = y, d = .d , mean = mymu,
+ sigma = sigma, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmean = lmean, .lsigma = lsigma, .d = d,
.emean = emean, .esigma = esigma ))),
vfamily = c("tikuv"),
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
- dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
zedd <- (y - mymu) / sigma
@@ -813,7 +886,7 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
- foldnormal <- function(lmean = "identity", lsd = "loge",
+ foldnormal <- function(lmean = "identitylink", lsd = "loge",
imean = NULL, isd = NULL,
a1 = 1, a2 = 1,
nsimEIM = 500, imethod = 1, zero = NULL) {
@@ -861,7 +934,7 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
namesof("sd", lsd, earg = esd, tag = TRUE)),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
a1 = .a1 ,
a2 = .a2 ,
zero = .zero ,
@@ -884,8 +957,8 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
predictors.names <-
- c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
- namesof("sd", .lsd, earg = .esd, tag = FALSE))
+ c(namesof("mean", .lmean , earg = .emean, tag = FALSE),
+ namesof("sd", .lsd , earg = .esd, tag = FALSE))
if (!length(etastart)) {
junk <- lm.wfit(x = x, y = c(y), w = c(w))
@@ -913,16 +986,16 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
{if ( .imethod == 1) median(y) else meany}, len = n)
sd.init <- rep(if (length( .isd )) .isd else
{if ( .imethod == 1) stddev else 1.2*sd(c(y))}, len = n)
- etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean ),
- theta2eta(sd.init, .lsd, earg = .esd ))
+ etastart <- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
+ theta2eta(sd.init, .lsd , earg = .esd ))
}
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
.imean = imean, .isd = isd,
.a1 = a1, .a2 = a2, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
mytheta <- mymu / mysd
mysd * (( .a1 + .a2 ) * (mytheta * pnorm(mytheta) +
dnorm(mytheta)) - .a2 * mytheta)
@@ -946,28 +1019,37 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
.imethod = imethod, .nsimEIM = nsimEIM,
.a1 = a1, .a2 = a2 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
a1vec <- .a1
a2vec <- .a2
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(c(w) * dfoldnorm(y, mean = mymu, sd = mysd,
- a1 = a1vec, a2 = a2vec, log = TRUE))
+ ll.elts <-
+ c(w) * dfoldnorm(y, mean = mymu, sd = mysd,
+ a1 = a1vec, a2 = a2vec, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
.a1 = a1, .a2 = a2 ))),
vfamily = c("foldnormal"),
deriv = eval(substitute(expression({
- Musual <- 2
- mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ M1 <- 2
+ mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
- dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean )
- dsd.deta <- dtheta.deta(mysd, .lsd, earg = .esd )
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+ dsd.deta <- dtheta.deta(mysd, .lsd , earg = .esd )
a1vec <- .a1
a2vec <- .a2
@@ -1027,7 +1109,7 @@ lqnorm.control <- function(trace = TRUE, ...) {
lqnorm <- function(qpower = 2,
- link = "identity",
+ link = "identitylink",
imethod = 1, imu = NULL, shrinkage.init = 0.95) {
@@ -1237,8 +1319,7 @@ qtobit <- function(p, mean = 0, sd = 1,
-rtobit <- function(n, mean = 0, sd = 1,
- Lower = 0, Upper = Inf) {
+rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -1272,7 +1353,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
tobit <- function(Lower = 0, Upper = Inf,
- lmu = "identity", lsd = "loge",
+ lmu = "identitylink", lsd = "loge",
nsimEIM = 250,
imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
@@ -1322,7 +1403,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
stdTobit <- all(Lower == 0.0) &&
all(!is.finite(Upper)) &&
- all(lmu == "identity")
+ all(lmu == "identitylink")
new("vglmff",
@@ -1335,19 +1416,19 @@ tobit.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
zero = .zero ,
nsimEIM = .nsimEIM )
}, list( .zero = zero, .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
temp5 <-
@@ -1363,7 +1444,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- M <- Musual * ncoly
+ M <- M1 * ncoly
Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
@@ -1387,8 +1468,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = "")
predictors.names <-
c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
- namesof(temp2.names, .lsd, earg = .esd, tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+ namesof(temp2.names, .lsd , earg = .esd, tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
anyc <- cbind(extra$censoredL | extra$censoredU)
@@ -1414,9 +1495,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
sd.init <- matrix( .isd , n, ncoly, byrow = TRUE)
etastart <- cbind(theta2eta(mu.init, .lmu, earg = .emu ),
- theta2eta(sd.init, .lsd, earg = .esd ))
+ theta2eta(sd.init, .lsd , earg = .esd ))
- etastart <- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .Lower = Lower, .Upper = Upper,
.lmu = lmu, .lsd = lsd,
@@ -1424,9 +1505,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
.i.mu = imu, .isd = isd,
.imethod = imethod ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- Musual <- 2
- ncoly <- ncol(eta) / Musual
- mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop=FALSE], .lmu, earg = .emu )
+ M1 <- 2
+ ncoly <- ncol(eta) / M1
+ mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop=FALSE], .lmu, earg = .emu )
if ( .type.fitted == "uncensored")
return(mum)
@@ -1438,7 +1519,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
mum
} else {
- sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
.lsd , earg = .esd )
zeddL <- (Lowmat - mum) / sdm
zeddU <- (Uppmat - mum) / sdm
@@ -1458,25 +1539,25 @@ tobit.control <- function(save.weight = TRUE, ...) {
last = eval(substitute(expression({
temp0303 <- c(rep( .lmu, length = ncoly),
- rep( .lsd, length = ncoly))
+ rep( .lsd , length = ncoly))
names(temp0303) =
c(if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = ""),
if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = ""))
- temp0303 <- temp0303[interleave.VGAM(M, M = Musual)]
+ temp0303 <- temp0303[interleave.VGAM(M, M = M1)]
misc$link <- temp0303 # Already named
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .emu
- misc$earg[[Musual*ii ]] <- .esd
+ misc$earg[[M1*ii-1]] <- .emu
+ misc$earg[[M1*ii ]] <- .esd
}
misc$multipleResponses <- TRUE
misc$expected <- TRUE
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$stdTobit <- .stdTobit
misc$Lower <- Lowmat
misc$Upper <- Uppmat
@@ -1495,8 +1576,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
.Lower = Lower,
.Upper = Upper ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
y <- cbind(y)
ncoly <- ncol(y)
@@ -1507,9 +1590,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
- mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
+ mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
.lmu , earg = .emu )
- sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
.lsd , earg = .esd )
ell0 <- dnorm( y[cen0], mean = mum[cen0], sd = sdm[cen0],
@@ -1521,19 +1604,33 @@ tobit.control <- function(save.weight = TRUE, ...) {
wmat <- matrix(w, nrow = nrow(eta), ncol = ncoly)
if (residuals) {
- stop("loglikelihood residuals not ",
- "implemented yet")
+ stop("loglikelihood residuals not implemented yet")
} else {
- sum(wmat[cen0] * ell0) +
- sum(wmat[cenL] * ellL) +
- sum(wmat[cenU] * ellU)
+        ll.elts <- y  # Right dimensions only (used as a shape template)
+        ll.elts[cen0] <- wmat[cen0] * ell0
+        ll.elts[cenL] <- wmat[cenL] * ellL
+        ll.elts[cenU] <- wmat[cenU] * ellU
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
vfamily = c("tobit"),
+
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
y <- cbind(y)
ncoly <- ncol(y)
@@ -1544,17 +1641,17 @@ tobit.control <- function(save.weight = TRUE, ...) {
cenU <- extra$censoredU
cen0 <- !cenL & !cenU # uncensored obsns
- mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
+ mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
.lmu, earg = .emu )
- sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
- .lsd, earg = .esd )
+ sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
+ .lsd , earg = .esd )
zedd <- (y - mum) / sdm
dl.dmu <- zedd / sdm
dl.dsd <- (zedd^2 - 1) / sdm
dmu.deta <- dtheta.deta(mum, .lmu, earg = .emu )
- dsd.deta <- dtheta.deta(sdm, .lsd, earg = .esd )
+ dsd.deta <- dtheta.deta(sdm, .lsd , earg = .esd )
if (any(cenL)) {
mumL <- Lowmat - mum
@@ -1576,18 +1673,18 @@ tobit.control <- function(save.weight = TRUE, ...) {
}
dthetas.detas <- cbind(dmu.deta, dsd.deta)
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
myderiv <- cbind(c(w) * dl.dmu,
c(w) * dl.dsd) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
weight = eval(substitute(expression({
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
if (is.numeric( .nsimEIM ) &&
@@ -1644,53 +1741,53 @@ tobit.control <- function(save.weight = TRUE, ...) {
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
+          wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
} else {
- wz1 <- matrix(0.0, n, dimm(Musual))
+ wz1 <- matrix(0.0, n, dimm(M1))
for (spp. in 1:ncoly) {
zedd <- (y[, spp.] - mum[, spp.]) / sdm[, spp.]
zedd0 <- ( mum[, spp.]) / sdm[, spp.]
phivec <- dnorm(zedd0)
Phivec <- pnorm(zedd0)
- wz1[, iam(1, 1, M = Musual)] <- -(phivec * zedd0 -
+ wz1[, iam(1, 1, M = M1)] <- -(phivec * zedd0 -
phivec^2 / (1 - Phivec) -
Phivec)
- wz1[, iam(2, 2, M = Musual)] <- -(phivec * zedd0^3 +
+ wz1[, iam(2, 2, M = M1)] <- -(phivec * zedd0^3 +
phivec * zedd0 -
phivec^2 * zedd0^2 / (1 - Phivec) -
2 * Phivec)
- wz1[, iam(1, 2, M = Musual)] <- +(phivec * zedd0^2 +
+ wz1[, iam(1, 2, M = M1)] <- +(phivec * zedd0^2 +
phivec -
phivec^2 * zedd0 / (1 - Phivec))
wz1 <- wz1 / sdm[, spp.]^2
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
- } # End of EIM
+ } # End of EIM
temp <- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
@@ -1701,7 +1798,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
.lsd = lsd,
.stdTobit = stdTobit,
.nsimEIM = nsimEIM ))))
-} # End of tobit()
+} # End of tobit()
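Note the construction inside the tobit loglikelihood above: ll.elts <- y
serves purely as a shape template, after which the uncensored (cen0),
left-censored (cenL) and right-censored (cenU) cells are overwritten
with their weighted contributions. With summation = FALSE those
per-cell values become retrievable from a fit; a hedged usage sketch
(tdata and the formula are hypothetical, and it assumes the updated
logLik method forwards the summation argument):

    fit <- vglm(y ~ x2, tobit(Lower = 0), data = tdata)
    head(logLik(fit, summation = FALSE))  # per-observation elements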
@@ -1710,7 +1807,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
normal1 <-
- uninormal <- function(lmean = "identity", lsd = "loge", lvar = "loge",
+ uninormal <- function(lmean = "identitylink", lsd = "loge", lvar = "loge",
var.arg = FALSE,
imethod = 1,
isd = NULL,
@@ -1792,14 +1889,15 @@ tobit.control <- function(save.weight = TRUE, ...) {
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero)
}, list( .zero = zero ))),
@@ -1845,10 +1943,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
@@ -1861,7 +1959,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
if ( .var.arg )
namesof(mynames2, .lvare , earg = .evare , tag = FALSE) else
namesof(mynames2, .lsdev , earg = .esdev , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
extra$predictors.names <- predictors.names
@@ -1910,7 +2008,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
theta2eta(sdev.init^2, .lvare , earg = .evare ) else
theta2eta(sdev.init , .lsdev , earg = .esdev ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart[, interleave.VGAM(ncol(etastart), M = M1)]
colnames(etastart) <- predictors.names
}
@@ -1920,45 +2018,45 @@ tobit.control <- function(save.weight = TRUE, ...) {
.var.arg = var.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- extra$Musual
+ M1 <- extra$M1
ncoly <- extra$ncoly
if ( .lmean == "explink") {
- if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ if (any(eta[, M1*(1:ncoly) - 1] <= 0)) {
warning("turning some columns of 'eta' positive in @linkinv")
for (ii in 1:ncoly)
- eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ eta[, M1*ii - 1] <- pmax( .smallno , eta[, M1*ii - 1])
}
}
- eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ eta2theta(eta[, M1*(1:ncoly) - 1], .lmean , earg = .emean )
}, list( .lmean = lmean,
.emean = emean, .esdev = esdev , .evare = evare,
.smallno = smallno ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lmean , length = ncoly),
rep( .lsdev , length = ncoly))
- misc$link <- misc$link [interleave.VGAM(Musual * ncoly, M = Musual)]
+ misc$link <- misc$link [interleave.VGAM(M1 * ncoly, M = M1)]
temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * ncoly)
+ misc$earg <- vector("list", M1 * ncoly)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .emean
- misc$earg[[Musual*ii ]] <- if ( .var.arg ) .evare else .esdev
+ misc$earg[[M1*ii-1]] <- .emean
+ misc$earg[[M1*ii ]] <- if ( .var.arg ) .evare else .esdev
}
names(misc$earg) <- temp.names
misc$var.arg <- .var.arg
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$imethod <- .imethod
misc$multipleResponses <- TRUE
@@ -1972,27 +2070,35 @@ tobit.control <- function(save.weight = TRUE, ...) {
.var.arg = var.arg, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
ncoly <- extra$ncoly
- Musual <- extra$Musual
+ M1 <- extra$M1
if ( .lmean == "explink") {
- if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ if (any(eta[, M1*(1:ncoly) - 1] <= 0)) {
warning("turning some columns of 'eta' positive in @loglikelihood")
for (ii in 1:ncoly)
- eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ eta[, M1*ii - 1] <- pmax( .smallno , eta[, M1*ii - 1])
}
}
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvare , earg = .evare )
+ Varm <- eta2theta(eta[, M1*(1:ncoly)], .lvare , earg = .evare )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsdev , earg = .esdev )
+ sdev <- eta2theta(eta[, M1*(1:ncoly)], .lsdev , earg = .esdev )
}
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lsdev = lsdev, .lvare = lvare,
.esdev = esdev, .evare = evare,
@@ -2000,26 +2106,55 @@ tobit.control <- function(save.weight = TRUE, ...) {
.smallno = smallno,
.var.arg = var.arg ))),
vfamily = c("uninormal"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ mymu <- fitted(object)
+ eta <- predict(object)
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, c(FALSE, TRUE)], .lvare , earg = .evare )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, c(FALSE, TRUE)], .lsdev , earg = .esdev )
+ }
+ rnorm(nsim * length(mymu), mean = mymu, sd = sdev)
+ }, list( .lsdev = lsdev, .lvare = lvare,
+ .esdev = esdev, .evare = evare,
+ .lmean = lmean,
+ .smallno = smallno,
+ .var.arg = var.arg ))),
+
+
+
+
deriv = eval(substitute(expression({
ncoly <- extra$ncoly
- Musual <- extra$Musual
+ M1 <- extra$M1
if ( .lmean == "explink") {
- if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ if (any(eta[, M1*(1:ncoly) - 1] <= 0)) {
warning("turning some columns of 'eta' positive in @deriv")
for (ii in 1:ncoly)
- eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ eta[, M1*ii - 1] <- pmax( .smallno , eta[, M1*ii - 1])
}
}
- mymu <- eta2theta( eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ mymu <- eta2theta( eta[, M1*(1:ncoly) - 1], .lmean , earg = .emean )
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvare , earg = .evare )
+ Varm <- eta2theta(eta[, M1*(1:ncoly) ], .lvare , earg = .evare )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsdev , earg = .esdev )
+ sdev <- eta2theta(eta[, M1*(1:ncoly) ], .lsdev , earg = .esdev )
}
dl.dmu <- (y - mymu) / sdev^2
@@ -2040,7 +2175,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
cbind(dl.dmu * dmu.deta,
if ( .var.arg ) dl.dva * dva.deta else
dl.dsd * dsd.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
@@ -2066,8 +2201,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
ned2l.dsd2 <- 2 / sdev^2
}
- wz[, Musual*(1:ncoly) - 1] <- ned2l.dmu2 * dmu.deta^2
- wz[, Musual*(1:ncoly) ] <- if ( .var.arg ) {
+ wz[, M1*(1:ncoly) - 1] <- ned2l.dmu2 * dmu.deta^2
+ wz[, M1*(1:ncoly) ] <- if ( .var.arg ) {
ned2l.dva2 * dva.deta^2
} else {
ned2l.dsd2 * dsd.deta^2
@@ -2086,7 +2221,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
normal.vcm <-
- function(link.list = list("(Default)" = "identity"),
+ function(link.list = list("(Default)" = "identitylink"),
earg.list = list("(Default)" = list()),
lsd = "loge", lvar = "loge",
esd = list(), evar = list(),
@@ -2148,13 +2283,14 @@ tobit.control <- function(save.weight = TRUE, ...) {
if (is.character(dotzero) && dotzero == "M")
dotzero <- M
- Musual <- NA
+ M1 <- NA
eval(negzero.expression)
}), list( .zero = zero
))),
infos = eval(substitute(function(...) {
- list(Musual = NA,
+ list(M1 = NA,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -2195,7 +2331,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
if (sum(names(earg.list) == "(Default)") > 1)
stop("only one default allowed in argument 'earg.list'!")
default.link <- if (any(names(link.list) == "(Default)"))
- link.list[["(Default)"]] else "identity"
+ link.list[["(Default)"]] else "identitylink"
default.earg <- if (any(names(earg.list) == "(Default)"))
earg.list[["(Default)"]] else list()
@@ -2250,9 +2386,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
extra$ncoly <- ncoly <- ncol(y)
extra$M <- M <- ncol(Xm2) + 1 -
(length(extra$is.mlogit) > 0)
- Musual <- NA # Since this cannot be determined apriori.
+ M1 <- NA # Since this cannot be determined apriori.
- extra$Musual <- Musual
+ extra$M1 <- M1
extra$Xm2 <- Xm2 # Needed for @linkinv
extra$depvar <- y
@@ -2441,7 +2577,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
.esd = esd , .evar = evar ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(link.list.ordered,
@@ -2455,7 +2591,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
misc$var.arg <- .var.arg
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$imethod <- .imethod
misc$multipleResponses <- FALSE
@@ -2470,16 +2606,24 @@ tobit.control <- function(save.weight = TRUE, ...) {
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
if ( .var.arg ) {
Varm <- eta2theta(eta[, ncol(eta)], .lvar , earg = .evar )
sdev <- sqrt(Varm)
} else {
sdev <- eta2theta(eta[, ncol(eta)], .lsd , earg = .esd )
}
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lsd = lsd, .lvar = lvar,
.esd = esd, .evar = evar,
@@ -2665,7 +2809,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
- lognormal <- function(lmeanlog = "identity", lsdlog = "loge",
+ lognormal <- function(lmeanlog = "identitylink", lsdlog = "loge",
zero = 2) {
@@ -2704,17 +2848,17 @@ tobit.control <- function(save.weight = TRUE, ...) {
predictors.names <-
- c(namesof("meanlog", .lmulog, earg = .emulog, tag = FALSE),
- namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE))
+ c(namesof("meanlog", .lmulog , earg = .emulog, tag = FALSE),
+ namesof("sdlog", .lsdlog , earg = .esdlog, tag = FALSE))
if (!length(etastart)) {
mylm <- lm.wfit(x = x, y = c(log(y)), w = c(w))
sdlog.y.est <- sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
etastart <- cbind(
- meanlog = rep(theta2eta(log(median(y)), .lmulog,
- earg = .emulog), length = n),
- sdlog = rep(theta2eta(sdlog.y.est, .lsdlog,
- earg = .esdlog), length = n))
+ meanlog = rep(theta2eta(log(median(y)), .lmulog ,
+ earg = .emulog ), length = n),
+ sdlog = rep(theta2eta(sdlog.y.est, .lsdlog ,
+ earg = .esdlog ), length = n))
}
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
@@ -2733,22 +2877,51 @@ tobit.control <- function(save.weight = TRUE, ...) {
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mulog <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mulog <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
vfamily = c("lognormal"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mulog <- eta2theta(eta[, c(TRUE, FALSE)], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, c(FALSE, TRUE)], .lsdlog , earg = .esdlog )
+ rlnorm(nsim * length(mulog),
+ meanlog = mulog, sdlog = sdlog)
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+
+
+
deriv = eval(substitute(expression({
- mulog <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ mulog <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
- dmulog.deta <- dtheta.deta(mulog, .lmulog, earg = .emulog)
- dsdlog.deta <- dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+ dmulog.deta <- dtheta.deta(mulog, .lmulog , earg = .emulog )
+ dsdlog.deta <- dtheta.deta(sdlog, .lsdlog , earg = .esdlog )
dl.dmulog <- (log(y) - mulog) / sdlog^2
dl.dsdlog <- -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
@@ -2775,7 +2948,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
- lognormal3 <- function(lmeanlog = "identity", lsdlog = "loge",
+ lognormal3 <- function(lmeanlog = "identitylink", lsdlog = "loge",
powers.try = (-3):3,
delta = NULL, zero = 2) {
@@ -2811,7 +2984,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
"Links: ",
namesof("meanlog", lmulog, earg = emulog, tag = TRUE), "; ",
namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE), "; ",
- namesof("lambda", "identity", earg = list(), tag = TRUE)),
+ namesof("lambda", "identitylink", earg = list(), tag = TRUE)),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -2822,8 +2995,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
predictors.names <-
- c(namesof("meanlog", .lmulog, earg = .emulog, tag = FALSE),
- namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE),
+ c(namesof("meanlog", .lmulog , earg = .emulog, tag = FALSE),
+ namesof("sdlog", .lsdlog , earg = .esdlog, tag = FALSE),
"lambda")
if (!length(etastart)) {
@@ -2854,16 +3027,16 @@ tobit.control <- function(save.weight = TRUE, ...) {
.emulog = emulog, .esdlog = esdlog,
.delta = delta, .powers.try = powers.try ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ lambda <- eta2theta(eta[, 3], "identitylink", earg = list(theta = NULL))
lambda + exp(mymu + 0.5 * sdlog^2)
}, list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
last = eval(substitute(expression({
- misc$link <- c("meanlog" = .lmulog,
- "sdlog" = .lsdlog,
- "lambda" = "identity")
+ misc$link <- c("meanlog" = .lmulog ,
+ "sdlog" = .lsdlog ,
+ "lambda" = "identitylink")
misc$earg <- list("meanlog" = .emulog,
"sdlog" = .esdlog,
@@ -2873,24 +3046,53 @@ tobit.control <- function(save.weight = TRUE, ...) {
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog)
- sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog)
- lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ lambda <- eta2theta(eta[, 3], "identitylink", earg = list(theta = NULL))
if (any(y < lambda))
warning("bad 'y'")
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dlnorm(y - lambda, meanlog = mymu,
- sdlog = sdlog, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlnorm(y - lambda, meanlog = mymu,
+ sdlog = sdlog, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
vfamily = c("lognormal3"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ lambda <- eta2theta(eta[, 3], "identitylink", earg = list(theta = NULL))
+ rlnorm(nsim * length(mymu),
+ meanlog = mymu, sdlog = sdlog) + lambda
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+
+
+
deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ lambda <- eta2theta(eta[, 3], "identitylink", earg = list(theta = NULL))
if (any(y < lambda))
warning("bad 'y'")
@@ -2899,9 +3101,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
dl.dsdlog <- -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
dl.dlambda <- (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
- dmymu.deta <- dtheta.deta(mymu, .lmulog, earg = .emulog)
- dsdlog.deta <- dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
- dlambda.deta <- dtheta.deta(lambda, "identity", earg = list())
+ dmymu.deta <- dtheta.deta(mymu, .lmulog , earg = .emulog )
+ dsdlog.deta <- dtheta.deta(sdlog, .lsdlog , earg = .esdlog )
+ dlambda.deta <- dtheta.deta(lambda, "identitylink", earg = list())
c(w) * cbind(dl.dmymu * dmymu.deta,
dl.dsdlog * dsdlog.deta,
@@ -2953,11 +3155,22 @@ dskewnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
+
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ length.arg = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
rho <- shape / sqrt(1 + shape^2)
- u0 <- rnorm(n)
- v <- rnorm(n)
+ u0 <- rnorm(use.n)
+ v <- rnorm(use.n)
u1 <- rho * u0 + sqrt(1 - rho^2) * v
- ans <- location + scale * ifelse(u0 >= 0, u1, -u1)
+
+
+
+
+ ans <- location + scale * sign(u0) * u1
+
ans[scale <= 0] <- NA
ans
}
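A minimal usage sketch of the rewritten rskewnorm() above, assuming
VGAM 0.9-4 is attached: 'n' may now be a vector in the base-R style
(length(n) variates are returned), and the ifelse() sign flip becomes
sign(u0) * u1, which agrees with the old code almost surely since
P(u0 == 0) = 0.

    library(VGAM)
    set.seed(1)
    rskewnorm(5, location = 0, scale = 1, shape = 3)  # five variates
    rskewnorm(numeric(5), shape = 3)                  # also five: length(n) > 1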
@@ -2967,7 +3180,7 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
- skewnormal <- function(lshape = "identity",
+ skewnormal <- function(lshape = "identitylink",
ishape = NULL,
nsimEIM = NULL) {
@@ -2991,7 +3204,7 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
"Mean: shape * sqrt(2 / (pi * (1 + shape^2 )))\n",
"Variance: 1-mu^2"),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
nsimEIM = .nsimEIM)
}, list( .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
@@ -3045,15 +3258,45 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
theta2eta(alpha, .lshape , earg = .eshape )
}, list( .eshape = eshape, .lshape = lshape ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dskewnorm(x = y, location = 0, scale = 1,
- shape = alpha, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dskewnorm(x = y, location = 0, scale = 1,
+ shape = alpha, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .eshape = eshape, .lshape = lshape ))),
vfamily = c("skewnormal"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ alpha <- eta2theta(eta, .lshape , earg = .eshape )
+ rskewnorm(nsim * length(alpha), location = 0, scale = 1,
+ shape = alpha)
+ }, list( .eshape = eshape, .lshape = lshape ))),
+
+
+
+
+
+
deriv = eval(substitute(expression({
alpha <- eta2theta(eta, .lshape , earg = .eshape )
@@ -3121,7 +3364,7 @@ if (FALSE)
emean <- list()
- lmean <- "identity"
+ lmean <- "identitylink"
@@ -3165,13 +3408,13 @@ if (FALSE)
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
zero = .zero)
}, list( .zero = zero ))),
@@ -3217,10 +3460,10 @@ if (FALSE)
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
@@ -3286,29 +3529,29 @@ if (FALSE)
.var.arg = var.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- extra$Musual
+ M1 <- extra$M1
ncoly <- extra$ncoly
- eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ eta2theta(eta[, M1*(1:ncoly) - 1], .lmean , earg = .emean )
}, list( .esd = esd , .evar = evar,
.emean = emean,
.lmean = lmean ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lsd , length = ncoly))
temp.names <- c(mynames2)
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * ncoly)
+ misc$earg <- vector("list", M1 * ncoly)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii ]] <- if ( .var.arg ) .evar else .esd
+ misc$earg[[M1*ii ]] <- if ( .var.arg ) .evar else .esd
}
names(misc$earg) <- temp.names
misc$var.arg <- .var.arg
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$imethod <- .imethod
misc$multipleResponses <- TRUE
@@ -3320,18 +3563,26 @@ if (FALSE)
.var.arg = var.arg, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
ncoly <- extra$ncoly
- Musual <- extra$Musual
+ M1 <- extra$M1
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvar , earg = .evar )
+ Varm <- eta2theta(eta[, M1*(1:ncoly)], .lvar , earg = .evar )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
+ sdev <- eta2theta(eta[, M1*(1:ncoly)], .lsd , earg = .esd )
}
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lsd = lsd, .lvar = lvar,
.esd = esd, .evar = evar,
@@ -3339,14 +3590,14 @@ if (FALSE)
vfamily = c("halfuninormal"),
deriv = eval(substitute(expression({
ncoly <- extra$ncoly
- Musual <- extra$Musual
+ M1 <- extra$M1
mymu <- zz
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ Varm <- eta2theta(eta[, M1*(1:ncoly) ], .lvar , earg = .evar )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ sdev <- eta2theta(eta[, M1*(1:ncoly) ], .lsd , earg = .esd )
}
dl.dmu <- zz * (y - mymu) / sdev^2
@@ -3380,7 +3631,7 @@ if (FALSE)
ned2l.dsd2 <- 2 / sdev^2
}
- wz[, Musual*(1:ncoly) ] <- if ( .var.arg ) {
+ wz[, M1*(1:ncoly) ] <- if ( .var.arg ) {
ned2l.dva2 * dva.deta^2
} else {
ned2l.dsd2 * dsd.deta^2
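The simslot entries added above hook lognormal(), lognormal3() and
skewnormal() into the new simulate() support for vglm fits. A minimal
sketch with hypothetical data, assuming VGAM 0.9-4:

    library(VGAM)
    set.seed(2)
    ldata <- data.frame(y = rlnorm(100, meanlog = 0.5, sdlog = 0.4))
    fit <- vglm(y ~ 1, lognormal(), data = ldata)
    sims <- simulate(fit, nsim = 3)  # dispatches to the family's simslot
    dim(sims)                        # 100 rows, one column per simulation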
diff --git a/R/family.others.R b/R/family.others.R
index 63dc0b8..1355deb 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -173,14 +173,22 @@ rexppois <- function(n, lambda, betave = 1) {
}), list( .llambda = llambda, .lbetave = lbetave,
.elambda = elambda, .ebetave = ebetave))),
- loglikelihood = eval(substitute(function(mu, y, w,
- residuals = FALSE, eta, extra = NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dexppois(x = y, lambda = lambda, betave = betave,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dexppois(x = y, lambda = lambda, betave = betave,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lbetave = lbetave , .llambda = llambda ,
.elambda = elambda , .ebetave = ebetave ))),
@@ -398,16 +406,23 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
.eshape = eshape, .escale = escale,
.nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(function(mu, y, w,
- residuals = FALSE, eta, extra = NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals",
- "not implemented yet") else {
- sum(c(w) * dgenray(x = y, shape = shape,
- scale = Scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgenray(x = y, shape = shape,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape , .lscale = lscale ,
.eshape = eshape , .escale = escale ))),
@@ -658,16 +673,23 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(function(mu, y, w,
- residuals = FALSE, eta, extra = NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals",
- "not implemented yet") else {
- sum(c(w) * dexpgeom(x = y, scale = Scale, shape = shape,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dexpgeom(x = y, scale = Scale, shape = shape,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale , .lshape = lshape ,
.escale = escale , .eshape = eshape ))),
@@ -929,17 +951,25 @@ explogff.control <- function(save.weight = TRUE, ...) {
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(function(mu, y, w,
- residuals = FALSE, eta, extra = NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals",
- "not implemented yet") else {
- sum(c(w) * dexplog(x = y, scale = Scale,
- shape = shape, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dexplog(x = y, scale = Scale,
+ shape = shape, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale , .lshape = lshape ,
.escale = escale , .eshape = eshape ))),
@@ -1155,7 +1185,7 @@ rtpn <- function(n, location = 0, scale = 1, skewpar = 0.5) {
-tpnff <- function(llocation = "identity", lscale = "loge",
+tpnff <- function(llocation = "identitylink", lscale = "loge",
pp = 0.5, method.init = 1, zero = 2)
{
if (!is.Numeric(method.init, length.arg = 1,
@@ -1228,20 +1258,20 @@ tpnff <- function(llocation = "identity", lscale = "loge",
}
}
etastart <- cbind(
- theta2eta(location.init, .llocat, earg = .elocat),
- theta2eta(scale.y.est, .lscale, earg = .escale))
+ theta2eta(location.init, .llocat , earg = .elocat ),
+ theta2eta(scale.y.est, .lscale , earg = .escale ))
}
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.method.init=method.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
}, list( .llocat = llocat,
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$link <- c("location" = .llocat , "scale" = .lscale )
- misc$earg <- list("location" = .elocat, "scale" = .escale)
+ misc$earg <- list("location" = .elocat , "scale" = .escale )
misc$expected <- TRUE
misc$pp <- .pp
@@ -1250,23 +1280,32 @@ tpnff <- function(llocation = "identity", lscale = "loge",
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.pp = pp, .method.init = method.init ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
ppay <- .pp
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dtpn(y, skewpar = ppay, location = location,
- scale = myscale, log.arg = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dtpn(y, skewpar = ppay, location = location,
+ scale = myscale, log.arg = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.pp = pp ))),
vfamily = c("tpnff"),
deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ mylocat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
mypp <- .pp
zedd <- (y - mylocat) / myscale
@@ -1314,9 +1353,9 @@ tpnff <- function(llocation = "identity", lscale = "loge",
########################################################################
-tpnff3 <- function(llocation = "identity",
+tpnff3 <- function(llocation = "identitylink",
lscale = "loge",
- lskewpar = "identity",
+ lskewpar = "identitylink",
method.init = 1, zero = 2)
{
if (!is.Numeric(method.init, length.arg = 1,
@@ -1413,19 +1452,28 @@ tpnff3 <- function(llocation = "identity",
}), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
.elocat = elocat, .escale = escale, .eskewp = eskewp,
.method.init = method.init ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
- myskew <- eta2theta(eta[, 3], .lskewp, earg = .eskewp)
-
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dtpn(y, location = locat, scale = myscale,
- skewpar = myskew, log.arg = TRUE))
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ myskew <- eta2theta(eta[, 3], .lskewp , earg = .eskewp )
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dtpn(y, location = locat, scale = myscale,
+ skewpar = myskew, log.arg = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
- }, list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
- .elocat = elocat, .escale = escale, .eskewp = eskewp
+ }, list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+ .elocat = elocat, .escale = escale, .eskewp = eskewp
))),
vfamily = c("tpnff3"),
deriv = eval(substitute(expression({
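Throughout this patch the "identity" link is renamed "identitylink"
(avoiding the clash with base::identity), so explicit link names in
user code need the new spelling. A sketch with simulated data, assuming
VGAM 0.9-4; with the default skewpar of 0.5 the two-piece normal of
tpnff() is symmetric, so rnorm() data suffice for illustration:

    library(VGAM)
    set.seed(3)
    tdata <- data.frame(x2 = runif(100))
    tdata <- transform(tdata, y = rnorm(100, mean = 1 + 2 * x2))
    fit.tpn <- vglm(y ~ x2, tpnff(llocation = "identitylink"), data = tdata)
    coef(fit.tpn, matrix = TRUE)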
diff --git a/R/family.positive.R b/R/family.positive.R
index 17d08af..ea83b84 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -176,6 +176,7 @@ N.hat.posbernoulli <-
rposbern <-
function(n, nTimePts = 5, pvars = length(xcoeff),
xcoeff = c(-2, 1, 2),
+ Xmatrix = NULL, # If is.null(Xmatrix) then it is created
cap.effect = 1,
is.popn = FALSE,
link = "logit",
@@ -212,6 +213,7 @@ rposbern <-
link <- attr(earg, "function.name")
+ cap.effect.orig <- cap.effect
Ymatrix <- matrix(0, use.n, nTimePts,
@@ -224,13 +226,15 @@ rposbern <-
sep = "")))
- Xmatrix <- cbind(x1 = rep(1.0, len = use.n))
- if (pvars > 1)
- Xmatrix <- cbind(Xmatrix,
- matrix(runif(n = use.n * (pvars-1)),
- use.n, pvars - 1,
- dimnames = list(as.character(1:use.n),
- paste("x", 2:pvars, sep = ""))))
+ if (is.null(Xmatrix)) {
+ Xmatrix <- cbind(x1 = rep(1.0, len = use.n))
+ if (pvars > 1)
+ Xmatrix <- cbind(Xmatrix,
+ matrix(runif(n = use.n * (pvars-1)),
+ use.n, pvars - 1,
+ dimnames = list(as.character(1:use.n),
+ paste("x", 2:pvars, sep = ""))))
+ }
lin.pred.baseline <- xcoeff[1]
@@ -239,11 +243,13 @@ rposbern <-
Xmatrix[, 2:pvars, drop = FALSE] %*%
xcoeff[2:pvars]
sumrowy <- rep(0, length = use.n)
+ cap.effect <- rep(cap.effect.orig, length = use.n)
for (jlocal in 1:nTimePts) {
CHmatrix[, jlocal] <- as.numeric(sumrowy > 0)
- lin.pred <- lin.pred.baseline + (CHmatrix[, jlocal] > 0) * cap.effect
+ caught.before.TF <- (CHmatrix[, jlocal] > 0)
+ lin.pred <- lin.pred.baseline + caught.before.TF * cap.effect
Ymatrix[, jlocal] <-
rbinom(use.n, size = 1,
@@ -273,11 +279,13 @@ rposbern <-
ans[1:orig.n, ]
} else {
rbind(ans,
- Recall(n = orig.n - nrow(ans),
- nTimePts = nTimePts, pvars = pvars,
- xcoeff = xcoeff,
- cap.effect = cap.effect,
- link = earg, earg.link = TRUE))
+ Recall(n = orig.n - nrow(ans),
+ nTimePts = nTimePts,
+ pvars = pvars,
+ xcoeff = xcoeff,
+ cap.effect = cap.effect.orig,
+ link = earg,
+ earg.link = TRUE))
}
}
@@ -285,7 +293,7 @@ rposbern <-
attr(ans, "pvars") <- pvars
attr(ans, "nTimePts") <- nTimePts
- attr(ans, "cap.effect") <- cap.effect
+ attr(ans, "cap.effect") <- cap.effect.orig
attr(ans, "is.popn") <- is.popn
attr(ans, "n") <- n
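The new 'Xmatrix' argument to rposbern() above lets the caller supply
the design matrix instead of having intercept-plus-runif() covariates
generated internally. A sketch, assuming VGAM 0.9-4; is.popn = TRUE is
used so the n rows of Xmatrix plausibly map one-to-one onto the
simulated population:

    library(VGAM)
    set.seed(4)
    n <- 100
    myX <- cbind(x1 = rep(1, n), x2 = runif(n))  # intercept + one covariate
    pdata <- rposbern(n = n, nTimePts = 4, xcoeff = c(-2, 1),
                      Xmatrix = myX, is.popn = TRUE)
    head(pdata)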
@@ -457,11 +465,12 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
lmunb = .lmunb ,
emunb = .emunb ,
lsize = .lsize ,
@@ -472,7 +481,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
.imethod = imethod ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y == 0))
stop("there are zero values in the response")
@@ -495,7 +504,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
- M <- Musual * ncol(y)
+ M <- M1 * ncol(y)
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
predictors.names <- c(
@@ -505,7 +514,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
namesof(if (NOS == 1) "size" else
paste("size", 1:NOS, sep = ""),
.lsize, earg = .esize, tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
mu.init <- y
@@ -541,18 +550,18 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
cbind(
theta2eta(mu.init * (1 - p00), .lmunb, earg = .emunb ),
theta2eta(kmat0, .lsize, earg = .esize ))
- etastart <- etastart[,interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[,interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
.emunb = emunb, .esize = esize,
.sinit = shrinkage.init,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb, earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS), drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS), drop = FALSE],
.lsize, earg = .esize )
po0 <- (kmat / (kmat + munb))^kmat
munb / (1 - po0)
@@ -564,14 +573,14 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
names(temp0303) =
c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 <- temp0303[interleave.VGAM(M, M = Musual)]
+ temp0303 <- temp0303[interleave.VGAM(M, M = M1)]
misc$link <- temp0303 # Already named
- misc$earg <- vector("list", Musual*NOS)
+ misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .emunb
- misc$earg[[Musual*ii ]] <- .esize
+ misc$earg[[M1*ii-1]] <- .emunb
+ misc$earg[[M1*ii ]] <- .esize
}
misc$nsimEIM <- .nsimEIM
@@ -580,33 +589,66 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
.emunb = emunb, .esize = esize,
.nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb, earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize, earg = .esize )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize ))),
vfamily = c("posnegbinomial"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lsize, earg = .esize )
+ rposnegbin(nsim * length(munb), size = kmat, munb = munb)
+ }, list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
NOS <- extra$NOS
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
dmunb.deta <- dtheta.deta(munb, .lmunb, earg = .emunb )
dsize.deta <- dtheta.deta(kmat, .lsize, earg = .esize )
- NOS <- ncol(eta) / Musual
+ NOS <- ncol(eta) / M1
tempk <- kmat / (kmat + munb)
@@ -629,12 +671,12 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
myderiv <- c(w) * cbind(dl.dmunb * dmunb.deta,
dl.dsize * dsize.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize ))),
weight = eval(substitute(expression({
run.varcov =
- wz <- matrix(0.0, n, 2 * Musual * NOS - 1)
+ wz <- matrix(0.0, n, 2 * M1 * NOS - 1)
@@ -653,7 +695,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
{
- ind2 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind2 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
ysim <- rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
dim(ysim) <- c(n, NOS)
@@ -672,14 +714,14 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
small.varcov <- temp2[, ind2$row.index] *
temp2[, ind2$col.index]
- run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] =
- run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] +
- c(small.varcov[, 1:Musual])
- run.varcov[, M + (kk-1)*Musual + 1] =
- run.varcov[, M + (kk-1)*Musual + 1] +
- c(small.varcov[, Musual + 1])
+ run.varcov[, ((kk-1)*M1+1):(kk*M1)] =
+ run.varcov[, ((kk-1)*M1+1):(kk*M1)] +
+ c(small.varcov[, 1:M1])
+ run.varcov[, M + (kk-1)*M1 + 1] =
+ run.varcov[, M + (kk-1)*M1 + 1] +
+ c(small.varcov[, M1 + 1])
}
- } # ii
+ } # ii
run.varcov <- cbind(run.varcov / .nsimEIM )
wz <- if (intercept.only)
@@ -688,7 +730,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
}
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .nsimEIM = nsimEIM ))))
}
@@ -849,12 +891,13 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
namesof("lambda", link, earg = earg, tag = FALSE)),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
link = .link ,
earg = .earg)
}, list( .link = link, .earg = earg ))),
@@ -874,10 +917,10 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
y <- temp5$y
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
@@ -915,20 +958,44 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
for (ii in 1:M)
misc$earg[[ii]] <- .earg
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg, .expected = expected ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta, .link , earg = .earg )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * dpospois(x = y, lambda = lambda, log = TRUE))
+ ll.elts <- c(w) * dpospois(x = y, lambda = lambda, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("pospoisson"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ lambda <- eta2theta(eta, .link , earg = .earg )
+ rpospois(nsim * length(lambda), lambda)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
deriv = eval(substitute(expression({
lambda <- eta2theta(eta, .link , earg = .earg )
@@ -1083,11 +1150,12 @@ dposbinom <- function(x, size, prob, log = FALSE) {
constraints = constraints)
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
p.small = .p.small ,
no.warning = .no.warning ,
zero = .zero )
@@ -1112,10 +1180,10 @@ dposbinom <- function(x, size, prob, log = FALSE) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$p.small <- .p.small
@@ -1224,7 +1292,9 @@ if (length(extra$tau)) {
.omit.constant = omit.constant ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
ycounts <- if ( .mv ) {
round(y * extra$orig.w)
@@ -1241,21 +1311,62 @@ if (length(extra$tau)) {
use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else 1
binprob <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
-
- answer <- sum(use.orig.w * dposbinom(x = ycounts, size = nvec,
- prob = binprob, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ answer <- c(use.orig.w) * dposbinom(x = ycounts, size = nvec,
+ prob = binprob, log = TRUE)
if ( .omit.constant ) {
- answer <- answer - sum(use.orig.w * lchoose(n = nvec, k = ycounts))
+ answer <- answer - c(use.orig.w) * lchoose(n = nvec, k = ycounts)
+ }
+ ll.elts <- answer
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
- answer
}
}, list( .link = link, .earg = earg,
.mv = mv,
.omit.constant = omit.constant ))),
vfamily = c("posbinomial"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+
+ if ( .mv )
+ stop("cannot run simulate() when 'mv = TRUE'")
+
+ eta <- predict(object)
+ binprob <- eta2theta(eta, .link , earg = .earg )
+
+ extra <- object@extra
+ w <- extra$w # Usual code
+ w <- pwts # 20140101
+
+
+ nvec <- if ( .mv ) {
+ w
+ } else {
+ if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ }
+ rposbinom(nsim * length(eta), size = nvec, prob = binprob)
+ }, list( .link = link, .earg = earg,
+ .mv = mv,
+ .omit.constant = omit.constant ))),
+
+
+
+
+
deriv = eval(substitute(expression({
use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else
rep(1, n)
@@ -1355,7 +1466,7 @@ if (length(extra$tau)) {
}), list( .parallel.t = parallel.t,
.apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
multipleResponses = TRUE,
p.small = .p.small ,
no.warning = .no.warning ,
@@ -1367,7 +1478,7 @@ if (length(extra$tau)) {
.apply.parint = apply.parint ))),
initialize = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
mustart.orig <- mustart
y <- as.matrix(y)
@@ -1474,23 +1585,29 @@ if (length(extra$tau)) {
.apply.parint = apply.parint,
.iprob = iprob ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
ycounts <- y
use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
probs <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
-
- sum(dposbern(x = ycounts, # size = 1, # Bernoulli trials
- prob = probs, prob0 = probs, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(use.orig.w *
+ ll.elts <-
+ c(use.orig.w) *
dposbern(x = ycounts, # size = 1, # Bernoulli trials
- prob = probs, prob0 = probs, log = TRUE))
+ prob = probs, prob0 = probs, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("posbernoulli.t"),
@@ -1613,7 +1730,7 @@ if (length(extra$tau)) {
.apply.parint.b = apply.parint.b ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
p.small = .p.small ,
no.warning = .no.warning ,
type.fitted = .type.fitted ,
@@ -1627,7 +1744,7 @@ if (length(extra$tau)) {
))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (!is.matrix(y) || ncol(y) == 1)
stop("the response appears to be univariate")
@@ -1799,7 +1916,9 @@ if (length(extra$tau)) {
.apply.parint.b = apply.parint.b
))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
tau <- extra$ncoly
ycounts <- y
@@ -1810,19 +1929,38 @@ if (length(extra$tau)) {
prc <- matrix(cap.probs, nrow(eta), tau)
prr <- matrix(rec.probs, nrow(eta), tau)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
probs.numer <- prr
mat.index <- cbind(1:nrow(prc), extra$cap1)
probs.numer[mat.index] <- prc[mat.index]
probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
- sum(use.orig.w *
+ ll.elts <-
+ c(use.orig.w) *
dposbern(x = ycounts, # Bernoulli trials
- prob = probs.numer, prob0 = prc, log = TRUE))
+ prob = probs.numer, prob0 = prc, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("posbernoulli.b"),
+
+
+
+
+
+
+
+
+
+
+
+
deriv = eval(substitute(expression({
cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
@@ -2012,7 +2150,7 @@ if (length(extra$tau)) {
.apply.parint.d = apply.parint.d,
.apply.parint.t = apply.parint.t ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
multipleResponses = TRUE,
ridge.constant = .ridge.constant ,
ridge.power = .ridge.power ,
@@ -2038,7 +2176,7 @@ if (length(extra$tau)) {
.apply.parint.t = apply.parint.t ))),
initialize = eval(substitute(expression({
- Musual <- 2 # Not quite true
+ M1 <- 2 # Not quite true
if (ncol(cbind(w)) > 1)
@@ -2053,7 +2191,7 @@ if (length(extra$tau)) {
extra$ycounts <- y
extra$type.fitted <- .type.fitted
extra$dimnamesy <- dimnames(y)
- M <- Musual * tau - 1 # recap.prob.1 is unused
+ M <- M1 * tau - 1 # recap.prob.1 is unused
mustart <- (y + matrix(apply(y, 2, weighted.mean, w = w),
@@ -2221,7 +2359,9 @@ if (length(extra$tau)) {
.ridge.power = ridge.power,
.iprob = iprob ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
tau <- extra$ncoly
taup1 <- tau + 1
@@ -2235,17 +2375,23 @@ if (length(extra$tau)) {
probs[, taup1:ncol(probs)]) # 1st coln ignored
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
-
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
probs.numer <- prr
mat.index <- cbind(1:nrow(prc), extra$cap1)
probs.numer[mat.index] <- prc[mat.index]
probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
- sum(use.orig.w *
+ ll.elts <-
+ c(use.orig.w) *
dposbern(x = ycounts, # size = 1, # Bernoulli trials
- prob = probs.numer, prob0 = prc, log = TRUE))
+ prob = probs.numer, prob0 = prc, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("posbernoulli.tb"),
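The positive (zero-truncated) families gain simslot entries as well,
so simulate() now covers posnegbinomial() and pospoisson() fits, and
posbinomial() fits when mv = FALSE (per the guard above). A sketch
with hypothetical data, assuming VGAM 0.9-4:

    library(VGAM)
    set.seed(5)
    pdat <- data.frame(y = rpospois(80, lambda = 3))
    fit.pp <- vglm(y ~ 1, pospoisson(), data = pdat)
    head(simulate(fit.pp, nsim = 2))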
diff --git a/R/family.qreg.R b/R/family.qreg.R
index f4ebc85..7ec0bcc 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -30,8 +30,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
lms.bcn <- function(percentiles = c(25, 50, 75),
zero = c(1, 3),
- llambda = "identity",
- lmu = "identity",
+ llambda = "identitylink",
+ lmu = "identitylink",
lsigma = "loge",
dfmu.init = 4,
dfsigma.init = 2,
@@ -148,10 +148,13 @@ lms.yjn.control <- function(trace = TRUE, ...)
.percentiles = percentiles, .expectiles = expectiles,
.tol0 = tol0 ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
- muvec <- eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ muvec <- eta2theta(eta[, 2], .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 3], .lsigma , earg = .esigma )
zedd <- ((y/muvec)^lambda - 1) / (lambda * sigma)
log.dz.dy <- (lambda - 1) * log(y/muvec) - log(muvec * sigma)
@@ -162,9 +165,15 @@ lms.yjn.control <- function(trace = TRUE, ...)
log.dz.dy[is.eff.0] <- -log(y[is.eff.0] * sigma[is.eff.0])
}
- if (residuals) stop("loglikelihood residuals not ",
- "implemented") else {
- sum(c(w) * (dnorm(zedd, log = TRUE) + log.dz.dy))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (dnorm(zedd, log = TRUE) + log.dz.dy)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
@@ -211,8 +220,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
lms.bcg <- function(percentiles = c(25, 50, 75),
zero = c(1, 3),
- llambda = "identity",
- lmu = "identity",
+ llambda = "identitylink",
+ lmu = "identitylink",
lsigma = "loge",
dfmu.init=4,
dfsigma.init = 2,
@@ -244,7 +253,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
namesof("mu", link = lmu, earg = emu), ", ",
namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero = zero))),
initialize = eval(substitute(expression({
@@ -259,28 +268,29 @@ lms.yjn.control <- function(trace = TRUE, ...)
if (!length(etastart)) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = y, w = w, df = .dfmu.init)
- fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
-
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
-
- sigma.init = if (is.null(.isigma)) {
- myratio = ((y/fv.init)^lambda.init-1) / lambda.init
- if (is.numeric( .dfsigma.init ) &&
- is.finite( .dfsigma.init )) {
- fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y=(myratio)^2,
- w = w, df = .dfsigma.init)
- sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
- } else
- sqrt(var(myratio))
+ Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y, w = w, df = .dfmu.init)
+ fv.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+
+ lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.0
+
+ sigma.init <- if (is.null( .isigma )) {
+ myratio <- ((y/fv.init)^lambda.init-1) / lambda.init
+ if (is.numeric( .dfsigma.init ) &&
+ is.finite( .dfsigma.init )) {
+ fit600 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = (myratio)^2,
+ w = w, df = .dfsigma.init )
+ sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
+ } else {
+ sqrt(var(myratio))
+ }
} else .isigma
etastart <-
- cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
- theta2eta(fv.init, .lmu, earg = .emu),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ cbind(theta2eta(lambda.init, .llambda , earg = .elambda ),
+ theta2eta(fv.init, .lmu , earg = .emu ),
+ theta2eta(sigma.init, .lsigma , earg = .esigma ))
}
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
@@ -288,40 +298,49 @@ lms.yjn.control <- function(trace = TRUE, ...)
.dfsigma.init = dfsigma.init,
.ilambda = ilambda, .isigma = isigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 2] <- eta2theta(eta[, 2], .lmu, earg = .emu)
- eta[, 3] <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ eta[, 1] <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ eta[, 2] <- eta2theta(eta[, 2], .lmu , earg = .emu )
+ eta[, 3] <- eta2theta(eta[, 3], .lsigma , earg = .esigma )
qtplot.lms.bcg(percentiles = .percentiles, eta = eta)
}, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles = percentiles ))),
last = eval(substitute(expression({
- misc$link <- c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$link <- c(lambda = .llambda, mu = .lmu, sigma = .lsigma )
- misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma)
+ misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
misc$percentiles <- .percentiles
misc$true.mu <- FALSE # $fitted is not a true mu
if (control$cdf) {
- post$cdf <- cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
- ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
+ post$cdf <- cdf.lms.bcg(y, eta0 = matrix(c(lambda, mymu, sigma),
+ ncol = 3, dimnames = list(dimnames(x)[[1]], NULL)))
}
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu <- eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- Gee <- (y / mu)^lambda
- theta <- 1 / (sigma * lambda)^2
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (log(abs(lambda)) + theta * (log(theta) +
- log(Gee)-Gee) - lgamma(theta) - log(y)))
- }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ Gee <- (y / mu)^lambda
+ theta <- 1 / (sigma * lambda)^2
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (log(abs(lambda)) + theta * (log(theta) +
+ log(Gee)-Gee) - lgamma(theta) - log(y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
vfamily = c("lms.bcg", "lmscreg"),
deriv = eval(substitute(expression({
lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
@@ -634,8 +653,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
lms.yjn2 <- function(percentiles = c(25, 50, 75),
zero = c(1, 3),
- llambda = "identity",
- lmu = "identity",
+ llambda = "identitylink",
+ lmu = "identitylink",
lsigma = "loge",
dfmu.init=4,
dfsigma.init = 2,
@@ -674,7 +693,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
", ",
namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero = zero))),
initialize = eval(substitute(expression({
@@ -755,27 +774,36 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
y <- y.save # Restore back the value; to be attached to object
if (control$cdf) {
- post$cdf <- cdf.lms.yjn(y + misc$yoffset,
- eta0=matrix(c(lambda,mymu,sigma),
- ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
- }
- }), list(.percentiles = percentiles,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .nsimEIM=nsimEIM,
- .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
- loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu <- eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi <- yeo.johnson(y, lambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
- (lambda-1) * sign(y) * log1p(abs(y))))
- }, list( .elambda = elambda, .emu = emu, .esigma = esigma,
- .llambda = llambda, .lmu = lmu,
- .lsigma = lsigma ))),
+ post$cdf <- cdf.lms.yjn(y + misc$yoffset,
+ eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles = percentiles,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .nsimEIM=nsimEIM,
+ .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ mu <- eta2theta(eta[, 2], .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 3], .lsigma , earg = .esigma )
+ psi <- yeo.johnson(y, lambda)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
+ (lambda-1) * sign(y) * log1p(abs(y)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .elambda = elambda, .emu = emu, .esigma = esigma,
+ .llambda = llambda, .lmu = lmu,
+ .lsigma = lsigma ))),
vfamily = c("lms.yjn2", "lmscreg"),
deriv = eval(substitute(expression({
lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
@@ -832,11 +860,11 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
lms.yjn <- function(percentiles = c(25, 50, 75),
zero = c(1, 3),
- llambda = "identity",
+ llambda = "identitylink",
lsigma = "loge",
- dfmu.init=4,
+ dfmu.init = 4,
dfsigma.init = 2,
- ilambda=1.0,
+ ilambda = 1.0,
isigma = NULL,
rule = c(10, 5),
yoffset = NULL,
@@ -877,9 +905,9 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
predictors.names <-
- c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ c(namesof("lambda", .llambda, earg = .elambda , short = TRUE),
"mu",
- namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
+ namesof("sigma", .lsigma, earg = .esigma , short = TRUE))
y.save <- y
yoff <- if (is.Numeric( .yoffset )) .yoffset else -median(y)
@@ -939,7 +967,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
.llambda = llambda,
.lsigma = lsigma))),
last = eval(substitute(expression({
- misc$link <- c(lambda = .llambda, mu = "identity",
+ misc$link <- c(lambda = .llambda, mu = "identitylink",
sigma = .lsigma)
misc$earg <- list(lambda = .elambda, mu = list(theta = NULL),
@@ -963,17 +991,27 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
.llambda = llambda,
.lsigma = lsigma))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu <- eta[, 2]
- sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi <- yeo.johnson(y, lambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
- (lambda-1) * sign(y) * log1p(abs(y))))
- }, list( .esigma = esigma, .elambda = elambda,
- .lsigma = lsigma, .llambda = llambda))),
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ mu <- eta[, 2]
+ sigma <- eta2theta(eta[, 3], .lsigma , earg = .esigma )
+ psi <- yeo.johnson(y, lambda)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
+ (lambda-1) * sign(y) * log1p(abs(y)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .esigma = esigma, .elambda = elambda,
+ .lsigma = lsigma, .llambda = llambda))),
vfamily = c("lms.yjn", "lmscreg"),
deriv = eval(substitute(expression({
lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
@@ -1090,7 +1128,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
as.integer(length(gleg.abs)), as.double(lambda),
as.double(mymu), as.double(sigma), answer = double(3*n),
- eps=as.double(1.0e-5), PACKAGE = "VGAM")$ans
+ eps=as.double(1.0e-5))$ans
dim(temp9) <- c(3,n)
wz[,iam(1, 1, M)] <- temp9[1,]
wz[,iam(1, 2, M)] <- temp9[2,]
@@ -1171,7 +1209,7 @@ Wr2 <- function(r, w) (r <= 0) * 1 + (r > 0) * w
amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
- eta, extra = NULL) {
+ eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1186,8 +1224,9 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
all.deviances <- numeric(M)
myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
for (ii in 1:M)
- all.deviances[ii] <- sum(c(w) * devi[, ii] *
- Wr1(myresid[, ii], w=extra$w.aml[ii]))
+ all.deviances[ii] <- sum(c(w) * devi[, ii] *
+ Wr1(myresid[, ii],
+ w = extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
all.deviances else sum(all.deviances)
@@ -1196,7 +1235,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
amlnormal <- function(w.aml = 1, parallel = FALSE,
- lexpectile = "identity",
+ lexpectile = "identitylink",
iexpectile = NULL,
imethod = 1, digw = 4) {
@@ -1229,7 +1268,8 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
bool = .parallel ,
constraints = constraints)
}), list( .parallel = parallel ))),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ deviance = function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL) {
amlnormal.deviance(mu = mu, y = y, w = w, residuals = residuals,
eta = eta, extra = extra)
},
@@ -1311,8 +1351,9 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
extra$individual <- TRUE
if (!(M > 1 && ncol(cbind(w)) == M)) {
- extra$deviance <- amlnormal.deviance(mu = mu, y = y, w = w,
- residuals = FALSE, eta = eta, extra = extra)
+ extra$deviance <-
+ amlnormal.deviance(mu = mu, y = y, w = w,
+ residuals = FALSE, eta = eta, extra = extra)
names(extra$deviance) <- extra$y.names
}
}), list( .lexpectile = lexpectile,
@@ -1824,7 +1865,7 @@ rho1check <- function(u, tau = 0.5)
dalap <- function(x, location = 0, scale = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE) {
+ kappa = sqrt(tau/(1-tau)), log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1852,7 +1893,7 @@ dalap <- function(x, location = 0, scale = 1, tau = 0.5,
ralap <- function(n, location = 0, scale = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau))) {
+ kappa = sqrt(tau/(1-tau))) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
@@ -1871,7 +1912,7 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
palap <- function(q, location = 0, scale = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau))) {
+ kappa = sqrt(tau/(1-tau))) {
NN <- max(length(q), length(location), length(scale), length(kappa),
length(tau))
@@ -1895,7 +1936,7 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
qalap <- function(p, location = 0, scale = 1, tau = 0.5,
- kappa = sqrt(tau / (1 - tau))) {
+ kappa = sqrt(tau / (1 - tau))) {
NN <- max(length(p), length(location), length(scale), length(kappa),
length(tau))
@@ -1930,7 +1971,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau))) {
+ kappa = sqrt(tau/(1-tau))) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
@@ -1948,7 +1989,7 @@ rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE) {
+ kappa = sqrt(tau/(1-tau)), log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1983,7 +2024,7 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
qloglap <- function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN <- max(length(p), length(location.ald), length(scale.ald),
length(kappa))
p <- rep(p, length.out = NN)
@@ -2049,7 +2090,7 @@ rlogitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE) {
+ kappa = sqrt(tau/(1-tau)), log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -2082,7 +2123,7 @@ dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
tau = tau, kappa = kappa)
ans <- logit(qqq, inverse = TRUE) # earg = earg
@@ -2095,7 +2136,7 @@ qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
plogitlap <- function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN <- max(length(q), length(location.ald), length(scale.ald),
length(kappa))
location.ald <- rep(location.ald, length.out = NN);
@@ -2336,7 +2377,7 @@ alaplace2.control <- function(maxit = 100, ...) {
alaplace2 <- function(tau = NULL,
- llocation = "identity", lscale = "loge",
+ llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
kappa = sqrt(tau / (1-tau)),
shrinkage.init = 0.95,
@@ -2437,7 +2478,7 @@ alaplace2.control <- function(maxit = 100, ...) {
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
constraints <- cm.zero.vgam(constraints, x, z.Index, M)
@@ -2458,11 +2499,11 @@ alaplace2.control <- function(maxit = 100, ...) {
.intparloc = intparloc,
.zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
zero = .zero)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- extra$Musual <- Musual <- 2
+ extra$M1 <- M1 <- 2
temp5 <-
@@ -2486,7 +2527,7 @@ alaplace2.control <- function(maxit = 100, ...) {
extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
extra$Mdiv2 <- Mdiv2 <- max(ncoly, length( .kappa ))
- extra$M <- M <- Musual * Mdiv2
+ extra$M <- M <- M1 * Mdiv2
extra$n <- n
@@ -2508,7 +2549,7 @@ alaplace2.control <- function(maxit = 100, ...) {
c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE),
namesof(mynames2, .lscale , earg = .escale, tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names[interleave.VGAM(M, M = M1)]
@@ -2553,7 +2594,7 @@ alaplace2.control <- function(maxit = 100, ...) {
etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
- etastart <- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .imethod = imethod,
.dfmu.init = dfmu.init,
@@ -2582,19 +2623,19 @@ alaplace2.control <- function(maxit = 100, ...) {
.fittedMean = fittedMean,
.kappa = kappa ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
tmp34 <- c(rep( .llocat , length = Mdiv2),
rep( .lscale , length = Mdiv2))
names(tmp34) <- c(mynames1, mynames2)
- tmp34 <- tmp34[interleave.VGAM(M, M = Musual)]
+ tmp34 <- tmp34[interleave.VGAM(M, M = M1)]
misc$link <- tmp34 # Already named
misc$earg <- vector("list", M)
- misc$Musual <- Musual
+ misc$M1 <- M1
for (ii in 1:Mdiv2) {
- misc$earg[[Musual * ii - 1]] <- .elocat
- misc$earg[[Musual * ii ]] <- .escale
+ misc$earg[[M1 * ii - 1]] <- .elocat
+ misc$earg[[M1 * ii ]] <- .escale
}
names(misc$earg) <- names(misc$link)
@@ -2620,36 +2661,71 @@ alaplace2.control <- function(maxit = 100, ...) {
.fittedMean = fittedMean,
.intparloc = intparloc,
.kappa = kappa ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
Mdiv2 <- extra$Mdiv2
ymat <- matrix(y, extra$n, extra$Mdiv2)
kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
locat <- eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
- .llocat , earg = .elocat )
+ .llocat , earg = .elocat )
Scale <- eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
- .lscale , earg = .escale )
+ .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dalap(x = c(ymat), location = c(locat),
- scale = c(Scale), kappa = c(kappamat),
- log = TRUE))
+ ll.elts <- c(w) * dalap(x = c(ymat), location = c(locat),
+ scale = c(Scale), kappa = c(kappamat),
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
.kappa = kappa ))),
vfamily = c("alaplace2"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ extra <- object@extra
+ locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+ kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
+ ralap(nsim * length(Scale), location = c(locat),
+ scale = c(Scale), kappa = c(kappamat))
+ }, list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .kappa = kappa ))),
+
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
Mdiv2 <- extra$Mdiv2
ymat <- matrix(y, n, Mdiv2)
- locat <- eta2theta(eta[, Musual * (1:(Mdiv2)) - 1, drop = FALSE],
+ locat <- eta2theta(eta[, M1 * (1:(Mdiv2)) - 1, drop = FALSE],
.llocat , earg = .elocat )
- Scale <- eta2theta(eta[, Musual * (1:(Mdiv2)) , drop = FALSE],
+ Scale <- eta2theta(eta[, M1 * (1:(Mdiv2)) , drop = FALSE],
.lscale , earg = .escale )
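The @simslot methods added in this release all follow the template seen
above for alaplace2(): recover the linear predictors with predict(),
back-transform each column to a distribution parameter via eta2theta(),
and draw nsim deviates per observation with the family's r-function
(here ralap()). A minimal usage sketch, relying on the new simulate()
support in R/simulate.vglm.R; the data and variable names are
illustrative only:

    library(VGAM)
    set.seed(1)
    adata <- data.frame(x2 = runif(100))
    adata <- transform(adata, y = 1 + 2 * x2 + rnorm(100))
    fit <- vglm(y ~ x2, alaplace2(tau = 0.25), data = adata)
    sims <- simulate(fit, nsim = 5)  # each column drawn via the @simslot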
@@ -2664,7 +2740,7 @@ alaplace2.control <- function(maxit = 100, ...) {
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat,
@@ -2675,8 +2751,8 @@ alaplace2.control <- function(maxit = 100, ...) {
d2l.dlocat2 <- 2 / Scale^2
d2l.dscale2 <- 1 / Scale^2
- wz[, Musual*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
- wz[, Musual*(1:Mdiv2) ] <- d2l.dscale2 * dscale.deta^2
+ wz[, M1*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
+ wz[, M1*(1:Mdiv2) ] <- d2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .escale = escale, .lscale = lscale,
@@ -2702,7 +2778,7 @@ alaplace1.control <- function(maxit = 100, ...) {
alaplace1 <- function(tau = NULL,
- llocation = "identity",
+ llocation = "identitylink",
ilocation = NULL,
kappa = sqrt(tau/(1-tau)),
Scale.arg = 1,
@@ -2800,13 +2876,13 @@ alaplace1.control <- function(maxit = 100, ...) {
}), list( .parallelLocation = parallelLocation,
.intparloc = intparloc ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
tau = .tau,
kappa = .kappa)
}, list( .kappa = kappa,
.tau = tau ))),
initialize = eval(substitute(expression({
- extra$Musual <- Musual <- 1
+ extra$M1 <- M1 <- 1
temp5 <-
@@ -2906,8 +2982,8 @@ alaplace1.control <- function(maxit = 100, ...) {
.fittedMean = fittedMean, .Scale.arg = Scale.arg,
.kappa = kappa ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
- misc$Musual <- Musual
+ M1 <- extra$M1
+ misc$M1 <- M1
misc$multipleResponses <- TRUE
tmp34 <- c(rep( .llocat , length = M))
@@ -2940,23 +3016,54 @@ alaplace1.control <- function(maxit = 100, ...) {
.llocat = llocat,
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.kappa = kappa ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
ymat <- matrix(y, extra$n, extra$M)
- kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
locat <- eta2theta(eta, .llocat , earg = .elocat )
- Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dalap(x = c(ymat), locat = c(locat),
- scale = c(Scale), kappa = c(kappamat), log = TRUE))
+ ll.elts <- c(w) * dalap(x = c(ymat), locat = c(locat),
+ scale = c(Scale), kappa = c(kappamat),
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .elocat = elocat,
.llocat = llocat,
.Scale.arg = Scale.arg, .kappa = kappa ))),
vfamily = c("alaplace1"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ extra <- object@extra
+ locat <- eta2theta(eta, .llocat , .elocat )
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ ralap(nsim * length(Scale), location = c(locat),
+ scale = c(Scale), kappa = c(kappamat))
+ }, list( .elocat = elocat, .llocat = llocat,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+
+
+
deriv = eval(substitute(expression({
ymat <- matrix(y, n, M)
Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
@@ -2999,7 +3106,7 @@ alaplace3.control <- function(maxit = 100, ...) {
alaplace3 <- function(
- llocation = "identity", lscale = "loge", lkappa = "loge",
+ llocation = "identitylink", lscale = "loge", lkappa = "loge",
ilocation = NULL, iscale = NULL, ikappa = 1.0,
imethod = 1, zero = 2:3) {
@@ -3103,15 +3210,22 @@ alaplace3.control <- function(maxit = 100, ...) {
.escale = escale, .lscale = lscale,
.ekappa = ekappa, .lkappa = lkappa ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa ) # a matrix
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dalap(x = y, locat = locat,
- scale = Scale, kappa = kappa, log = TRUE))
+ ll.elts <- c(w) * dalap(x = y, locat = locat,
+ scale = Scale, kappa = kappa, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
@@ -3225,7 +3339,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
}
- laplace <- function(llocation = "identity", lscale = "loge",
+ laplace <- function(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1, zero = 2) {
@@ -3317,14 +3431,22 @@ rlaplace <- function(n, location = 0, scale = 1) {
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dlaplace(x = y, locat = locat,
- scale = Scale, log = TRUE))
+ ll.elts <- c(w) * dlaplace(x = y, locat = locat,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))),
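Every @loglikelihood rewritten in this file now shares one skeleton:
form ll.elts, the weighted per-observation log-likelihood contributions,
then let the new summation argument choose between sum(ll.elts) and the
vector itself. A standalone sketch of that convention, with dnorm()
standing in for the family density (the function name is illustrative,
not part of the VGAM API):

    ll.skeleton <- function(y, w = 1, mu = 0, sigma = 1,
                            summation = TRUE) {
      # weighted per-observation log-likelihood contributions
      ll.elts <- c(w) * dnorm(y, mean = mu, sd = sigma, log = TRUE)
      if (summation) sum(ll.elts) else ll.elts
    }
    set.seed(2)
    yy <- rnorm(5)
    ll.skeleton(yy)                     # a single number, as before
    ll.skeleton(yy, summation = FALSE)  # five per-observation terms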
@@ -3454,32 +3576,40 @@ fff.control <- function(save.weight = TRUE, ...) {
}), list( .link = link, .earg = earg,
.ncp = ncp,
.nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- df1 <- eta2theta(eta[, 1], .link , earg = .earg )
- df2 <- eta2theta(eta[, 2], .link , earg = .earg )
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(c(w) * df(x = y, df1 = df1, df2 = df2,
- ncp = .ncp, log = TRUE))
- }
- }, list( .link = link, .earg = earg, .ncp=ncp ))),
- vfamily = c("fff"),
- deriv = eval(substitute(expression({
- df1 <- eta2theta(eta[, 1], .link , earg = .earg )
- df2 <- eta2theta(eta[, 2], .link , earg = .earg )
- dl.ddf1 <- 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
- 0.5*log(y) - 0.5*digamma(0.5*df1) -
- 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
- 0.5*digamma(0.5*df2) -
- 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- ddf1.deta <- dtheta.deta(df1, .link , earg = .earg )
- ddf2.deta <- dtheta.deta(df2, .link , earg = .earg )
- dthetas.detas <- cbind(ddf1.deta, ddf2.deta)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
+ df1 <- eta2theta(eta[, 1], .link , earg = .earg )
+ df2 <- eta2theta(eta[, 2], .link , earg = .earg )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * df(x = y, df1 = df1, df2 = df2,
+ ncp = .ncp , log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .link = link, .earg = earg, .ncp=ncp ))),
+ vfamily = c("fff"),
+ deriv = eval(substitute(expression({
+ df1 <- eta2theta(eta[, 1], .link , earg = .earg )
+ df2 <- eta2theta(eta[, 2], .link , earg = .earg )
+ dl.ddf1 <- 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(y) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ ddf1.deta <- dtheta.deta(df1, .link , earg = .earg )
+ ddf2.deta <- dtheta.deta(df2, .link , earg = .earg )
+ dthetas.detas <- cbind(ddf1.deta, ddf2.deta)
c(w) * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
@@ -3590,7 +3720,8 @@ fff.control <- function(save.weight = TRUE, ...) {
theta2eta(mu, .lprob, earg = .earg )
}, list( .lprob = lprob, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
N <- extra$Nvector
Dvec <- extra$Dvector
prob <- mu
@@ -3598,19 +3729,24 @@ fff.control <- function(save.weight = TRUE, ...) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
+ ll.elts <-
if (extra$Nunknown) {
tmp12 <- Dvec * (1-prob) / prob
- sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
- lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
-
+ (lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
+ lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
} else {
- sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
- lgamma(1+N*prob-yvec) -
- lgamma(1+N*(1-prob) -w + yvec))
+ (lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
+ lgamma(1+N*prob-yvec) -
+ lgamma(1+N*(1-prob) -w + yvec))
+ }
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
}
}, list( .lprob = lprob, .earg = earg ))),
@@ -3753,12 +3889,12 @@ rbenini <- function(n, shape, y0) {
"Median: qbenini(p = 0.5, shape, y0)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
lshape = .lshape ,
eshape = .eshape)
}, list( .eshape = eshape,
@@ -3779,10 +3915,10 @@ rbenini <- function(n, shape, y0) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -3817,7 +3953,7 @@ rbenini <- function(n, shape, y0) {
qbenini(p = 0.5, shape, y0 = extra$y0)
}, list( .lshape = lshape, .eshape = eshape ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lshape , length = ncoly))
names(misc$link) <- mynames1
@@ -3827,7 +3963,7 @@ rbenini <- function(n, shape, y0) {
misc$earg[[ii]] <- .eshape
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -3837,16 +3973,45 @@ rbenini <- function(n, shape, y0) {
}), list( .lshape = lshape,
.eshape = eshape, .y0 = y0 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta, .lshape , earg = .eshape )
y0 <- extra$y0
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dbenini(x = y, shape=shape, y0 = y0, log = TRUE))
+ ll.elts <- c(w) * dbenini(x = y, shape = shape, y0 = y0, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape, .eshape = eshape ))),
vfamily = c("benini"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ extra <- object@extra
+ shape <- eta2theta(eta, .lshape , earg = .eshape )
+ y0 <- extra$y0
+ rbenini(nsim * length(shape), shape = shape, y0 = y0)
+ }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+
+
deriv = eval(substitute(expression({
shape <- eta2theta(eta, .lshape , earg = .eshape )
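benini()'s fitted values are the medians qbenini(p = 0.5, shape, y0),
and its new @simslot draws from rbenini(). A quick consistency check of
those two helpers (the shape and y0 values are illustrative):

    library(VGAM)
    set.seed(3)
    yy <- rbenini(1e5, shape = 2, y0 = 1)
    median(yy)                       # empirical median
    qbenini(0.5, shape = 2, y0 = 1)  # theoretical median; should agree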
@@ -4109,7 +4274,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
"Link: ",
namesof("theta", link, earg = earg)),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
link = .link )
}, list( .link = link ))),
@@ -4158,18 +4323,46 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
misc$expected <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
Theta <- eta2theta(eta, .link , earg = .earg )
lower <- extra$lower
upper <- extra$upper
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dtriangle(x = y, theta = Theta, lower = lower,
- upper = upper, log = TRUE))
+ ll.elts <- c(w) * dtriangle(x = y, theta = Theta, lower = lower,
+ upper = upper, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("triangle"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ extra <- object@extra
+ Theta <- eta2theta(eta, .link , earg = .earg )
+ lower <- extra$lower
+ upper <- extra$upper
+ rtriangle(nsim * length(Theta),
+ theta = Theta, lower = lower, upper = upper)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
deriv = eval(substitute(expression({
Theta <- eta2theta(eta, .link , earg = .earg )
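The triangle() @simslot uses rtriangle(theta, lower, upper), where
theta is the mode of the triangular density on (lower, upper). A short
sanity check of the density/sampler pair (parameter values are
illustrative):

    library(VGAM)
    set.seed(4)
    xx <- rtriangle(1e5, theta = 0.3, lower = 0, upper = 1)
    mean(xx)  # theoretical mean is (0 + 1 + 0.3)/3 = 0.4333
    integrate(dtriangle, lower = 0, upper = 1, theta = 0.3)$value  # about 1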
@@ -4244,7 +4437,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
ilocat <- ilocation
- llocat.identity <- as.list(substitute("identity"))
+ llocat.identity <- as.list(substitute("identitylink"))
elocat.identity <- link2list(llocat.identity)
llocat.identity <- attr(elocat.identity, "function.name")
@@ -4414,24 +4607,35 @@ loglaplace1.control <- function(maxit = 300, ...) {
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.minquantile = minquantile, .maxquantile = maxquantile,
.rep0 = rep0, .kappa = kappa ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
ymat <- matrix(y, extra$n, extra$M)
-
if ( .llocat == "loge")
- ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
+ ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0 = .rep0)
+
w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logoff()
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- ALDans <- sum(c(w) * dalap(x = c(w.mat), locat = c(eta),
- scale = c(Scale.w), kappa = c(kappamat),
- log = TRUE))
- ALDans
- }
+
+
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dalap(x = c(w.mat), locat = c(eta),
+ scale = c(Scale.w), kappa = c(kappamat),
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .elocat = elocat, .llocat = llocat,
.rep0 = rep0,
.Scale.arg = Scale.arg, .kappa = kappa ))),
@@ -4692,26 +4896,36 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
.fittedMean = fittedMean,
.nsimEIM = nsimEIM, .rep0 = rep0,
.kappa = kappa ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+
kappamat <- matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
- Scale.w <- eta2theta(eta[,(1+extra$M/2):extra$M],
- .lscale , earg = .escale )
+ Scale.w <- eta2theta(eta[, (1+extra$M/2):extra$M],
+ .lscale , earg = .escale )
ymat <- matrix(y, extra$n, extra$M/2)
- ymat[ymat <= 0] <- min(min(y[y > 0]), .rep0) # Adjust for 0s
+ ymat[ymat <= 0] <- min(min(y[y > 0]), .rep0 ) # Adjust for 0s
ell.mat <- matrix(c(dloglaplace(x = c(ymat),
- locat.ald = c(eta[,1:(extra$M/2)]),
- scale.ald = c(Scale.w),
- kappa = c(kappamat), log = TRUE)),
- extra$n, extra$M/2)
+ locat.ald = c(eta[, 1:(extra$M/2)]),
+ scale.ald = c(Scale.w),
+ kappa = c(kappamat), log = TRUE)),
+ extra$n, extra$M/2)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * ell.mat)
+ ll.elts <- c(w) * ell.mat
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
.rep0 = rep0, .kappa = kappa ))),
+
+
vfamily = c("loglaplace2"),
deriv = eval(substitute(expression({
ymat <- matrix(y, n, M/2)
@@ -4825,7 +5039,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
ilocat <- ilocation
- llocat.identity <- as.list(substitute("identity"))
+ llocat.identity <- as.list(substitute("identitylink"))
elocat.identity <- link2list(llocat.identity)
llocat.identity <- attr(elocat.identity, "function.name")
@@ -4986,26 +5200,37 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.rep01 = rep01,
.kappa = kappa ))),
+
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+
kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- ymat <- matrix(y, extra$n, extra$M)
+ ymat <- matrix(y, extra$n, extra$M)
ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
- rep01 = .rep01)
+ rep01 = .rep01)
w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ALDans =
- sum(c(w) * dalap(x = c(w.mat), location = c(eta),
- scale = c(Scale.w), kappa = c(kappamat),
- log = TRUE))
- ALDans
+ ll.elts <-
+ c(w) * dalap(x = c(w.mat), location = c(eta),
+ scale = c(Scale.w), kappa = c(kappamat),
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .elocat = elocat, .llocat = llocat,
.rep01 = rep01,
.Scale.arg = Scale.arg, .kappa = kappa ))),
+
+
vfamily = c("logitlaplace1"),
deriv = eval(substitute(expression({
ymat <- matrix(y, n, M)
@@ -5021,7 +5246,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
dlocat.deta <- dtheta.deta(locat.w,
- "identity",
+ "identitylink",
earg = .elocat.identity )
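The [dpqr]loglap() functions touched above are the log-Laplace
counterparts of the asymmetric Laplace distribution (ALD): Y = exp(W)
has a log-Laplace distribution when W is asymmetric Laplace with the
same location/scale/tau. A quick empirical check of that relationship
(parameter values are illustrative):

    library(VGAM)
    set.seed(5)
    y1 <- rloglap(1e5, location.ald = 0.5, scale.ald = 0.2, tau = 0.25)
    y2 <- exp(ralap(1e5, location = 0.5, scale = 0.2, tau = 0.25))
    c(median(y1), median(y2))  # both should be close to the theoretical
    qloglap(0.5, location.ald = 0.5, scale.ald = 0.2, tau = 0.25)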
diff --git a/R/family.quantal.R b/R/family.quantal.R
index 704f147..be36bbc 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -115,29 +115,37 @@
.type.fitted = type.fitted ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
- prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
- prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
- mymu <- prob0 + (1 - prob0) * prob1
+ prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
+ prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
+ mymu <- prob0 + (1 - prob0) * prob1
- if (residuals) {
- w * (y / mymu - (1 - y) / (1 - mymu))
+ if (residuals) {
+ w * (y / mymu - (1 - y) / (1 - mymu))
+ } else {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ smallno <- 1.0e6 * .Machine$double.eps
+ smallno <- sqrt(.Machine$double.eps)
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
+
+ ll.elts <-
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbinom(x = ycounts, size = nvec, prob = mymu, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
} else {
- ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- smallno <- 1.0e6 * .Machine$double.eps
- smallno <- sqrt(.Machine$double.eps)
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts <- round(ycounts)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbinom(x = ycounts, size = nvec, prob = mymu, log = TRUE))
+ ll.elts
}
+ }
}, list( .link0 = link0, .earg0 = earg0,
.link1 = link1, .earg1 = earg1 ))),
@@ -420,12 +428,13 @@ abbott.EM.control <- function(maxit = 1000, ...) {
"Mean: mu"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -449,10 +458,10 @@ abbott.EM.control <- function(maxit = 1000, ...) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$lambda <- matrix( .ilambda , n, M, byrow = TRUE)
extra$orig.w <- w
@@ -490,7 +499,7 @@ abbott.EM.control <- function(maxit = 1000, ...) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -500,7 +509,7 @@ abbott.EM.control <- function(maxit = 1000, ...) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$multipleResponses <- TRUE
misc$imethod <- .imethod
misc$iprob <- .iprob
@@ -513,15 +522,23 @@ abbott.EM.control <- function(maxit = 1000, ...) {
.b1.arg = b1.arg, .b2.arg = b2.arg,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
prob <- eta2theta(eta, .link , earg = .earg )
mymu <- extra$lambda + (1 - extra$lambda) * prob # Eqn (3)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- sum(c(w) * dbinom(x = y, prob = mymu,
- size = nvec, log = TRUE))
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ll.elts <- c(w) * dbinom(x = y, prob = mymu,
+ size = nvec, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("abbott.EM"),
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 2ea2863..c4b6542 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,7 +20,7 @@
function(y,
family = poissonff,
Rank = 0,
- Musual = NULL,
+ M1 = NULL,
weights = NULL,
which.linpred = 1,
Index.corner = ifelse(is.null(str0), 0, max(str0)) + 1:Rank,
@@ -84,26 +84,26 @@
efamily <- family
- if (!is.Numeric(Musual)) {
+ if (!is.Numeric(M1)) {
iefamily <- efamily at infos
if (is.function(iefamily))
- Musual <- (iefamily())$Musual
- if (is.Numeric(Musual))
- Musual <- abs(Musual)
+ M1 <- (iefamily())$M1
+ if (is.Numeric(M1))
+ M1 <- abs(M1)
}
- if (!is.Numeric(Musual)) {
+ if (!is.Numeric(M1)) {
if (!is.Numeric(M))
- warning("cannot determine the value of 'Musual'.",
+ warning("cannot determine the value of 'M1'.",
"Assuming the value one.")
- Musual <- 1
+ M1 <- 1
}
- M <- if (is.null(M)) Musual * ncol(y) else M
+ M <- if (is.null(M)) M1 * ncol(y) else M
- special <- (M > 1) && (Musual == 1)
+ special <- (M > 1) && (M1 == 1)
@@ -264,12 +264,12 @@
}
- if (Musual > 1) {
+ if (M1 > 1) {
orig.Hlist <- Hlist
- kmat1 <- matrix(0, nrow = Musual, ncol = 1)
+ kmat1 <- matrix(0, nrow = M1, ncol = 1)
kmat1[which.linpred, 1] <- 1
- kmat0 <- (diag(Musual))[, -which.linpred, drop = FALSE]
+ kmat0 <- (diag(M1))[, -which.linpred, drop = FALSE]
for (ii in 1:length(Hlist)) {
Hlist[[ii]] <- kronecker(Hlist[[ii]], kmat1)
@@ -462,8 +462,8 @@ setMethod("summary", "rcim",
...) {
- nparff <- if (is.numeric(object@family@infos()$Musual)) {
- object@family@infos()$Musual
+ nparff <- if (is.numeric(object@family@infos()$M1)) {
+ object@family@infos()$M1
} else {
1
}
@@ -932,7 +932,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
row.index <- (1:Mdot)[Hk.row != 0]
- all.labels <- vlabel(factorname, ncolBlist = Mdot, M = M)
+ all.labels <- vlabel(factorname, ncolHlist = Mdot, M = M)
all.labels[row.index]
} else {
factorname
@@ -1006,7 +1006,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
covmat <- covmat[perm, perm, drop = FALSE]
}
}
- } # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ } # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
return(Recall(covmat,
@@ -1026,7 +1026,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
if ((LLL <- dim(covmat)[1]) <= 2)
stop("This function works only for factors with 3 ",
"or more levels")
- } # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ } # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index e648772..4fa41a8 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.robust.R b/R/family.robust.R
index fc2689b..1d1c7e6 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -94,9 +94,9 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
qhuber <- function (p, k = 0.862, mu = 0, sigma = 1) {
- if(min(sigma) <= 0)
+ if (min(sigma) <= 0)
stop("argument 'sigma' must be positive")
- if(min(k) <= 0)
+ if (min(k) <= 0)
stop("argument 'k' must be positive")
cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
@@ -131,7 +131,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
- huber2 <- function(llocation = "identity", lscale = "loge",
+ huber2 <- function(llocation = "identitylink", lscale = "loge",
k = 0.862, imethod = 1, zero = 2) {
@@ -222,19 +222,26 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.k = k, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
- kay <- .k
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dhuber(y, k = kay, mu = location, sigma = myscale,
- log = TRUE))
- }
- }, list( .llocat = llocat, .lscale = lscale,
- .elocat = elocat, .escale = escale,
- .k = k ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kay <- .k
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dhuber(y, k = kay, mu = location, sigma = myscale,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .k = k ))),
vfamily = c("huber2"),
deriv = eval(substitute(expression({
mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
@@ -286,7 +293,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
- huber1 <- function(llocation = "identity",
+ huber1 <- function(llocation = "identitylink",
k = 0.862,
imethod = 1) {
@@ -363,18 +370,26 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
}), list( .llocat = llocat,
.elocat = elocat,
.k = k, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta, .llocat, earg = .elocat)
- kay <- .k
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dhuber(y, k = kay, mu = location, sigma = 1,
- log = TRUE))
- }
- }, list( .llocat = llocat,
- .elocat = elocat,
- .k = k ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ location <- eta2theta(eta, .llocat , earg = .elocat )
+ kay <- .k
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dhuber(y, k = kay, mu = location, sigma = 1,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .llocat = llocat,
+ .elocat = elocat,
+ .k = k ))),
vfamily = c("huber1"),
deriv = eval(substitute(expression({
mylocat <- eta2theta(eta, .llocat, earg = .elocat)
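huber2() above now takes llocation = "identitylink" (the new name of
the identity link throughout this release) and the summation-aware
@loglikelihood shown earlier. A minimal fitting sketch on simulated
Huber-distributed responses (the data and variable names are
illustrative):

    library(VGAM)
    set.seed(6)
    hdata <- data.frame(x2 = sort(runif(200)))
    hdata <- transform(hdata, y = 1 + 5 * x2 + rhuber(200, k = 0.862))
    fit <- vglm(y ~ x2, huber2(k = 0.862), data = hdata)
    coef(fit, matrix = TRUE)  # location and log(scale) coefficients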
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 759a571..0affebb 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,11 +8,11 @@
-replace.constraints <- function(Blist, cm, index) {
+replace.constraints <- function(Hlist, cm, index) {
for (iii in index)
- Blist[[iii]] <- cm
- Blist
+ Hlist[[iii]] <- cm
+ Hlist
}
@@ -42,18 +42,18 @@ replace.constraints <- function(Blist, cm, index) {
-qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
+qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
Rank <- ncol(numat)
moff <- NULL
ans <- if (Quadratic) {
index <- iam(NA, NA, M = Rank, diag = TRUE, both = TRUE)
temp1 <- cbind(numat[,index$row] * numat[,index$col])
- if (ITolerances) {
+ if (I.tolerances) {
moff <- 0
for (ii in 1:Rank)
moff <- moff - 0.5 * temp1[, ii]
}
- cbind(numat, if (ITolerances) NULL else temp1)
+ cbind(numat, if (I.tolerances) NULL else temp1)
} else {
as.matrix(numat)
}
@@ -64,7 +64,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
valt <- function(x, z, U, Rank = 1,
- Blist = NULL,
+ Hlist = NULL,
Cinit = NULL,
Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
@@ -110,8 +110,8 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
stop("'p2', the number of variables for the ",
"reduced-rank regression, must be > 0")
- if (!length(Blist)) {
- Blist <- replace.constraints(vector("list", p), diag(M), 1:p)
+ if (!length(Hlist)) {
+ Hlist <- replace.constraints(vector("list", p), diag(M), 1:p)
}
dU <- dim(U)
@@ -124,7 +124,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
diag(M), 1:Rank)
if (p1) {
for (kk in 1:p1)
- clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
+ clist2[[Rank+kk]] <- Hlist[[colx1.index[kk]]]
}
if (is.null(Cinit))
@@ -142,13 +142,13 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
latvar.mat <- x[, colx2.index, drop = FALSE] %*% C
new.latvar.model.matrix <- cbind(latvar.mat,
if (p1) x[, colx1.index] else NULL)
- fit <- vlm.wfit(xmat = new.latvar.model.matrix, z, Blist = clist2,
+ fit <- vlm.wfit(xmat = new.latvar.model.matrix, z, Hlist = clist2,
U = U, matrix.out = TRUE, is.vlmX = FALSE,
res.ss = FALSE, qr = FALSE, xij = xij)
A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
- clist1 <- replace.constraints(Blist, A, colx2.index)
- fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U,
+ clist1 <- replace.constraints(Hlist, A, colx2.index)
+ fit <- vlm.wfit(xmat = x, z, Hlist = clist1, U = U,
matrix.out = TRUE, is.vlmX = FALSE,
res.ss = TRUE, qr = FALSE, xij = xij)
C <- fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
@@ -203,7 +203,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
if (p1) x[, colx1.index] else NULL)
try <- vlm.wfit(xmat = try.new.latvar.model.matrix, z,
- Blist = clist2, U = U, matrix.out = TRUE,
+ Hlist = clist2, U = U, matrix.out = TRUE,
is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
xij = xij)
if (try$res.ss < ftemp) {
@@ -243,7 +243,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
lm2qrrvlm.model.matrix <-
- function(x, Blist, C, control, assign = TRUE,
+ function(x, Hlist, C, control, assign = TRUE,
no.thrills = FALSE) {
Rank <- control$Rank
@@ -251,14 +251,14 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
Quadratic <- control$Quadratic
Dzero <- control$Dzero
Corner <- control$Corner
- ITolerances <- control$ITolerances
+ I.tolerances <- control$I.tolerances
- M <- nrow(Blist[[1]])
+ M <- nrow(Hlist[[1]])
p1 <- length(colx1.index)
combine2 <- c(control$str0,
if (Corner) control$Index.corner else NULL)
- Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
+ Qoffset <- if (Quadratic) ifelse(I.tolerances, 0, sum(1:Rank)) else 0
NoA <- length(combine2) == M # No unknown parameters in A
clist2 <- if (NoA) {
Aoffset <- 0
@@ -269,22 +269,22 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
if (length(combine2)) diag(M)[, -combine2, drop = FALSE] else diag(M),
1:Rank) # If Corner then does not contain \bI_{Rank}
}
- if (Quadratic && !ITolerances)
+ if (Quadratic && !I.tolerances)
clist2 <- replace.constraints(clist2,
- if (control$EqualTolerances)
+ if (control$eq.tolerances)
matrix(1, M, 1) - eijfun(Dzero, M) else {
if (length(Dzero)) diag(M)[,-Dzero, drop = FALSE] else diag(M)},
Aoffset + (1:Qoffset))
if (p1)
for (kk in 1:p1)
- clist2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
+ clist2[[Aoffset+Qoffset+kk]] <- Hlist[[colx1.index[kk]]]
if (!no.thrills) {
i63 <- iam(NA, NA, M=Rank, both = TRUE)
names(clist2) <- c(
if (NoA) NULL else paste("(latvar", 1:Rank, ")", sep = ""),
- if (Quadratic && Rank == 1 && !ITolerances)
+ if (Quadratic && Rank == 1 && !I.tolerances)
"(latvar^2)" else
- if (Quadratic && Rank>1 && !ITolerances)
+ if (Quadratic && Rank>1 && !I.tolerances)
paste("(latvar", i63$row, ifelse(i63$row == i63$col, "^2",
paste("*latvar", i63$col, sep = "")), ")", sep = "") else NULL,
if (p1) names(colx1.index) else NULL)
@@ -293,7 +293,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
latvar.mat <- x[, control$colx2.index, drop = FALSE] %*% C
- tmp900 <- qrrvglm.xprod(latvar.mat, Aoffset, Quadratic, ITolerances)
+ tmp900 <- qrrvglm.xprod(latvar.mat, Aoffset, Quadratic, I.tolerances)
new.latvar.model.matrix <- cbind(tmp900$matrix,
if (p1) x[,colx1.index] else NULL)
if (!no.thrills)
@@ -324,23 +324,23 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
-valt.2iter <- function(x, z, U, Blist, A, control) {
+valt.2iter <- function(x, z, U, Hlist, A, control) {
- clist1 <- replace.constraints(Blist, A, control$colx2.index)
- fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U, matrix.out = TRUE,
+ clist1 <- replace.constraints(Hlist, A, control$colx2.index)
+ fit <- vlm.wfit(xmat = x, z, Hlist = clist1, U = U, matrix.out = TRUE,
is.vlmX = FALSE, res.ss = TRUE, qr = FALSE, xij = control$xij)
C <- fit$mat.coef[control$colx2.index, , drop = FALSE] %*%
A %*% solve(t(A) %*% A)
list(A = A, C = C,
fitted = fit$fitted, new.coeffs = fit$coef,
- Blist = clist1, res.ss = fit$res.ss)
+ Hlist = clist1, res.ss = fit$res.ss)
}
-valt.1iter <- function(x, z, U, Blist, C, control,
+valt.1iter <- function(x, z, U, Hlist, C, control,
lp.names = NULL, nice31 = FALSE,
MSratio = 1) {
@@ -351,10 +351,11 @@ valt.1iter <- function(x, z, U, Blist, C, control,
M <- ncol(zedd <- as.matrix(z))
NOS <- M / MSratio
Corner <- control$Corner
- ITolerances <- control$ITolerances
+ I.tolerances <- control$I.tolerances
- Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
- tmp833 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist, C=C, control=control)
+ Qoffset <- if (Quadratic) ifelse(I.tolerances, 0, sum(1:Rank)) else 0
+ tmp833 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist, C = C,
+ control = control)
new.latvar.model.matrix <- tmp833$new.latvar.model.matrix
clist2 <- tmp833$constraints # Does not contain \bI_{Rank}
latvar.mat <- tmp833$latvar.mat
@@ -372,7 +373,7 @@ valt.1iter <- function(x, z, U, Blist, C, control,
tmp100 <- vlm.wfit(xmat = new.latvar.model.matrix,
zedd[, i5, drop = FALSE],
- Blist = clist2,
+ Hlist = clist2,
U = U[i5,, drop = FALSE],
matrix.out = TRUE,
is.vlmX = FALSE, res.ss = TRUE,
@@ -387,7 +388,7 @@ valt.1iter <- function(x, z, U, Blist, C, control,
}
} else {
fit <- vlm.wfit(xmat = new.latvar.model.matrix,
- zedd, Blist = clist2, U = U,
+ zedd, Hlist = clist2, U = U,
matrix.out = TRUE,
is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
Eta.range = control$Eta.range,
@@ -405,7 +406,7 @@ valt.1iter <- function(x, z, U, Blist, C, control,
if (Corner)
fv[,Index.corner] <- fv[,Index.corner] + latvar.mat
Dmat <- if (Quadratic) {
- if (ITolerances) {
+ if (I.tolerances) {
tmp800 <- matrix(0, M, Rank*(Rank+1)/2)
tmp800[if (MSratio == 2) c(TRUE, FALSE) else
TRUE, 1:Rank] <- -0.5
@@ -466,7 +467,7 @@ rrr.init.expression <- expression({
rrr.alternating.expression <- expression({
alt <- valt(x, z, U, Rank = Rank,
- Blist = Blist,
+ Hlist = Hlist,
Cinit = rrcontrol$Cinit,
Criterion = rrcontrol$Criterion,
colx1.index = rrcontrol$colx1.index,
@@ -481,12 +482,12 @@ rrr.alternating.expression <- expression({
ans2 <- rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x = x)
- Amat <- ans2$A # Fed into Blist below (in rrr.end.expression)
+ Amat <- ans2$A # Fed into Hlist below (in rrr.end.expression)
tmp.fitted <- alt$fitted # Also fed; was alt2$fitted
rrcontrol$Cinit <- ans2$C # For next valt() call
- eval(rrr.end.expression) # Put Amat into Blist, and create new z
+ eval(rrr.end.expression) # Put Amat into Hlist, and create new z
})
@@ -601,23 +602,23 @@ rrr.end.expression <- expression({
extra$Dmat <- Dmat # Not the latest iteration
extra$B1 <- B1.save # Not the latest iteration (not good)
} else {
- Blist <- replace.constraints(Blist.save, Amat, colx2.index)
+ Hlist <- replace.constraints(Hlist.save, Amat, colx2.index)
}
X.vlm.save <- if (control$Quadratic) {
- tmp300 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist.save,
- C = Cmat, control=control)
+ tmp300 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist.save,
+ C = Cmat, control = control)
latvar.mat <- tmp300$latvar.mat # Needed at the top of new.s.call
lm2vlm.model.matrix(tmp300$new.latvar.model.matrix,
- B.list,
+ H.list,
xij = control$xij)
} else {
- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+ lm2vlm.model.matrix(x, Hlist, xij = control$xij)
}
- fv <- tmp.fitted # Contains \bI \bnu
+ fv <- tmp.fitted # Contains \bI \bnu
eta <- fv + offset
if (FALSE && control$Rank == 1) {
ooo <- order(latvar.mat[, 1])
@@ -631,10 +632,10 @@ rrr.end.expression <- expression({
wz <- eval(family@weight)
if (control$checkwz)
wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset # Contains \bI \bnu
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset # Contains \bI \bnu
@@ -650,89 +651,94 @@ rrr.derivative.expression <- expression({
which.optimizer <- if (control$Quadratic && control$FastAlgorithm) {
- "BFGS"
+ "BFGS"
} else {
- if (iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
+ if (iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
}
if (trace && control$OptimizeWrtC) {
- cat("\n\n")
- cat("Using", which.optimizer, "\n")
- flush.console()
+ cat("\n\n")
+ cat("Using", which.optimizer, "\n")
+ flush.console()
}
- constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
- nice31 <- (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(constraints) == 1)
+ constraints <- replace.constraints(constraints, diag(M),
+ rrcontrol$colx2.index)
+ nice31 <- (!control$eq.tol || control$I.tolerances) &&
+ all(trivial.constraints(constraints) == 1)
theta0 <- c(Cmat)
assign(".VGAM.dot.counter", 0, envir = VGAMenv)
if (control$OptimizeWrtC) {
- if (control$Quadratic && control$FastAlgorithm) {
- if (iter == 2) {
- if (exists(".VGAM.etamat", envir = VGAMenv))
- rm(".VGAM.etamat", envir = VGAMenv)
- }
- if (iter > 2 && !quasi.newton$convergence) {
- if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
+ if (control$Quadratic && control$FastAlgorithm) {
+ if (iter == 2) {
+ if (exists(".VGAM.etamat", envir = VGAMenv))
+ rm(".VGAM.etamat", envir = VGAMenv)
+ }
+ if (iter > 2 && !quasi.newton$convergence) {
+ if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
+ ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
}
if (zthere) {
z <- matrix(..VGAM.z, n, M) # minus any offset
U <- matrix(..VGAM.U, M, n)
}
- }
+ }
- if (iter == 2 || quasi.newton$convergence) {
- NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
-
- canfitok <-
- (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAMenv))
- if (!canfitok)
- stop("cannot fit this model using fast algorithm")
- p2star <- if (nice31)
- ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
- (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol, 1,NOS))
- p1star <- if (nice31) p1 *
- ifelse(modelno == 3 || modelno == 5, 2, 1) else
- (ncol(X.vlm.save) - p2star)
- X.vlm.1save <- if (p1star > 0)
- X.vlm.save[,-(1:p2star)] else NULL
- quasi.newton <- optim(par=Cmat, fn=callcqof,
- gr <- if (control$GradientFunction) calldcqo else NULL,
- method=which.optimizer,
- control=list(fnscale = 1,
- trace = as.integer(control$trace),
- parscale = rep(control$Parscale,
- length.out = length(Cmat)),
- maxit = 250),
- etamat=eta, xmat=x, ymat=y, wvec=w,
- X.vlm.1save = if (nice31) NULL else X.vlm.1save,
- modelno=modelno, Control=control,
- n = n, M = M, p1star=p1star,
- p2star=p2star, nice31 = nice31)
+ if (iter == 2 || quasi.newton$convergence) {
+ NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
+
+ canfitok <-
+ (exists("CQO.FastAlgorithm", envir=VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAMenv))
+ if (!canfitok)
+ stop("cannot fit this model using fast algorithm")
+ p2star <- if (nice31)
+ ifelse(control$I.toleran,
+ Rank,
+ Rank+0.5*Rank*(Rank+1)) else
+ (NOS*Rank +
+ Rank*(Rank+1)/2 * ifelse(control$eq.tol, 1,NOS))
+ p1star <- if (nice31) p1 *
+ ifelse(modelno == 3 || modelno == 5, 2, 1) else
+ (ncol(X.vlm.save) - p2star)
+ X.vlm.1save <- if (p1star > 0)
+ X.vlm.save[,-(1:p2star)] else NULL
+ quasi.newton <-
+ optim(par = Cmat, fn = callcqof,
+ gr = if (control$GradientFunction) calldcqo else NULL,
+ method = which.optimizer,
+ control = list(fnscale = 1,
+ trace = as.integer(control$trace),
+ parscale = rep(control$Parscale,
+ length.out=length(Cmat)),
+ maxit = 250),
+ etamat = eta, xmat = x, ymat = y, wvec = w,
+ X.vlm.1save = if (nice31) NULL else X.vlm.1save,
+ modelno = modelno, Control = control,
+ n = n, M = M, p1star = p1star,
+ p2star = p2star, nice31 = nice31)
if (zthere <- exists(".VGAM.z", envir = VGAMenv)) {
- ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
- ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
- ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
+ ..VGAM.z <- get(".VGAM.z", envir = VGAMenv)
+ ..VGAM.U <- get(".VGAM.U", envir = VGAMenv)
+ ..VGAM.beta <- get(".VGAM.beta", envir = VGAMenv)
}
if (zthere) {
- z <- matrix(..VGAM.z, n, M) # minus any offset
- U <- matrix(..VGAM.U, M, n)
+ z <- matrix(..VGAM.z, n, M) # minus any offset
+ U <- matrix(..VGAM.U, M, n)
}
- } else {
- if (exists(".VGAM.offset", envir = VGAMenv))
- rm(".VGAM.offset", envir = VGAMenv)
- }
+ } else {
+ if (exists(".VGAM.offset", envir = VGAMenv))
+ rm(".VGAM.offset", envir = VGAMenv)
+ }
} else {
- use.reltol <- if (length(rrcontrol$Reltol) >= iter)
+ use.reltol <- if (length(rrcontrol$Reltol) >= iter)
rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
- quasi.newton <-
+ quasi.newton <-
optim(par = theta0,
fn = rrr.derivC.res.ss,
method = which.optimizer,
@@ -740,27 +746,27 @@ rrr.derivative.expression <- expression({
maxit = rrcontrol$Maxit,
abstol = rrcontrol$Abstol,
reltol = use.reltol),
- U = U, z = if (control$ITolerances) z + offset else z,
+ U = U, z = if (control$I.tolerances) z + offset else z,
M = M, xmat = x, # varbix2 = varbix2,
- Blist = Blist, rrcontrol = rrcontrol)
+ Hlist = Hlist, rrcontrol = rrcontrol)
}
- Cmat <- matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
+ Cmat <- matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
- if (Rank > 1 && rrcontrol$ITolerances) {
- numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
- evnu <- eigen(var(numat))
- Cmat <- Cmat %*% evnu$vector
- numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
- offset <- if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
+ if (Rank > 1 && rrcontrol$I.tolerances) {
+ numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
+ evnu <- eigen(var(numat))
+ Cmat <- Cmat %*% evnu$vector
+ numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
+ offset <- if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
+ }
}
-}
- alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
+ alt <- valt.1iter(x = x, z = z, U = U, Hlist = Hlist,
C = Cmat, nice31 = nice31,
control = rrcontrol,
lp.names = predictors.names)
@@ -797,65 +803,66 @@ rrr.derivative.expression <- expression({
Cmat <- alt$Cmat # Needed in rrr.end.expression if Quadratic
Dmat <- alt$Dmat # Put later into extra
- eval(rrr.end.expression) # Put Amat into Blist, and create new z
+ eval(rrr.end.expression) # Put Amat into Hlist, and create new z
})
-rrr.derivC.res.ss <- function(theta, U, z, M, xmat, Blist, rrcontrol,
+rrr.derivC.res.ss <- function(theta, U, z, M, xmat, Hlist, rrcontrol,
omit.these = NULL) {
- if (rrcontrol$trace) {
- cat(".")
- flush.console()
- }
- alreadyThere <- exists(".VGAM.dot.counter", envir = VGAMenv)
- if (alreadyThere) {
- VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAMenv)
- VGAM.dot.counter <- VGAM.dot.counter + 1
- assign(".VGAM.dot.counter", VGAM.dot.counter,
- envir = VGAMenv)
- if (VGAM.dot.counter > max(50, options()$width - 5)) {
- if (rrcontrol$trace) {
- cat("\n")
- flush.console()
- }
- assign(".VGAM.dot.counter", 0, envir = VGAMenv)
- }
+ if (rrcontrol$trace) {
+ cat(".")
+ flush.console()
+ }
+ alreadyThere <- exists(".VGAM.dot.counter", envir = VGAMenv)
+ if (alreadyThere) {
+ VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAMenv)
+ VGAM.dot.counter <- VGAM.dot.counter + 1
+ assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
+ if (VGAM.dot.counter > max(50, options()$width - 5)) {
+ if (rrcontrol$trace) {
+ cat("\n")
+ flush.console()
+ }
+ assign(".VGAM.dot.counter", 0, envir = VGAMenv)
}
+ }
- Cmat <- matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
+ Cmat <- matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
- tmp700 <- lm2qrrvlm.model.matrix(x = xmat, Blist = Blist,
- no.thrills = !rrcontrol$Corner,
- C = Cmat, control = rrcontrol, assign = FALSE)
- Blist <- tmp700$constraints # Does not contain \bI_{Rank} \bnu
+ tmp700 <-
+ lm2qrrvlm.model.matrix(x = xmat, Hlist = Hlist,
+ no.thrills = !rrcontrol$Corner,
+ C = Cmat, control = rrcontrol, assign = FALSE)
+ Hlist <- tmp700$constraints # Does not contain \bI_{Rank} \bnu
if (rrcontrol$Corner) {
- z <- as.matrix(z) # should actually call this zedd
- z[, rrcontrol$Index.corner] <- z[, rrcontrol$Index.corner] -
- tmp700$latvar.mat
+ z <- as.matrix(z) # should actually call this zedd
+ z[, rrcontrol$Index.corner] <-
+ z[, rrcontrol$Index.corner] - tmp700$latvar.mat
}
if (length(tmp700$offset)) z <- z - tmp700$offset
- vlm.wfit(xmat=tmp700$new.latvar.model.matrix, zmat=z,
- Blist = Blist, ncolx=ncol(xmat), U = U, only.res.ss = TRUE,
- matrix.out = FALSE, is.vlmX = FALSE, res.ss= TRUE, qr = FALSE,
- Eta.range = rrcontrol$Eta.range,
+ vlm.wfit(xmat = tmp700$new.latvar.model.matrix, zmat = z,
+ Hlist = Hlist, ncolx = ncol(xmat), U = U, only.res.ss = TRUE,
+ matrix.out = FALSE, is.vlmX = FALSE, res.ss = TRUE,
+ qr = FALSE, Eta.range = rrcontrol$Eta.range,
xij = rrcontrol$xij)$res.ss
}
+
rrvglm.optim.control <- function(Fnscale = 1,
- Maxit = 100,
- Switch.optimizer = 3,
- Abstol = -Inf,
- Reltol = sqrt(.Machine$double.eps),
- ...) {
+ Maxit = 100,
+ Switch.optimizer = 3,
+ Abstol = -Inf,
+ Reltol = sqrt(.Machine$double.eps),
+ ...) {
@@ -870,28 +877,29 @@ rrvglm.optim.control <- function(Fnscale = 1,
nlminbcontrol <- function(Abs.tol = 10^(-6),
- Eval.max = 91,
- Iter.max = 91,
- Rel.err = 10^(-6),
- Rel.tol = 10^(-6),
- Step.min = 10^(-6),
- X.tol = 10^(-6),
- ...) {
-
-
- list(Abs.tol = Abs.tol,
- Eval.max = Eval.max,
- Iter.max = Iter.max,
- Rel.err = Rel.err,
- Rel.tol = Rel.tol,
- Step.min = Step.min,
- X.tol = X.tol)
+ Eval.max = 91,
+ Iter.max = 91,
+ Rel.err = 10^(-6),
+ Rel.tol = 10^(-6),
+ Step.min = 10^(-6),
+ X.tol = 10^(-6),
+ ...) {
+
+
+ list(Abs.tol = Abs.tol,
+ Eval.max = Eval.max,
+ Iter.max = Iter.max,
+ Rel.err = Rel.err,
+ Rel.tol = Rel.tol,
+ Step.min = Step.min,
+ X.tol = X.tol)
}
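
These capitalised defaults mirror the lower-case control entries of stats::nlminb(). A hedged, self-contained example of passing equivalent tolerances straight to nlminb() (the start values and objective are illustrative only):

fit <- nlminb(start = c(0, 0),
              objective = function(p) sum((p - c(2, -1))^2),
              control = list(abs.tol  = 1e-6,
                             eval.max = 91,
                             iter.max = 91,
                             rel.tol  = 1e-6,
                             x.tol    = 1e-6))
fit$par          # close to c(2, -1)
fit$convergence  # 0 indicates success
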
-Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
+Coef.qrrvglm <- function(object, varI.latvar = FALSE,
+ reference = NULL, ...) {
if (length(varI.latvar) != 1 || !is.logical(varI.latvar))
@@ -918,16 +926,16 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
p2 <- length(ocontrol$colx2.index)
Index.corner <- ocontrol$Index.corner
str0 <- ocontrol$str0
- EqualTolerances <- ocontrol$EqualTolerances
+ eq.tolerances <- ocontrol$eq.tolerances
Dzero <- ocontrol$Dzero
Corner <- if (ConstrainedQO) ocontrol$Corner else FALSE
- estITol <- if (ConstrainedQO) object@control$ITolerances else FALSE
+ estI.tol <- if (ConstrainedQO) object@control$I.tolerances else FALSE
modelno <- object@control$modelno # 1, 2, 3, 4, 5, 6, 7 or 0
combine2 <- c(str0, if (Corner) Index.corner else NULL)
NoA <- length(combine2) == M # A is fully known.
- Qoffset <- if (Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
+ Qoffset <- if (Quadratic) ifelse(estI.tol, 0, sum(1:Rank)) else 0
ynames <- object@misc$ynames
if (!length(ynames)) ynames <- object@misc$predictors.names
@@ -973,10 +981,10 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
- Amat <- object@extra$Amat # M x Rank
- Cmat <- object@extra$Cmat # p2 x Rank
- Dmat <- object@extra$Dmat #
- B1 <- object@extra$B1 #
+ Amat <- object@extra$Amat # M x Rank
+ Cmat <- object@extra$Cmat # p2 x Rank
+ Dmat <- object@extra$Dmat #
+ B1 <- object@extra$B1 #
bellshaped <- rep(FALSE, length = M)
if (is.character(reference)) {
@@ -994,7 +1002,7 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
elts <- Dmat[this.spp,, drop = FALSE]
if (length(elts) < Rank)
elts <- matrix(elts, 1, Rank)
- Dk <- m2adefault(elts, M = Rank)[,, 1] # Hopefully negative-def
+ Dk <- m2adefault(elts, M = Rank)[,, 1] # Hopefully negative-def
temp400 <- eigen(Dk)
ptr1 <- ptr1 + 1
if (all(temp400$value < 0))
@@ -1003,44 +1011,44 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE, reference = NULL, ...) {
break
}
if (all(temp400$value < 0)) {
- temp1tol <- -0.5 * solve(Dk)
- dim(temp1tol) <- c(Rank,Rank)
- Mmat <- t(chol(temp1tol))
- if (ConstrainedQO) {
- temp900 <- solve(t(Mmat))
- Cmat <- Cmat %*% temp900
- Amat <- Amat %*% Mmat
- }
- if (length(Cmat)) {
- temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
- Cmat <- temp800$cmat
- Amat <- temp800$amat
- }
+ temp1tol <- -0.5 * solve(Dk)
+ dim(temp1tol) <- c(Rank,Rank)
+ Mmat <- t(chol(temp1tol))
+ if (ConstrainedQO) {
+ temp900 <- solve(t(Mmat))
+ Cmat <- Cmat %*% temp900
+ Amat <- Amat %*% Mmat
+ }
+ if (length(Cmat)) {
+ temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
+ Cmat <- temp800$cmat
+ Amat <- temp800$amat
+ }
- Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
- Dmat = Dmat, M = M)
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
- retlist <- td.expression(Dmat = Dmat, Amat = Amat, M = M,
- Dzero = Dzero, Rank = Rank,
- bellshaped = bellshaped)
- optimum <- retlist$optimum
- Tolerance <- retlist$Tolerance
- Darray <- retlist$Darray
- bellshaped <- retlist$bellshaped
+ retlist <- td.expression(Dmat = Dmat, Amat = Amat, M = M,
+ Dzero = Dzero, Rank = Rank,
+ bellshaped = bellshaped)
+ optimum <- retlist$optimum
+ Tolerance <- retlist$Tolerance
+ Darray <- retlist$Darray
+ bellshaped <- retlist$bellshaped
} else {
if (length(reference) == 1)
- stop("tolerance matrix specified by 'reference' ",
- "is not positive-definite") else
- warning("could not find any positive-definite ",
- "tolerance matrix")
+ stop("tolerance matrix specified by 'reference' ",
+ "is not positive-definite") else
+ warning("could not find any positive-definite ",
+ "tolerance matrix")
}
@@ -1189,6 +1197,7 @@ setClass(Class = "Coef.rrvglm", representation(
"colx2.index" = "numeric",
"Atilde" = "matrix"))
+
setClass(Class = "Coef.uqo", representation(
"A" = "matrix",
"B1" = "matrix",
@@ -1206,11 +1215,14 @@ setClass(Class = "Coef.uqo", representation(
"Dzero" = "logical",
"Tolerance" = "array"))
+
setClass(Class = "Coef.qrrvglm", representation(
"C" = "matrix"),
contains = "Coef.uqo")
+
+
show.Coef.qrrvglm <- function(x, ...) {
object <- x
@@ -1292,10 +1304,12 @@ setMethod("show", "Coef.qrrvglm", function(object)
+
setMethod("summary", "qrrvglm", function(object, ...)
summary.qrrvglm(object, ...))
+
predictqrrvglm <-
function(object,
newdata = NULL,
@@ -1324,7 +1338,9 @@ predictqrrvglm <-
na.act <- object@na.action
object@na.action <- list()
- if (!length(newdata) && type == "response" && length(object@fitted.values)) {
+ if (!length(newdata) &&
+ type == "response" &&
+ length(object@fitted.values)) {
if (length(na.act)) {
return(napredict(na.act[[1]], object@fitted.values))
} else {
@@ -1343,10 +1359,11 @@ predictqrrvglm <-
setup.smart("read", smart.prediction = object@smart.prediction)
}
- tt <- object@terms$terms # terms(object) # 11/8/03; object@terms$terms
- X <- model.matrix(delete.response(tt), newdata, contrasts =
- if (length(object@contrasts)) object@contrasts else NULL,
- xlev = object@xlevels)
+ tt <- object@terms$terms # terms(object) # 11/8/03; object@terms$terms
+ X <- model.matrix(delete.response(tt), newdata,
+ contrasts = if (length(object@contrasts))
+ object@contrasts else NULL,
+ xlev = object@xlevels)
if (nrow(X) != nrow(newdata)) {
as.save <- attr(X, "assign")
@@ -1370,7 +1387,7 @@ predictqrrvglm <-
attr(X, "assign") <- attrassigndefault(X, tt)
}
- ocontrol <- object@control
+ ocontrol <- object@control
Rank <- ocontrol$Rank
NOS <- ncol(object@y)
@@ -1402,20 +1419,21 @@ predictqrrvglm <-
etamat <- object@predictors
}
- pred <- switch(type,
- response = {
- fv = if (length(newdata))
- object@family@linkinv(etamat, extra) else
- fitted(object)
- if (M > 1 && is.matrix(fv)) {
+ pred <-
+ switch(type,
+ response = {
+ fv <- if (length(newdata))
+ object@family@linkinv(etamat, extra) else
+ fitted(object)
+ if (M > 1 && is.matrix(fv)) {
dimnames(fv) <- list(dimnames(fv)[[1]],
dimnames(object@fitted.values)[[2]])
}
fv
},
- link = etamat,
- latvar = stop("failure here"),
- terms = stop("failure here"))
+ link = etamat,
+ latvar = stop("failure here"),
+ terms = stop("failure here"))
if (!length(newdata) && length(na.act)) {
if (se.fit) {
@@ -1457,6 +1475,7 @@ setMethod("residuals", "qrrvglm",
+
show.rrvglm <- function(x, ...) {
if (!is.null(cl <- x@call)) {
cat("Call:\n")
@@ -1495,7 +1514,8 @@ show.rrvglm <- function(x, ...) {
if (length(x@criterion)) {
ncrit <- names(x@criterion)
for (iii in ncrit)
- if (iii != "loglikelihood" && iii != "deviance")
+ if (iii != "loglikelihood" &&
+ iii != "deviance")
cat(paste(iii, ":", sep = ""),
format(x@criterion[[iii]]), "\n")
}
@@ -1584,11 +1604,11 @@ summary.rrvglm <- function(object, correlation = FALSE,
object@misc$disper else
object@misc$default.disper
if (is.numeric(dispersion)) {
- if (is.numeric(od) && dispersion != od)
- warning("dispersion != object@misc$dispersion; ",
- "using the former")
+ if (is.numeric(od) && dispersion != od)
+ warning("dispersion != object@misc$dispersion; ",
+ "using the former")
} else {
- dispersion <- if (is.numeric(od)) od else 1
+ dispersion <- if (is.numeric(od)) od else 1
}
tmp8 <- object@misc$M - object@control$Rank -
@@ -1623,81 +1643,81 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
- if (length(fit@control$Nested) && fit@control$Nested)
- stop("sorry, cannot handle nested models yet")
-
- str0 <- fit@control$str0
-
+ if (length(fit@control$Nested) && fit@control$Nested)
+ stop("sorry, cannot handle nested models yet")
- if (!length(fit@x))
- stop("fix@x is empty. Run rrvglm(... , x= TRUE)")
+ str0 <- fit@control$str0
- colx1.index <- fit@control$colx1.index # May be NULL
- colx2.index <- fit@control$colx2.index
- Blist <- fit@constraints
- ncolBlist <- unlist(lapply(Blist, ncol))
- p1 <- length(colx1.index) # May be 0
- p2 <- length(colx2.index)
+ if (!length(fit@x))
+ stop("fix@x is empty. Run rrvglm(... , x= TRUE)")
- Rank <- fit@control$Rank # fit@misc$Nested.Rank
+ colx1.index <- fit@control$colx1.index # May be NULL
+ colx2.index <- fit@control$colx2.index
+ Hlist <- fit@constraints
+ ncolHlist <- unlist(lapply(Hlist, ncol))
- Amat <- fit@constraints[[colx2.index[1]]]
- B1mat <- if (p1)
- coefvlm(fit, matrix.out = TRUE)[colx1.index, , drop = FALSE] else
- NULL
- C.try <- coefvlm(fit, matrix.out= TRUE)[colx2.index, , drop = FALSE]
- Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
+ p1 <- length(colx1.index) # May be 0
+ p2 <- length(colx2.index)
- x1mat <- if (p1) fit@x[, colx1.index, drop = FALSE] else NULL
- x2mat <- fit@x[, colx2.index, drop = FALSE]
-
- wz <- weights(fit, type = "work") # old: wweights(fit) #fit@weights
- if (!length(wz))
- stop("cannot get fit@weights")
-
- M <- fit@misc$M
- n <- fit@misc$n
- Index.corner <- fit@control$Index.corner # used to be (1:Rank);
- zmat <- fit@predictors + fit@residuals
- theta <- c(Amat[-c(Index.corner,str0), ])
- if (fit@control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = fit@control$wzepsilon)
- U <- vchol(wz, M = M, n = n, silent= TRUE)
-
- if (numerical) {
- delct.da <- num.deriv.rrr(fit, M = M, r = Rank,
- x1mat = x1mat, x2mat = x2mat, p2 = p2,
- Index.corner, Aimat = Amat,
- B1mat = B1mat, Cimat = Cmat,
- h.step = h.step,
- colx2.index = colx2.index,
- xij = fit@control$xij,
- str0 = str0)
- } else {
- delct.da <- dctda.fast.only(theta = theta, wz = wz,
- U = U, zmat,
- M = M, r = Rank, x1mat = x1mat,
- x2mat = x2mat, p2 = p2,
- Index.corner, Aimat = Amat,
- B1mat = B1mat, Cimat = Cmat,
- xij = fit@control$xij,
- str0 = str0)
- }
+ Rank <- fit@control$Rank # fit@misc$Nested.Rank
+
+ Amat <- fit@constraints[[colx2.index[1]]]
+ B1mat <- if (p1)
+ coefvlm(fit, matrix.out = TRUE)[colx1.index, , drop = FALSE] else
+ NULL
+ C.try <- coefvlm(fit, matrix.out= TRUE)[colx2.index, , drop = FALSE]
+ Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
+
+ x1mat <- if (p1) fit@x[, colx1.index, drop = FALSE] else NULL
+ x2mat <- fit@x[, colx2.index, drop = FALSE]
+
+ wz <- weights(fit, type = "work") # old: wweights(fit) #fit@weights
+ if (!length(wz))
+ stop("cannot get fit@weights")
+
+ M <- fit@misc$M
+ n <- fit@misc$n
+ Index.corner <- fit@control$Index.corner # used to be (1:Rank);
+ zmat <- fit@predictors + fit@residuals
+ theta <- c(Amat[-c(Index.corner,str0), ])
+ if (fit@control$checkwz)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = fit@control$wzepsilon)
+ U <- vchol(wz, M = M, n = n, silent= TRUE)
+
+ if (numerical) {
+ delct.da <- num.deriv.rrr(fit, M = M, r = Rank,
+ x1mat = x1mat, x2mat = x2mat, p2 = p2,
+ Index.corner, Aimat = Amat,
+ B1mat = B1mat, Cimat = Cmat,
+ h.step = h.step,
+ colx2.index = colx2.index,
+ xij = fit@control$xij,
+ str0 = str0)
+ } else {
+ delct.da <- dctda.fast.only(theta = theta, wz = wz,
+ U = U, zmat,
+ M = M, r = Rank, x1mat = x1mat,
+ x2mat = x2mat, p2 = p2,
+ Index.corner, Aimat = Amat,
+ B1mat = B1mat, Cimat = Cmat,
+ xij = fit@control$xij,
+ str0 = str0)
+ }
- newobject <- as(fit, "vglm")
+ newobject <- as(fit, "vglm")
- sfit2233 <- summaryvglm(newobject)
- d8 <- dimnames(sfit2233@cov.unscaled)[[1]]
- cov2233 <- solve(sfit2233@cov.unscaled) # Includes any intercepts
- dimnames(cov2233) <- list(d8, d8)
+ sfit2233 <- summaryvglm(newobject)
+ d8 <- dimnames(sfit2233@cov.unscaled)[[1]]
+ cov2233 <- solve(sfit2233@cov.unscaled) # Includes any intercepts
+ dimnames(cov2233) <- list(d8, d8)
log.vec33 <- NULL
nassign <- names(fit@constraints)
@@ -1719,11 +1739,11 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
cm <- matrix(0, M, M - Rank - length(str0))
cm[-c(Index.corner, str0), ] <- diag(M - Rank - length(str0))
- Blist <- vector("list", length(colx1.index)+1)
- names(Blist) <- c(names(colx1.index), "I(latvar.mat)")
+ Hlist <- vector("list", length(colx1.index)+1)
+ names(Hlist) <- c(names(colx1.index), "I(latvar.mat)")
for (ii in names(colx1.index))
- Blist[[ii]] <- fit@constraints[[ii]]
- Blist[["I(latvar.mat)"]] <- cm
+ Hlist[[ii]] <- fit@constraints[[ii]]
+ Hlist[["I(latvar.mat)"]] <- cm
if (p1) {
@@ -1764,12 +1784,12 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
fit1122 <- if (dspec)
vlm(bb,
- constraints = Blist, criterion = "d", weights = wz,
+ constraints = Hlist, criterion = "d", weights = wz,
data = bbdata,
save.weight = TRUE, smart = FALSE, trace = trace.arg,
x.arg = TRUE) else
vlm(bb,
- constraints = Blist, criterion = "d", weights = wz,
+ constraints = Hlist, criterion = "d", weights = wz,
save.weight = TRUE, smart = FALSE, trace = trace.arg,
x.arg = TRUE)
@@ -1853,11 +1873,11 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
ptr <- 1
for (sss in 1:r)
for (tt in cbindex) {
- small.Blist <- vector("list", p2)
+ small.Hlist <- vector("list", p2)
pAmat <- Aimat
pAmat[tt,sss] <- pAmat[tt,sss] + h.step # Perturb it
for (ii in 1:p2)
- small.Blist[[ii]] <- pAmat
+ small.Hlist[[ii]] <- pAmat
offset <- if (length(fit@offset)) fit@offset else 0
if (all(offset == 0)) offset <- 0
@@ -1883,7 +1903,7 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
newzmat <- newzmat - x1mat %*% B1mat
newfit <- vlm.wfit(xmat = x2mat, zmat = newzmat,
- Blist = small.Blist, U = U,
+ Hlist = small.Hlist, U = U,
matrix.out = FALSE, is.vlmX = FALSE,
res.ss = TRUE, qr = FALSE, x.ret = FALSE,
offset = NULL, xij = xij)
@@ -1973,17 +1993,17 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
Aimat[-Index.corner,] <- theta # [-(1:M)]
if (intercept) {
- Blist <- vector("list", pp+1)
- Blist[[1]] <- diag(M)
+ Hlist <- vector("list", pp+1)
+ Hlist[[1]] <- diag(M)
for (ii in 2:(pp+1))
- Blist[[ii]] <- Aimat
+ Hlist[[ii]] <- Aimat
} else {
- Blist <- vector("list", pp)
+ Hlist <- vector("list", pp)
for (ii in 1:pp)
- Blist[[ii]] <- Aimat
+ Hlist[[ii]] <- Aimat
}
- coeffs <- vlm.wfit(xmat = xmat, z, Blist, U = U, matrix.out = TRUE,
+ coeffs <- vlm.wfit(xmat = xmat, z, Hlist, U = U, matrix.out = TRUE,
xij = xij)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
@@ -2082,17 +2102,17 @@ rrr.deriv.res.ss <- function(theta, wz, U, z, M, r, xmat,
Amat[-Index.corner,] <- theta # [-(1:M)]
if (intercept) {
- Blist <- vector("list", pp+1)
- Blist[[1]] <- diag(M)
+ Hlist <- vector("list", pp+1)
+ Hlist[[1]] <- diag(M)
for (ii in 2:(pp+1))
- Blist[[ii]] <- Amat
+ Hlist[[ii]] <- Amat
} else {
- Blist <- vector("list", pp)
+ Hlist <- vector("list", pp)
for (ii in 1:pp)
- Blist[[ii]] <- Amat
+ Hlist[[ii]] <- Amat
}
- vlm.wfit(xmat = xmat, z, Blist, U = U, matrix.out = FALSE,
+ vlm.wfit(xmat = xmat, z, Hlist, U = U, matrix.out = FALSE,
res.ss = TRUE, xij = xij)$res.ss
}
@@ -2113,17 +2133,17 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
Aimat[-Index.corner,] <- theta # [-(1:M)]
if (intercept) {
- Blist <- vector("list", pp+1)
- Blist[[1]] <- diag(M)
+ Hlist <- vector("list", pp+1)
+ Hlist[[1]] <- diag(M)
for (i in 2:(pp+1))
- Blist[[i]] <- Aimat
+ Hlist[[i]] <- Aimat
} else {
- Blist <- vector("list", pp)
+ Hlist <- vector("list", pp)
for (i in 1:(pp))
- Blist[[i]] <- Aimat
+ Hlist[[i]] <- Aimat
}
- coeffs <- vlm.wfit(xmat, z, Blist, U = U, matrix.out= TRUE,
+ coeffs <- vlm.wfit(xmat, z, Hlist, U = U, matrix.out= TRUE,
xij = NULL)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
@@ -2243,6 +2263,7 @@ biplot.qrrvglm <- function(x, ...) {
}
+
lvplot.qrrvglm <-
function(object, varI.latvar = FALSE, reference = NULL,
add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE,
@@ -2255,7 +2276,8 @@ biplot.qrrvglm <- function(x, ...) {
llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
label.arg = FALSE, adj.arg = -0.1,
ellipse = 0.95, Absolute = FALSE,
- elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200,
+ elty = par()$lty, ecol = par()$col, elwd = par()$lwd,
+ egrid = 200,
chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
cpch = " ",
C = FALSE,
@@ -2266,14 +2288,14 @@ biplot.qrrvglm <- function(x, ...) {
sfont = par()$font,
check.ok = TRUE, ...) {
if (mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
+ type <- as.character(substitute(type))
type <- match.arg(type, c("fitted.values", "predictors"))[1]
if (is.numeric(OriginC))
- OriginC <- rep(OriginC, length.out = 2) else {
- if (mode(OriginC) != "character" && mode(OriginC) != "name")
- OriginC <- as.character(substitute(OriginC))
- OriginC <- match.arg(OriginC, c("origin","mean"))[1]
+ OriginC <- rep(OriginC, length.out = 2) else {
+ if (mode(OriginC) != "character" && mode(OriginC) != "name")
+ OriginC <- as.character(substitute(OriginC))
+ OriginC <- match.arg(OriginC, c("origin","mean"))[1]
}
if (length(ellipse) > 1)
@@ -2294,7 +2316,8 @@ biplot.qrrvglm <- function(x, ...) {
stop("latent variable plots allowable only for ",
"noRRR = ~ 1 models")
- Coef.list <- Coef(object, varI.latvar = varI.latvar, reference = reference)
+ Coef.list <- Coef(object, varI.latvar = varI.latvar,
+ reference = reference)
if ( C) Cmat <- Coef.list@C
nustar <- Coef.list@latvar # n x Rank
@@ -2302,140 +2325,142 @@ biplot.qrrvglm <- function(x, ...) {
r.curves <- slot(object, type) # n times M (\boldeta or \boldmu)
if (!add) {
- if (Rank == 1) {
- matplot(nustar,
- if ( y && type == "fitted.values")
- object@y else r.curves,
- type = "n", xlab=xlab, ylab=ylab, ...)
- } else { # Rank == 2
- matplot(c(Coef.list@Optimum[1,], nustar[, 1]),
- c(Coef.list@Optimum[2,], nustar[, 2]),
- type = "n", xlab=xlab, ylab=ylab, ...)
- }
+ if (Rank == 1) {
+ matplot(nustar,
+ if ( y && type == "fitted.values")
+ object@y else r.curves,
+ type = "n", xlab = xlab, ylab = ylab, ...)
+ } else { # Rank == 2
+ matplot(c(Coef.list@Optimum[1, ], nustar[, 1]),
+ c(Coef.list@Optimum[2, ], nustar[, 2]),
+ type = "n", xlab = xlab, ylab = ylab, ...)
+ }
}
- if ((length(pch) != 1 && length(pch) != ncol(r.curves)) ||
- (length(pcol) != 1 && length(pcol) != ncol(r.curves)) ||
- (length(pcex) != 1 && length(pcex) != ncol(r.curves)))
- stop("pch, pcol and pcex must be of length 1 or ncol(r.curves)")
-
- pch <- rep(pch, leng=ncol(r.curves))
- pcol <- rep(pcol, leng=ncol(r.curves))
- pcex <- rep(pcex, leng=ncol(r.curves))
- llty <- rep(llty, leng=ncol(r.curves))
- lcol <- rep(lcol, leng=ncol(r.curves))
- llwd <- rep(llwd, leng=ncol(r.curves))
- elty <- rep(elty, leng=ncol(r.curves))
- ecol <- rep(ecol, leng=ncol(r.curves))
- elwd <- rep(elwd, leng=ncol(r.curves))
- adj.arg <- rep(adj.arg, leng=ncol(r.curves))
+
+
+
+ pch <- rep(pch, length = ncol(r.curves))
+ pcol <- rep(pcol, length = ncol(r.curves))
+ pcex <- rep(pcex, length = ncol(r.curves))
+ llty <- rep(llty, length = ncol(r.curves))
+ lcol <- rep(lcol, length = ncol(r.curves))
+ llwd <- rep(llwd, length = ncol(r.curves))
+ elty <- rep(elty, length = ncol(r.curves))
+ ecol <- rep(ecol, length = ncol(r.curves))
+ elwd <- rep(elwd, length = ncol(r.curves))
+ adj.arg <- rep(adj.arg, length = ncol(r.curves))
if ( C ) {
- Clwd <- rep(Clwd, leng=nrow(Cmat))
- Clty <- rep(Clty, leng=nrow(Cmat))
- Ccol <- rep(Ccol, leng=nrow(Cmat))
- Cadj.arg <- rep(Cadj.arg, leng=nrow(Cmat))
- Ccex <- rep(Ccex, leng=nrow(Cmat))
+ Clwd <- rep(Clwd, length = nrow(Cmat))
+ Clty <- rep(Clty, length = nrow(Cmat))
+ Ccol <- rep(Ccol, length = nrow(Cmat))
+ Ccex <- rep(Ccex, length = nrow(Cmat))
+ Cadj.arg <- rep(Cadj.arg, length = nrow(Cmat))
}
if (Rank == 1) {
- for (i in 1:ncol(r.curves)) {
- xx <- nustar
- yy <- r.curves[,i]
- o <- sort.list(xx)
- xx <- xx[o]
- yy <- yy[o]
- lines(xx, yy, col =lcol[i], lwd = llwd[i], lty = llty[i])
- if ( y && type == "fitted.values") {
- ypts <- object@y
- if (ncol(as.matrix(ypts)) == ncol(r.curves))
- points(xx, ypts[o,i], col =pcol[i],
- cex = pcex[i], pch=pch[i])
- }
+ for (i in 1:ncol(r.curves)) {
+ xx <- nustar
+ yy <- r.curves[,i]
+ o <- sort.list(xx)
+ xx <- xx[o]
+ yy <- yy[o]
+ lines(xx, yy, col = lcol[i], lwd = llwd[i], lty = llty[i])
+ if ( y && type == "fitted.values") {
+ ypts <- object@y
+ if (ncol(as.matrix(ypts)) == ncol(r.curves))
+ points(xx, ypts[o,i], col = pcol[i],
+ cex = pcex[i], pch = pch[i])
}
- if (rug) rug(xx)
+ }
+ if (rug)
+ rug(xx)
} else {
- for (i in 1:ncol(r.curves))
- points(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
- col =pcol[i], cex = pcex[i], pch=pch[i])
- if (label.arg) {
- for (i in 1:ncol(r.curves))
- text(Coef.list@Optimum[1,i], Coef.list@Optimum[2,i],
- labels=(dimnames(Coef.list@Optimum)[[2]])[i],
- adj=adj.arg[i], col =pcol[i], cex = pcex[i])
- }
- if (chull.arg) {
- hull <- chull(nustar[, 1], nustar[, 2])
- hull <- c(hull, hull[1])
- lines(nustar[hull, 1], nustar[hull, 2], type = "b", pch=cpch,
- lty = clty, col =ccol, lwd = clwd)
+ for (i in 1:ncol(r.curves))
+ points(Coef.list@Optimum[1, i], Coef.list@Optimum[2, i],
+ col = pcol[i], cex = pcex[i], pch = pch[i])
+ if (label.arg) {
+ for (i in 1:ncol(r.curves))
+ text(Coef.list@Optimum[1, i], Coef.list@Optimum[2, i],
+ labels = (dimnames(Coef.list@Optimum)[[2]])[i],
+ adj = adj.arg[i], col = pcol[i], cex = pcex[i])
+ }
+ if (chull.arg) {
+ hull <- chull(nustar[, 1], nustar[, 2])
+ hull <- c(hull, hull[1])
+ lines(nustar[hull, 1], nustar[hull, 2], type = "b", pch = cpch,
+ lty = clty, col = ccol, lwd = clwd)
+ }
+ if (length(ellipse)) {
+ ellipse.temp <- if (ellipse > 0) ellipse else 0.95
+ if (ellipse < 0 && (!object@control$eq.tolerances || varI.latvar))
+ stop("an equal-tolerances assumption and 'varI.latvar = FALSE' ",
+ "is needed for 'ellipse' < 0")
+ if ( check.ok ) {
+ colx1.index <- object@control$colx1.index
+ if (!(length(colx1.index) == 1 &&
+ names(colx1.index) == "(Intercept)"))
+ stop("can only plot ellipses for intercept models only")
}
- if (length(ellipse)) {
- ellipse.temp <- if (ellipse > 0) ellipse else 0.95
- if (ellipse < 0 && (!object@control$EqualTolerances || varI.latvar))
- stop("an equal-tolerances assumption and 'varI.latvar = FALSE' ",
- "is needed for 'ellipse' < 0")
- if ( check.ok ) {
- colx1.index <- object@control$colx1.index
- if (!(length(colx1.index) == 1 &&
- names(colx1.index) == "(Intercept)"))
- stop("can only plot ellipses for intercept models only")
- }
- for (i in 1:ncol(r.curves)) {
- cutpoint <- object@family@linkfun( if (Absolute) ellipse.temp
- else Coef.list@Maximum[i] * ellipse.temp,
- extra = object@extra)
- if (MSratio > 1)
- cutpoint <- cutpoint[1, 1]
-
- cutpoint <- object@family@linkfun(Coef.list@Maximum[i],
- extra = object@extra) - cutpoint
- if (is.finite(cutpoint) && cutpoint > 0) {
- Mmat <- diag(rep(ifelse(object@control$Crow1positive, 1, -1),
- length.out = Rank))
- etoli <- eigen(t(Mmat) %*% Coef.list@Tolerance[,,i] %*% Mmat)
- A=ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
- B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
- if (ellipse < 0) A <- B <- -ellipse / 2
-
- theta.angle <- asin(etoli$vector[2, 1]) *
- ifelse(object@control$Crow1positive[2], 1, -1)
- if (object@control$Crow1positive[1])
- theta.angle <- pi - theta.angle
- if (all(is.finite(c(A,B))))
- lines(vellipse(R = 2*A, ratio = B/A,
- orientation = theta.angle,
- center = Coef.list@Optimum[,i],
- N = egrid),
- lwd = elwd[i], col =ecol[i], lty = elty[i])
- }
+ for (i in 1:ncol(r.curves)) {
+ cutpoint <- object@family@linkfun( if (Absolute) ellipse.temp
+ else Coef.list@Maximum[i] * ellipse.temp,
+ extra = object@extra)
+ if (MSratio > 1)
+ cutpoint <- cutpoint[1, 1]
+
+ cutpoint <- object@family@linkfun(Coef.list@Maximum[i],
+ extra = object@extra) - cutpoint
+ if (is.finite(cutpoint) && cutpoint > 0) {
+ Mmat <- diag(rep(ifelse(object@control$Crow1positive, 1, -1),
+ length.out = Rank))
+ etoli <- eigen(t(Mmat) %*% Coef.list@Tolerance[,,i] %*% Mmat)
+ A <- ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
+ B <- ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
+ if (ellipse < 0)
+ A <- B <- -ellipse / 2
+
+ theta.angle <- asin(etoli$vector[2, 1]) *
+ ifelse(object@control$Crow1positive[2], 1, -1)
+ if (object@control$Crow1positive[1])
+ theta.angle <- pi - theta.angle
+ if (all(is.finite(c(A,B))))
+ lines(vellipse(R = 2*A, ratio = B/A,
+ orientation = theta.angle,
+ center = Coef.list@Optimum[,i],
+ N = egrid),
+ lwd = elwd[i], col =ecol[i], lty = elty[i])
}
}
+ }
- if ( C ) {
- if (is.character(OriginC) && OriginC == "mean")
- OriginC <- c(mean(nustar[, 1]), mean(nustar[, 2]))
- if (is.character(OriginC) && OriginC == "origin")
- OriginC <- c(0,0)
- for (i in 1:nrow(Cmat))
- arrows(x0=OriginC[1], y0=OriginC[2],
- x1=OriginC[1] + stretchC*Cmat[i, 1],
- y1=OriginC[2] + stretchC*Cmat[i, 2],
- lty = Clty[i], col =Ccol[i], lwd = Clwd[i])
- if (label.arg) {
- temp200 <- dimnames(Cmat)[[1]]
- for (i in 1:nrow(Cmat))
- text(OriginC[1] + stretchC*Cmat[i, 1],
- OriginC[2] + stretchC*Cmat[i, 2], col =Ccol[i],
- labels=temp200[i], adj=Cadj.arg[i], cex = Ccex[i])
- }
- }
- if (sites) {
- text(nustar[, 1], nustar[, 2], adj = 0.5,
- labels = if (is.null(spch)) dimnames(nustar)[[1]] else
- rep(spch, length = nrow(nustar)), col =scol, cex = scex, font=sfont)
- }
+ if ( C ) {
+ if (is.character(OriginC) && OriginC == "mean")
+ OriginC <- c(mean(nustar[, 1]), mean(nustar[, 2]))
+ if (is.character(OriginC) && OriginC == "origin")
+ OriginC <- c(0,0)
+ for (i in 1:nrow(Cmat))
+ arrows(x0 = OriginC[1], y0 = OriginC[2],
+ x1 = OriginC[1] + stretchC * Cmat[i, 1],
+ y1 = OriginC[2] + stretchC * Cmat[i, 2],
+ lty = Clty[i], col = Ccol[i], lwd = Clwd[i])
+ if (label.arg) {
+ temp200 <- dimnames(Cmat)[[1]]
+ for (i in 1:nrow(Cmat))
+ text(OriginC[1] + stretchC * Cmat[i, 1],
+ OriginC[2] + stretchC * Cmat[i, 2], col = Ccol[i],
+ labels = temp200[i], adj = Cadj.arg[i],
+ cex = Ccex[i])
+ }
+ }
+ if (sites) {
+ text(nustar[, 1], nustar[, 2], adj = 0.5,
+ labels = if (is.null(spch)) dimnames(nustar)[[1]] else
+ rep(spch, length = nrow(nustar)), col = scol,
+ cex = scex, font = sfont)
}
- invisible(nustar)
+ }
+ invisible(nustar)
}
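
Besides reindenting, the hunk above replaces the partially matched `leng =` with `length =` in the rep() calls; both resolve to rep()'s `length.out`, but the longer spelling is less fragile. The effect is one graphical setting recycled per species/curve, as in this small illustration:

ncols <- 5                                   # ncol(r.curves), say
pch.use <- rep(c(1, 3), length.out = ncols)  # 1 3 1 3 1
lty.use <- rep("solid", length.out = ncols)  # recycled to 5 entries
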
@@ -2521,59 +2546,59 @@ lvplot.rrvglm <- function(object,
}
if (C) {
- p2 <- nrow(Cmat)
- gapC <- rep(gapC, length.out = p2)
- Cadj <- rep(Cadj, length.out = p2)
- Ccex <- rep(Ccex, length.out = p2)
- Ccol <- rep(Ccol, length.out = p2)
- Clwd <- rep(Clwd, length.out = p2)
- Clty <- rep(Clty, length.out = p2)
- if (length(Clabels) != p2)
- stop("'length(Clabels)' must be equal to ", p2)
- for (ii in 1:p2) {
- arrows(0, 0, Cmat[ii, 1], Cmat[ii, 2],
- lwd = Clwd[ii], lty = Clty[ii], col =Ccol[ii])
- const <- 1 + gapC[ii] / sqrt(Cmat[ii, 1]^2 + Cmat[ii, 2]^2)
- text(const*Cmat[ii, 1], const*Cmat[ii, 2],
- Clabels[ii], cex = Ccex[ii],
- adj=Cadj[ii], col =Ccol[ii])
- }
+ p2 <- nrow(Cmat)
+ gapC <- rep(gapC, length.out = p2)
+ Cadj <- rep(Cadj, length.out = p2)
+ Ccex <- rep(Ccex, length.out = p2)
+ Ccol <- rep(Ccol, length.out = p2)
+ Clwd <- rep(Clwd, length.out = p2)
+ Clty <- rep(Clty, length.out = p2)
+ if (length(Clabels) != p2)
+ stop("'length(Clabels)' must be equal to ", p2)
+ for (ii in 1:p2) {
+ arrows(0, 0, Cmat[ii, 1], Cmat[ii, 2],
+ lwd = Clwd[ii], lty = Clty[ii], col = Ccol[ii])
+ const <- 1 + gapC[ii] / sqrt(Cmat[ii, 1]^2 + Cmat[ii, 2]^2)
+ text(const*Cmat[ii, 1], const*Cmat[ii, 2],
+ Clabels[ii], cex = Ccex[ii],
+ adj = Cadj[ii], col = Ccol[ii])
+ }
}
if (scores) {
- ugrp <- unique(groups)
- nlev <- length(ugrp) # number of groups
- clty <- rep(clty, length.out = nlev)
- clwd <- rep(clwd, length.out = nlev)
- ccol <- rep(ccol, length.out = nlev)
- if (length(spch))
- spch <- rep(spch, length.out = n)
- scol <- rep(scol, length.out = n)
- scex <- rep(scex, length.out = n)
- for (ii in ugrp) {
- gp <- groups == ii
- if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
- length(unique(scol[gp])) != 1 ||
- length(unique(scex[gp])) != 1))
- warning("spch/scol/scex is different for individuals ",
- "from the same group")
-
- temp <- nuhat[gp,, drop = FALSE]
- if (length(spch)) {
- points(temp[, 1], temp[, 2], cex = scex[gp], pch=spch[gp],
- col =scol[gp])
- } else {
- text(temp[, 1], temp[, 2], label = slabels, cex = scex[gp],
- col =scol[gp])
- }
- if (chull.arg) {
- hull <- chull(temp[, 1], temp[, 2])
- hull <- c(hull, hull[1])
- lines(temp[hull, 1], temp[hull, 2],
- type = "b", lty = clty[ii],
- col = ccol[ii], lwd = clwd[ii], pch = " ")
- }
+ ugrp <- unique(groups)
+ nlev <- length(ugrp) # number of groups
+ clty <- rep(clty, length.out = nlev)
+ clwd <- rep(clwd, length.out = nlev)
+ ccol <- rep(ccol, length.out = nlev)
+ if (length(spch))
+ spch <- rep(spch, length.out = n)
+ scol <- rep(scol, length.out = n)
+ scex <- rep(scex, length.out = n)
+ for (ii in ugrp) {
+ gp <- groups == ii
+ if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
+ length(unique(scol[gp])) != 1 ||
+ length(unique(scex[gp])) != 1))
+ warning("spch/scol/scex is different for individuals ",
+ "from the same group")
+
+ temp <- nuhat[gp,, drop = FALSE]
+ if (length(spch)) {
+ points(temp[, 1], temp[, 2], cex = scex[gp], pch = spch[gp],
+ col = scol[gp])
+ } else {
+ text(temp[, 1], temp[, 2], label = slabels, cex = scex[gp],
+ col = scol[gp])
}
+ if (chull.arg) {
+ hull <- chull(temp[, 1], temp[, 2])
+ hull <- c(hull, hull[1])
+ lines(temp[hull, 1], temp[hull, 2],
+ type = "b", lty = clty[ii],
+ col = ccol[ii], lwd = clwd[ii], pch = " ")
+ }
+ }
}
invisible(nuhat)
@@ -2604,7 +2629,9 @@ lvplot.rrvglm <- function(object,
Rank <- object@control$Rank
- latvar.names <- if (Rank>1) paste("latvar", 1:Rank, sep = "") else "latvar"
+ latvar.names <- if (Rank > 1)
+ paste("latvar", 1:Rank, sep = "") else
+ "latvar"
dimnames(Amat) <- list(object@misc$predictors.names, latvar.names)
dimnames(Cmat) <- list(dimnames(Cmat)[[1]], latvar.names)
@@ -2614,15 +2641,15 @@ lvplot.rrvglm <- function(object,
Rank = Rank,
colx2.index = colx2.index)
- if (!is.null(colx1.index)) {
- ans@colx1.index <- colx1.index
- ans@B1 <- B1mat
- }
+ if (!is.null(colx1.index)) {
+ ans@colx1.index <- colx1.index
+ ans@B1 <- B1mat
+ }
- if (object@control$Corner)
- ans@Atilde <- Amat[-c(object@control$Index.corner,
- object@control$str0),, drop = FALSE]
- ans
+ if (object@control$Corner)
+ ans@Atilde <- Amat[-c(object@control$Index.corner,
+ object@control$str0),, drop = FALSE]
+ ans
}
@@ -2639,19 +2666,19 @@ show.Coef.rrvglm <- function(x, ...) {
object <- x
- cat("A matrix:\n")
- print(object@A, ...)
+ cat("A matrix:\n")
+ print(object@A, ...)
- cat("\nC matrix:\n")
- print(object@C, ...)
+ cat("\nC matrix:\n")
+ print(object@C, ...)
- p1 <- length(object@colx1.index)
- if (p1) {
- cat("\nB1 matrix:\n")
- print(object@B1, ...)
- }
+ p1 <- length(object@colx1.index)
+ if (p1) {
+ cat("\nB1 matrix:\n")
+ print(object@B1, ...)
+ }
- invisible(object)
+ invisible(object)
}
@@ -2685,19 +2712,22 @@ setMethod("biplot", "rrvglm", function(x, ...)
-summary.qrrvglm <- function(object,
- varI.latvar = FALSE, reference = NULL, ...) {
+summary.qrrvglm <-
+ function(object,
+ varI.latvar = FALSE, reference = NULL, ...) {
answer <- object
- answer@post$Coef <- Coef(object, varI.latvar = varI.latvar, reference = reference,
- ...) # Store it here; non-elegant
-
- if (length((answer@post$Coef)@dispersion) &&
- length(object@misc$estimated.dispersion) &&
- object@misc$estimated.dispersion)
- answer@dispersion <-
- answer@misc$dispersion <- (answer@post$Coef)@dispersion
-
- as(answer, "summary.qrrvglm")
+ answer@post$Coef <- Coef(object,
+ varI.latvar = varI.latvar,
+ reference = reference,
+ ...) # Store it here; non-elegant
+
+ if (length((answer@post$Coef)@dispersion) &&
+ length(object@misc$estimated.dispersion) &&
+ object@misc$estimated.dispersion)
+ answer@dispersion <-
+ answer@misc$dispersion <- (answer@post$Coef)@dispersion
+
+ as(answer, "summary.qrrvglm")
}
@@ -2766,10 +2796,10 @@ setMethod("show", "Coef.rrvglm", function(object)
str0 = str0, ...)
object.save <- y
if (is(y, "rrvglm")) {
- y <- object.save@y
+ y <- object.save@y
} else {
- y <- as.matrix(y)
- y <- as(y, "matrix")
+ y <- as.matrix(y)
+ y <- as(y, "matrix")
}
if (length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
stop("y must be a matrix with >= 3 rows & columns, ",
@@ -2825,7 +2855,7 @@ setMethod("show", "Coef.rrvglm", function(object)
assign(".grc.df", .grc.df, envir = VGAMenv)
warn.save <- options()$warn
- options(warn = -3) # Suppress the warnings (hopefully, temporarily)
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
answer <- if (is(object.save, "rrvglm")) object.save else
rrvglm(as.formula(str2), family = poissonff,
constraints = cms, control = myrrcontrol,
@@ -2854,22 +2884,23 @@ summary.grc <- function(object, ...) {
-trplot.qrrvglm <- function(object,
- which.species = NULL,
- add = FALSE, show.plot = TRUE,
- label.sites = FALSE,
- sitenames = rownames(object@y),
- axes.equal = TRUE,
- cex = par()$cex,
- col = 1:(nos*(nos-1)/2),
- log = "",
- lty = rep(par()$lty, length.out = nos*(nos-1)/2),
- lwd = rep(par()$lwd, length.out = nos*(nos-1)/2),
- tcol = rep(par()$col, length.out = nos*(nos-1)/2),
- xlab = NULL, ylab = NULL,
- main = "", # "Trajectory plot",
- type = "b",
- check.ok = TRUE, ...) {
+trplot.qrrvglm <-
+ function(object,
+ which.species = NULL,
+ add = FALSE, show.plot = TRUE,
+ label.sites = FALSE,
+ sitenames = rownames(object@y),
+ axes.equal = TRUE,
+ cex = par()$cex,
+ col = 1:(nos*(nos-1)/2),
+ log = "",
+ lty = rep(par()$lty, length.out = nos*(nos-1)/2),
+ lwd = rep(par()$lwd, length.out = nos*(nos-1)/2),
+ tcol = rep(par()$col, length.out = nos*(nos-1)/2),
+ xlab = NULL, ylab = NULL,
+ main = "", # "Trajectory plot",
+ type = "b",
+ check.ok = TRUE, ...) {
coef.obj <- Coef(object) # use defaults for those two arguments
if (coef.obj@Rank != 1)
stop("object must be a rank-1 model")
@@ -2975,7 +3006,7 @@ vcovrrvglm <- function(object, ...) {
vcovqrrvglm <- function(object,
- ITolerances = object@control$EqualTolerances,
+ I.tolerances = object@control$eq.tolerances,
MaxScale = c("predictors", "response"),
dispersion = rep(if (length(sobj@dispersion))
sobj@dispersion else 1,
@@ -2989,7 +3020,7 @@ vcovqrrvglm <- function(object,
stop("can currently only handle MaxScale='predictors'")
sobj <- summary(object)
- cobj <- Coef(object, ITolerances = ITolerances, ...)
+ cobj <- Coef(object, I.tolerances = I.tolerances, ...)
M <- nrow(cobj@A)
dispersion <- rep(dispersion, length.out = M)
if (cobj@Rank != 1)
@@ -3006,16 +3037,16 @@ vcovqrrvglm <- function(object,
okvals <- c(3*M, 2*M+1)
if (all(length(coef(object)) != okvals))
stop("Can only handle intercepts-only model with ",
- "EqualTolerances = FALSE")
+ "eq.tolerances = FALSE")
answer <- NULL
Cov.unscaled <- array(NA, c(3, 3, M), dimnames = list(
c("(Intercept)", "latvar", "latvar^2"),
c("(Intercept)", "latvar", "latvar^2"), dimnames(cobj at D)[[3]]))
for (spp in 1:M) {
- index <- c(M + ifelse(object@control$EqualTolerances, 1, M) + spp,
+ index <- c(M + ifelse(object@control$eq.tolerances, 1, M) + spp,
spp,
- M + ifelse(object@control$EqualTolerances, 1, spp))
+ M + ifelse(object@control$eq.tolerances, 1, spp))
vcov <- Cov.unscaled[,,spp] <-
sobj@cov.unscaled[index, index] # Order is A, D, B1
se2Max <- dvecMax[spp,, drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
@@ -3078,7 +3109,8 @@ setMethod("model.matrix", "qrrvglm", function(object, ...)
-perspqrrvglm <- function(x, varI.latvar = FALSE, reference = NULL,
+perspqrrvglm <-
+ function(x, varI.latvar = FALSE, reference = NULL,
show.plot = TRUE,
xlim = NULL, ylim = NULL,
zlim = NULL, # zlim ignored if Rank == 1
diff --git a/R/family.sur.R b/R/family.sur.R
index 5a321d3..b51c5a1 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -29,7 +29,7 @@
apply.parint <- TRUE
- lmean <- "identity"
+ lmean <- "identitylink"
lsdev <- "loge"
emean <- list()
esdev <- list()
@@ -69,7 +69,7 @@
infos = eval(substitute(function(...) {
- list(Musual = 1, # zz???
+ list(M1 = 1, # zz???
parallel = .parallel ,
multipleResponses = TRUE )
}, list( .parallel = parallel ))),
@@ -104,10 +104,10 @@
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
predictors.names <- if (!length(ddd <- dimnames(y)[[2]]))
@@ -125,13 +125,13 @@
etastart <- matrix(0, n, M)
- Blist.early <- process.constraints(constraints, x, M,
+ Hlist.early <- process.constraints(constraints, x, M,
specialCM = specialCM)
- X.vlm.early <- lm2vlm.model.matrix(x, Blist.early,
+ X.vlm.early <- lm2vlm.model.matrix(x, Hlist.early,
xij = control$xij,
Xm2 = Xm2)
- Hmatrices <- matrix(c(unlist(Blist.early)), nrow = M)
+ Hmatrices <- matrix(c(unlist(Hlist.early)), nrow = M)
jay.index <- 1:ncol(Hmatrices)
@@ -139,7 +139,7 @@
for (jay in 1:ncoly) {
X.lm.jay <- vlm2lm.model.matrix(x.vlm = X.vlm.early,
- Blist = Blist.early,
+ Hlist = Hlist.early,
which.linpred = jay, M = M)
extra$ncols.X.lm[jay] <- ncol(X.lm.jay)
@@ -155,19 +155,19 @@
linkinv = function(eta, extra = NULL) eta,
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lmean , length = ncoly))
temp.names <- predictors.names
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * ncoly)
+ misc$earg <- vector("list", M1 * ncoly)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii]] <- .emean
+ misc$earg[[M1*ii]] <- .emean
}
names(misc$earg) <- temp.names
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$divisor <- .divisor
misc$values.divisor <- round(n / ratio.df)
@@ -257,7 +257,13 @@
ret.ff@loglikelihood <-
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+
+ if (!summation)
+ stop("cannot handle 'summation = FALSE' yet")
+
+
M <- if (is.matrix(y)) ncol(y) else 1
n <- if (is.matrix(y)) nrow(y) else length(y)
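
The Musual -> M1 rename running through this file is bookkeeping only: M1 is the number of linear predictors contributed by each response, so a fit with ncoly responses has M = M1 * ncoly columns in eta. In miniature:

M1    <- 1          # one linear predictor per response (SUR)
ncoly <- 3          # three responses
M     <- M1 * ncoly # eta has 3 columns
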
diff --git a/R/family.survival.R b/R/family.survival.R
index e92c1ae..cfeda14 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,7 +14,7 @@
double.cennormal <-
function(r1 = 0, r2 = 0,
- lmu = "identity",
+ lmu = "identitylink",
lsd = "loge",
imu = NULL, isd = NULL, zero = 2) {
if (!is.Numeric(r1, length.arg = 1, integer.valued = TRUE) ||
@@ -90,15 +90,26 @@
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
sd <- eta2theta(eta[, 2], .lsd, earg = .esd )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
+
+ if (!summation)
+ stop("cannot handle 'summation = FALSE' yet")
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
sum(w * dnorm(y, m = mu, sd = sd, log = TRUE)) +
(if ( .r1 == 0) 0 else {
- z1 <- min((y - mu) / sd); Fz1 = pnorm(z1); .r1 * log(Fz1)}) +
+ z1 <- min((y - mu) / sd)
+ Fz1 <- pnorm(z1)
+ .r1 * log(Fz1)}) +
(if ( .r2 == 0) 0 else {
- z2 <- max((y - mu) / sd); Fz2 = pnorm(z2); .r2 * log1p(-Fz2)})
+ z2 <- max((y - mu) / sd)
+ Fz2 <- pnorm(z2)
+ .r2 * log1p(-Fz2)})
+ }
} , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
@@ -131,16 +142,16 @@
weight=expression({
wz <- matrix(as.numeric(NA), n, dimm(M))
- Q1 <- ifelse(q1 == 0, 1, q1) # Saves division by 0 below; not elegant
- Q2 <- ifelse(q2 == 0, 1, q2) # Saves division by 0 below; not elegant
+ Q.1 <- ifelse(q1 == 0, 1, q1) # Saves division by 0 below; not elegant
+ Q.2 <- ifelse(q2 == 0, 1, q2) # Saves division by 0 below; not elegant
ed2l.dmu2 <- 1 / (sd^2) +
- ((fz1*(z1+fz1/Q1) - fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
- ed2l.dmusd <- ((fz1-fz2 + z1*fz1*(z1+fz1/Q1) -
- z2*fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ((fz1*(z1+fz1/Q.1) - fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
+ ed2l.dmusd <- ((fz1-fz2 + z1*fz1*(z1+fz1/Q.1) -
+ z2*fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
ed2l.dsd2 <- 2 / (sd^2) +
- ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q1) -
- z2^2 *fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q.1) -
+ z2^2 *fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
wz[,iam(1,1,M)] <- w * ed2l.dmu2 * dmu.deta^2
wz[,iam(2,2,M)] <- w * ed2l.dsd2 * dsd.deta^2
@@ -309,15 +320,24 @@ rbisa <- function(n, shape, scale = 1) {
}) , list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
sh <- eta2theta(eta[, 1], .lshape , earg = .eshape )
sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dbisa(x = y, shape = sh, scale = sc, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dbisa(x = y, shape = sh, scale = sc, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
+
vfamily = c("bisa"),
deriv = eval(substitute(expression({
sh <- eta2theta(eta[, 1], .lshape, earg = .eshape)
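
The recurring loglikelihood edit in this file (and across the whole 0.9-4 diff) computes the per-observation terms ll.elts first and sums them only when summation = TRUE. A hedged, generic sketch of the convention, with dnorm() standing in for the family density:

loglik.sketch <- function(y, mu, sd, w = 1, summation = TRUE) {
  ll.elts <- c(w) * dnorm(y, mean = mu, sd = sd, log = TRUE)
  if (summation) sum(ll.elts) else ll.elts  # total, or one term per y
}
loglik.sketch(c(1, 2, 4), mu = 2, sd = 1)                     # a single total
loglik.sketch(c(1, 2, 4), mu = 2, sd = 1, summation = FALSE)  # three elements
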
diff --git a/R/family.ts.R b/R/family.ts.R
index b07ca96..50040cf 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -147,7 +147,7 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
blurb = c("Nested reduced-rank vector autoregressive model AR(",
lag.p, ")\n\n",
"Link: ",
- namesof("mu_t", "identity"),
+ namesof("mu_t", "identitylink"),
", t = ", paste(paste(1:lag.p, coll = ",", sep = ""))),
initialize = eval(substitute(expression({
Ranks. <- .Ranks
@@ -287,7 +287,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
}
- garma <- function(link = "identity",
+ garma <- function(link = "identitylink",
p.ar.lag = 1,
q.ma.lag = 0,
coefstart = NULL,
@@ -372,21 +372,33 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
misc$plag <- plag
}), list( .link = link, .earg = earg ))),
+
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) switch( .link ,
- identity = y - mu,
- loge = w * (y / mu - 1),
- reciprocal = w * (y / mu - 1),
- inverse = w * (y / mu - 1),
- w * (y / mu - (1-y) / (1 - mu))) else
- switch( .link ,
- identity = sum(w * (y - mu)^2),
- loge = sum(w * (-mu + y * log(mu))),
- reciprocal = sum(w * (-mu + y * log(mu))),
- inverse = sum(w * (-mu + y * log(mu))),
- sum(w * (y * log(mu) + (1-y) * log1p(-mu))))
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ if (residuals) {
+ switch( .link ,
+ identitylink = y - mu,
+ loge = w * (y / mu - 1),
+ reciprocal = w * (y / mu - 1),
+ inverse = w * (y / mu - 1),
+ w * (y / mu - (1-y) / (1 - mu)))
+ } else {
+ ll.elts <-
+ switch( .link ,
+ identitylink = c(w) * (y - mu)^2,
+ loge = c(w) * (-mu + y * log(mu)),
+ reciprocal = c(w) * (-mu + y * log(mu)),
+ inverse = c(w) * (-mu + y * log(mu)),
+ c(w) * (y * log(mu) + (1-y) * log1p(-mu)))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link = link, .earg = earg ))),
+
middle2 = eval(substitute(expression({
realfv <- fv
for (ii in 1:plag) {
@@ -401,7 +413,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
vfamily = c("garma", "vglmgam"),
deriv = eval(substitute(expression({
dl.dmu <- switch( .link ,
- identity = y-mu,
+ identitylink = y-mu,
loge = (y - mu) / mu,
reciprocal = (y - mu) / mu,
inverse = (y - mu) / mu,
@@ -430,10 +442,10 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
if (iter == 1)
old.coeffs <- new.coeffs
- X.vlm.save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+ X.vlm.save <- lm2vlm.model.matrix(x, Hlist, xij = control$xij)
vary <- switch( .link ,
- identity = 1,
+ identitylink = 1,
loge = mu,
reciprocal = mu^2,
inverse = mu^2,
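
Renaming the link from "identity" to "identitylink" is why every switch() above needed editing too: an unmatched string in switch() silently falls through to the unnamed default branch instead of raising an error, so a stale branch label would quietly select the wrong deviance or derivative. A small demonstration:

pick.branch <- function(link)
  switch(link,
         identitylink = "least-squares branch",
         loge         = "log-link branch",
         "default branch")
pick.branch("identitylink")  # "least-squares branch"
pick.branch("identity")      # silently returns "default branch"
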
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 1b28310..1de0cbb 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -141,14 +141,24 @@
}), list( .ltheta = ltheta, .lnuvec = lnuvec,
.etheta = etheta, .enuvec = enuvec ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * ((nuvec - 0.5) * log1p(-y^2) -
- nuvec * log1p(-2*Theta*y + Theta^2) -
- lbeta(nuvec + 0.5, 0.5)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * ((nuvec - 0.5) * log1p(-y^2) -
+ nuvec * log1p(-2*Theta*y + Theta^2) -
+ lbeta(nuvec + 0.5, 0.5))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .ltheta = ltheta, .lnuvec = lnuvec,
.etheta = etheta, .enuvec = enuvec ))),
vfamily = c("mccullagh89"),
@@ -245,14 +255,40 @@ hzeta.control <- function(save.weight = TRUE, ...) {
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
alpha <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dhzeta(x = y, alpha = alpha, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dhzeta(x = y, alpha = alpha, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("hzeta"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ alpha <- eta2theta(eta, .link , earg = .earg )
+ rhzeta(nsim * length(alpha), alpha = alpha)
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
alpha <- eta2theta(eta, .link , earg = .earg )
@@ -347,7 +383,13 @@ qhzeta <- function(p, alpha) {
rhzeta <- function(n, alpha) {
- ans <- (runif(n)^(-1/alpha) - 1) / 2
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ length.arg = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ alpha <- rep(alpha, length = use.n)
+ ans <- (runif(use.n)^(-1/alpha) - 1) / 2
ans[alpha <= 0] <- NaN
floor(ans + 1)
}
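
The rewritten rhzeta() now follows base R's r* conventions: a vector n means "use its length", alpha is recycled to that length, and alpha <= 0 yields NaN rather than a hard error. Assuming the definition above:

set.seed(123)
rhzeta(5, alpha = 1.5)         # five draws
rhzeta(4, alpha = c(0.5, 2))   # alpha recycled to length 4
rhzeta(2, alpha = -1)          # NaN NaN, no hard stop
length(rhzeta(c(9, 9, 9), 1))  # 3

The recycling is what lets the new simslot call rhzeta(nsim * length(alpha), alpha = alpha) directly.
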
@@ -471,7 +513,9 @@ rhzeta <- function(n, alpha) {
}
}), list( .ephi = ephi, .lphi = lphi ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
M <- if (is.matrix(eta)) ncol(eta) else 1
probs <- cbind(exp(eta[, -M]), 1)
probs <- prop.table(probs, 1)
@@ -481,8 +525,9 @@ rhzeta <- function(n, alpha) {
ycount <- round(ycount)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ans <- rep(0.0, length.out = n)
omega <- extra$n2
for (jay in 1:M) {
@@ -503,7 +548,7 @@ rhzeta <- function(n, alpha) {
probs[index, jay] + (rrr-1) * phi[index])
}
}
- } # end of jay loop
+ } # end of jay loop
maxomega <- max(omega)
loopOveri <- n < maxomega
@@ -518,7 +563,12 @@ rhzeta <- function(n, alpha) {
ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1) * phi[ind8])
}
}
- sum(ans)
+ ll.elts <- ans
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .ephi = ephi, .lphi = lphi ))),
vfamily = c("dirmultinomial"),
@@ -572,7 +622,7 @@ rhzeta <- function(n, alpha) {
}
}
}
- } # end of jay loop
+ } # end of jay loop
maxomega <- max(omega)
loopOveri <- n < maxomega
if (loopOveri) {
@@ -628,8 +678,8 @@ rhzeta <- function(n, alpha) {
sum(probs[iii, M] * pYiM.ge.rrr / denomM)
wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
sum(probs[iii, jay]^2 * pYij.ge.rrr / denomj)
- } # end of jay loop
- } # end of iii loop
+ } # end of jay loop
+ } # end of iii loop
} else {
for (rrr in 1:maxomega) {
ind5 <- rrr <= omega
@@ -659,8 +709,8 @@ rhzeta <- function(n, alpha) {
probs[ind5, M] * pYiM.ge.rrr / denomM
wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
probs[ind5, jay]^2 * pYij.ge.rrr / denomj
- } # end of jay loop
- } # end of rrr loop
+ } # end of jay loop
+ } # end of rrr loop
}
for (jay in 1:(M-1))
@@ -748,14 +798,24 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
misc$pooled.weight <- pooled.weight
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta, .link , earg = .earg )
M <- if (is.matrix(eta)) ncol(eta) else 1
sumshape <- as.vector(shape %*% rep(1, length.out = M))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
- sum(c(w) * (lgamma(y + shape) - lgamma(shape )))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (lgamma(sumshape) - lgamma(extra$n2 + sumshape )) +
+ c(w) * (lgamma(y + shape) - lgamma(shape ))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link = link, .earg = earg ))),
vfamily = c("dirmul.old"),
deriv = eval(substitute(expression({
@@ -795,23 +855,48 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
-rdiric <- function(n, shape, dimension = NULL) {
-
+rdiric <- function(n, shape, dimension = NULL,
+ is.matrix.shape = FALSE) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- if (!is.numeric(dimension))
- dimension <- length(shape)
- shape <- rep(shape, length.out = dimension)
+ shape.orig <- shape
+
+
+ if (is.matrix.shape) {
+
+ if (!is.matrix(shape))
+ stop("argument 'shape' is not a matrix")
+ if (!is.numeric(dimension))
+ dimension <- ncol(shape)
+
+ n.shape <- nrow(shape)
+ shape <- kronecker(matrix(1, use.n, 1), shape)
+
+ ans <- rgamma(use.n * n.shape * dimension,
+ shape)
+ dim(ans) <- c(use.n * n.shape, dimension)
+ } else {
+ if (!is.numeric(dimension))
+ dimension <- length(shape)
+
+ if (length(shape) != dimension)
+ shape <- rep(shape, length.out = dimension)
- ans <- rgamma(use.n * dimension,
- rep(shape, rep(use.n, dimension)))
- dim(ans) <- c(use.n, dimension)
+ ans <- rgamma(use.n * dimension,
+ rep(shape, rep(use.n, dimension)))
+ dim(ans) <- c(use.n, dimension)
+ }
ans <- ans / rowSums(ans)
+
+ names.shape.orig <- names(shape.orig)
+ if (is.character(names.shape.orig) && !is.matrix.shape)
+ colnames(ans) <- names.shape.orig
+
ans
}
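
With the new is.matrix.shape = TRUE (added to support the dirichlet() simslot), rdiric() stacks n simulated copies of the whole shape matrix, one Dirichlet draw per row. A hedged check, assuming the definition above:

set.seed(1)
shapes <- matrix(c(1, 2, 3,
                   4, 5, 6), nrow = 2, byrow = TRUE)
sims <- rdiric(10, shape = shapes, is.matrix.shape = TRUE)
dim(sims)             # 20 x 3: ten stacked copies of the two shape rows
range(rowSums(sims))  # each row sums to 1 (up to rounding)
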
@@ -883,18 +968,48 @@ rdiric <- function(n, shape, dimension = NULL) {
misc$expected <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta, .link , earg = .earg )
M <- if (is.matrix(eta)) ncol(eta) else 1
sumshape <- as.vector(shape %*% rep(1, length.out = M))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * lgamma(sumshape)) -
- sum(c(w) * lgamma(shape)) +
- sum(c(w) * (shape-1) * log(y))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ (c(w) * lgamma(sumshape)) -
+ (c(w) * lgamma(shape)) +
+ (c(w) * (shape-1) * log(y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("dirichlet"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ M <- ncol(as.matrix(eta))
+ Shape <- eta2theta(eta, .link , earg = .earg )
+ rdiric(nsim, # has a different meaning;
+ shape = as.matrix(Shape),
+ dimension = M,
+ is.matrix.shape = TRUE) # 20140106; This is new
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
shape <- eta2theta(eta, .link , earg = .earg )
@@ -998,7 +1113,7 @@ rdiric <- function(n, shape, dimension = NULL) {
nn <- sum(ok) # Effective length (excludes x < 0 and x = 1 values)
if (nn)
ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
- as.integer(deriv.arg), as.integer(nn), PACKAGE = "VGAM")$ans
+ as.integer(deriv.arg), as.integer(nn))$ans
@@ -1062,7 +1177,8 @@ dzeta <- function(x, p, log = FALSE) {
"Mean: zeta(p) / zeta(p+1), provided p>1\n",
"Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
multipleResponses = TRUE,
zero = .zero ,
link = .link)
@@ -1089,10 +1205,10 @@ dzeta <- function(x, p, log = FALSE) {
predictors.names <-
namesof(mynames1, .link , earg = .earg , tag = FALSE)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
if (!length(etastart)) {
@@ -1122,7 +1238,7 @@ dzeta <- function(x, p, log = FALSE) {
ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- rep( .link , length = ncoly)
names(misc$link) <- mynames1
@@ -1136,11 +1252,19 @@ dzeta <- function(x, p, log = FALSE) {
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
pp <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzeta(x = y, p = pp, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dzeta(x = y, p = pp, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("zetaff"),
@@ -1256,6 +1380,7 @@ pzipf <- function(q, N, s) {
zipf <- function(N = NULL, link = "loge", init.s = NULL) {
+
if (length(N) &&
(!is.Numeric(N, positive = TRUE,
integer.valued = TRUE, length.arg = 1) ||
@@ -1321,14 +1446,40 @@ pzipf <- function(q, N, s) {
misc$N <- extra$N
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
ss <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dzipf(x = y, N = extra$N, s = ss, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("zipf"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ extra <- object at extra
+ ss <- eta2theta(eta, .link , earg = .earg )
+ rzipf(nsim * length(ss), N = extra$N, s = ss)
+ }, list( .link = link, .earg = earg ))),
+
+
+
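For reference, dzipf() normalises by the generalised harmonic number that gharmonic() computes in the deriv slot below; a hedged base-R check with arbitrary N and s:

## P(Y = y) = y^(-s) / H(N, s), where H(N, s) = sum of i^(-s), i = 1..N.
N <- 10; s <- 1.5
H <- sum((1:N)^(-s))
p <- (1:N)^(-s) / H
sum(p) # equals 1, so the support 1..N carries all the mass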
deriv = eval(substitute(expression({
ss <- eta2theta(eta, .link , earg = .earg )
fred1 <- gharmonic(extra$N, ss)
@@ -1352,7 +1503,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
}
- cauchy <- function(llocation = "identity", lscale = "loge",
+ cauchy <- function(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by = 0.2),
imethod = 1, nsimEIM = NULL, zero = 2) {
@@ -1419,7 +1570,8 @@ cauchy.control <- function(save.weight = TRUE, ...) {
ztry <- tan(pi*(iprobs-0.5))
btry <- (qy - loc) / ztry
scal <- median(btry, na.rm = TRUE)
- if (scal <= 0) scal <- 0.1
+ if (scal <= 0)
+ scal <- 0.1
sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
log = TRUE))
}
@@ -1449,77 +1601,112 @@ cauchy.control <- function(save.weight = TRUE, ...) {
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
theta2eta(sca.init, .lscale , earg = .escale ))
}
- }), list( .ilocat = ilocat,
- .elocat = elocat, .llocat = llocat,
- .iscale = iscale, .escale = escale, .lscale = lscale,
- .iprobs = iprobs, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocat , earg = .elocat )
- }, list( .llocat = llocat,
- .elocat = elocat ))),
- last = eval(substitute(expression({
- misc$expected <- TRUE
- misc$link <- c("location" = .llocat , "scale" =.lscale)
- misc$earg <- list("location" = .elocat , "scale" = .escale )
- misc$imethod <- .imethod
- }), list( .escale = escale, .elocat = elocat,
- .imethod = imethod,
- .llocat = llocat, .lscale = lscale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dcauchy(x = y, loc=locat, sc=myscale, log = TRUE))
- }
- }, list( .escale = escale, .lscale = lscale,
- .elocat = elocat, .llocat = llocat ))),
- vfamily = c("cauchy"),
- deriv = eval(substitute(expression({
- location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- dlocation.deta <- dtheta.deta(location, .llocat , earg = .elocat )
- dscale.deta <- dtheta.deta(myscale, .lscale , earg = .escale )
- Z <- (y-location) / myscale
+ }), list( .ilocat = ilocat,
+ .elocat = elocat, .llocat = llocat,
+ .iscale = iscale, .escale = escale, .lscale = lscale,
+ .iprobs = iprobs, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
+ last = eval(substitute(expression({
+ misc$expected <- TRUE
+ misc$link <- c("location" = .llocat , "scale" = .lscale )
+ misc$earg <- list("location" = .elocat , "scale" = .escale )
+ misc$imethod <- .imethod
+ }), list( .escale = escale, .elocat = elocat,
+ .imethod = imethod,
+ .llocat = llocat, .lscale = lscale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dcauchy(x = y, location = locat, scale = myscale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+ vfamily = c("cauchy"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ rcauchy(nsim * length(myscale), location = locat, scale = myscale)
+ }, list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+
+
+
+
+
+
+
+
+ deriv = eval(substitute(expression({
+ location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ dlocation.deta <- dtheta.deta(location, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(myscale, .lscale , earg = .escale )
+ Z <- (y-location) / myscale
+ dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
+ dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
+ c(w) * cbind(dl.dlocation * dlocation.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+ weight = eval(substitute(expression({
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dthetas.detas <- cbind(dlocation.deta, dscale.deta)
+ if (length( .nsimEIM )) {
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rcauchy(n, loc = location, scale = myscale)
+ Z <- (ysim-location) / myscale
dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocat, .llocat = llocat ))),
- weight = eval(substitute(expression({
- run.varcov <- 0
- ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dlocation.deta, dscale.deta)
- if (length( .nsimEIM )) {
- for (ii in 1:( .nsimEIM )) {
- ysim <- rcauchy(n, loc = location, scale = myscale)
- Z <- (ysim-location) / myscale
- dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
- dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
- rm(ysim)
- temp3 <- matrix(c(dl.dlocation, dl.dscale), n, 2)
- run.varcov <- ((ii-1) * run.varcov +
- temp3[, ind1$row.index] *
- temp3[, ind1$col.index]) / ii
- }
- wz <- if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ rm(ysim)
+ temp3 <- matrix(c(dl.dlocation, dl.dscale), n, 2)
+ run.varcov <- ((ii-1) * run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
+ }
+ wz <- if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz <- wz * dthetas.detas[, ind1$row] *
- dthetas.detas[, ind1$col]
- wz <- c(w) * matrix(wz, n, dimm(M))
- } else {
- wz <- cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
- dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
- wz <- c(w) * wz[, 1:M] # diagonal wz
- }
+ wz <- wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
+ wz <- c(w) * matrix(wz, n, dimm(M))
+ } else {
+ wz <- cbind(matrix(0.5 / myscale^2, n, 2), matrix(0, n, 1)) *
+ dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz <- c(w) * wz[, 1:M] # diagonal wz
+ }
- wz
- }), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
- .elocat = elocat, .llocat = llocat ))))
+ wz
+ }), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
+ .elocat = elocat, .llocat = llocat ))))
}
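The weight slot's update run.varcov <- ((ii-1) * run.varcov + new) / ii is an online mean over the nsimEIM simulated score cross-products. A hedged sketch of that identity with scalar data:

## After the loop, 'run' equals mean(xs): the same online-mean update
## the simulated EIM applies to each cross-product column.
run <- 0
xs <- rnorm(5)
for (ii in seq_along(xs))
  run <- ((ii - 1) * run + xs[ii]) / ii
stopifnot(isTRUE(all.equal(run, mean(xs))))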
@@ -1528,7 +1715,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
- cauchy1 <- function(scale.arg = 1, llocation = "identity",
+ cauchy1 <- function(scale.arg = 1, llocation = "identitylink",
ilocation = NULL, imethod = 1) {
@@ -1604,16 +1791,44 @@ cauchy.control <- function(save.weight = TRUE, ...) {
}), list( .scale.arg = scale.arg, .elocat = elocat,
.llocat = llocat ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
locat <- eta2theta(eta, .llocat , earg = .elocat )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dcauchy(x = y, loc=locat, scale = .scale.arg,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dcauchy(x = y, loc = locat, scale = .scale.arg ,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .scale.arg = scale.arg, .elocat = elocat,
.llocat = llocat ))),
vfamily = c("cauchy1"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ rcauchy(nsim * length(locat), location = locat, scale = .scale.arg )
+ }, list( .scale.arg = scale.arg, .elocat = elocat,
+ .llocat = llocat ))),
+
+
deriv = eval(substitute(expression({
locat <- eta2theta(eta, .llocat , earg = .elocat )
temp <- (y-locat)/.scale.arg
@@ -1636,7 +1851,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
- logistic1 <- function(llocation = "identity",
+ logistic1 <- function(llocation = "identitylink",
scale.arg = 1, imethod = 1) {
if (!is.Numeric(scale.arg, length.arg = 1, positive = TRUE))
stop("'scale.arg' must be a single positive number")
@@ -1690,17 +1905,44 @@ cauchy.control <- function(save.weight = TRUE, ...) {
}), list( .llocat = llocat,
.elocat = elocat, .scale.arg = scale.arg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
locat <- eta2theta(eta, .llocat , earg = .elocat )
- zedd <- (y-locat)/.scale.arg
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dlogis(x = y, locat = locat,
- scale = .scale.arg, log = TRUE))
+ zedd <- (y-locat) / .scale.arg
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dlogis(x = y, location = locat,
+ scale = .scale.arg , log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llocat = llocat,
.elocat = elocat, .scale.arg = scale.arg ))),
vfamily = c("logistic1"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ rlogis(nsim * length(locat),
+ location = locat, scale = .scale.arg )
+ }, list( .llocat = llocat,
+ .elocat = elocat, .scale.arg = scale.arg ))),
+
+
+
deriv = eval(substitute(expression({
locat <- eta2theta(eta, .llocat , earg = .elocat )
@@ -1723,8 +1965,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
erlang <-
function(shape.arg, link = "loge",
- imethod = 1, zero = NULL)
-{
+ imethod = 1, zero = NULL) {
if (!is.Numeric(shape.arg, length.arg = 1,
integer.valued = TRUE, positive = TRUE))
@@ -1753,12 +1994,13 @@ cauchy.control <- function(save.weight = TRUE, ...) {
"Variance: shape * scale^2"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -1778,10 +2020,10 @@ cauchy.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -1814,7 +2056,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
.shape.arg * sc
}, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -1824,22 +2066,50 @@ cauchy.control <- function(save.weight = TRUE, ...) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$shape.arg <- .shape.arg
}), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
sc <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * (( .shape.arg - 1) * log(y) - y / sc -
- .shape.arg * log(sc) - lgamma( .shape.arg )))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (( .shape.arg - 1) * log(y) - y / sc -
+ .shape.arg * log(sc) - lgamma( .shape.arg ))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
vfamily = c("erlang"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Scale <- eta2theta(eta, .link , earg = .earg )
+ rgamma(nsim * length(Scale), shape = .shape.arg , scale = Scale )
+ }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+
+
+
+
+
deriv = eval(substitute(expression({
sc <- eta2theta(eta, .link , earg = .earg )
dl.dsc <- (y / sc - .shape.arg) / sc
@@ -1886,6 +2156,7 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
}
+
rbort <- function(n, Qsize = 1, a = 0.5) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
@@ -1900,11 +2171,11 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
stop("bad input for argument 'a'")
N <- use.n
- qsize <- rep(Qsize, length.out = N);
+ qsize <- rep(Qsize, length.out = N)
a <- rep(a, length.out = N)
totqsize <- qsize
fini <- (qsize < 1)
- while(any(!fini)) {
+ while (any(!fini)) {
additions <- rpois(sum(!fini), a[!fini])
qsize[!fini] <- qsize[!fini] + additions
totqsize[!fini] <- totqsize[!fini] + additions
@@ -1978,14 +2249,40 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
misc$Qsize <- .Qsize
}), list( .link = link, .earg = earg, .Qsize = Qsize ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dbort(x = y, Qsize = .Qsize, a = aa, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dbort(x = y, Qsize = .Qsize , a = aa, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg, .Qsize = Qsize ))),
vfamily = c("borel.tanner"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta, .link , earg = .earg )
+ rbort(nsim * length(aa), Qsize = .Qsize , a = aa)
+ }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
+
+
+
+
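The simslot above draws from rbort(), defined earlier in this hunk; a hedged sanity check of the Borel-Tanner mean Qsize / (1 - a), with arbitrary values and assuming a < 1:

## Sample mean of rbort() draws against the theoretical Q / (1 - a).
Q <- 1; a <- 0.3
y <- rbort(10000, Qsize = Q, a = a)
c(mean(y), Q / (1 - a)) # the two should be close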
deriv = eval(substitute(expression({
aa <- eta2theta(eta, .link , earg = .earg )
dl.da <- (y - .Qsize) / aa - y
@@ -2047,9 +2344,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
namesof("a", link, earg = earg), "\n\n",
"Mean: 1/(1-2*a)"),
initialize = eval(substitute(expression({
- if (any(y < 1) ||
- any((y+1)/2 != round((y+1)/2)))
- warning("response should be positive, odd and integer-valued")
+ if (any(y < 1) ||
+ any((y+1)/2 != round((y+1)/2)))
+ warning("response should be positive, odd and integer-valued")
w.y.check(w = w, y = y)
@@ -2085,11 +2382,19 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
misc$earg <- list(a = .earg )
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dfelix(x = y, a = aa, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dfelix(x = y, a = aa, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("felix"),
@@ -2209,23 +2514,59 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- mu <- eta2theta(eta[, 1], .lmu , .emu )
- m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- phi <- eta2theta(eta[, 2], .lphi , .ephi )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- shape1 <- phi * m1u
- shape2 <- (1 - m1u) * phi
- zedd <- (y - .A) / ( .B - .A)
- sum(c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
- log = TRUE) -
- log( abs( .B - .A ))))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ shape1 <- phi * m1u
+ shape2 <- (1 - m1u) * phi
+ zedd <- (y - .A) / ( .B - .A)
+ ll.elts <-
+ c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
+ log = TRUE) -
+ log( abs( .B - .A )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
.emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
vfamily = "betaff",
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+
+ eta <- predict(object)
+ mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
+ shape1 <- phi * m1u
+ shape2 <- (1 - m1u) * phi
+ .A + ( .B - .A ) *
+ rbeta(nsim * length(shape1), shape1 = shape1, shape2 = shape2)
+ }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
+ .stdbeta = stdbeta ))),
+
+
+
+
+
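The simslot's A + (B - A) * rbeta(...) transform maps standard Beta draws onto the (A, B) support; a hedged check with arbitrary endpoints and shapes:

## All rescaled draws land strictly inside (A, B).
A <- -1; B <- 2
z <- rbeta(10000, shape1 = 2, shape2 = 3)
range(A + (B - A) * z) # within (-1, 2)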
deriv = eval(substitute(expression({
mu <- eta2theta(eta[, 1], .lmu , .emu )
phi <- eta2theta(eta[, 2], .lphi , .ephi )
@@ -2354,7 +2695,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
linkinv = eval(substitute(function(eta, extra = NULL) {
shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
- .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
+ .A + ( .B - .A ) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
@@ -2365,19 +2706,51 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
zedd <- (y - .A ) / ( .B - .A )
- sum(c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
- shape2 = shapes[, 2],
- log = TRUE) - log( abs( .B - .A ))))
+ ll.elts <-
+ c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
+ shape2 = shapes[, 2],
+ log = TRUE) - log( abs( .B - .A )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = "beta.ab",
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+
+ eta <- predict(object)
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ .A + ( .B - .A ) *
+ rbeta(nsim * length(shapes[, 1]),
+ shape1 = shapes[, 1], shape2 = shapes[, 2])
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+
+
+
deriv = eval(substitute(expression({
shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
@@ -2416,11 +2789,21 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
new("vglmff",
blurb = c("Simple exponential distribution\n",
"Link: log(rate)\n"),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
devy <- -log(y) - 1
devmu <- -log(mu) - y/mu
devi <- 2 * (devy - devmu)
- if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
+ if (residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
},
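The same elements-versus-sum contract now holds for deviance slots; a hedged recomputation of the slot above with made-up values:

## dev.elts reproduces the per-observation deviance pieces.
mu <- c(1, 2, 4); y <- c(0.5, 3, 4); w <- rep(1, 3)
devy <- -log(y) - 1
devmu <- -log(mu) - y / mu
dev.elts <- w * 2 * (devy - devmu)
sum(dev.elts) # the summation = TRUE return value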
initialize = expression({
predictors.names <- "log(rate)"
@@ -2447,13 +2830,14 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- exponential <- function(link = "loge",
- location = 0, expected = TRUE,
- shrinkage.init = 0.95,
- zero = NULL) {
+
+
+
+ exponential <-
+ function(link = "loge", location = 0, expected = TRUE,
+ shrinkage.init = 0.95, zero = NULL) {
if (!is.Numeric(location, length.arg = 1))
stop("bad input for argument 'location'")
-
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
@@ -2461,14 +2845,12 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
earg <- link2list(link)
link <- attr(earg, "function.name")
-
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.Numeric(shrinkage.init, length.arg = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
+ shrinkage.init < 0 || shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
@@ -2482,28 +2864,32 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
"Variance: ",
if (location == 0) "Exponential: mu^2" else
paste("(mu - ", location, ")^2", sep = "")),
-
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
-
deviance = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
devy <- -log(y - .location) - 1
devmu <- -log(mu - .location) - (y - .location) / (mu - .location)
devi <- 2 * (devy - devmu)
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
- } else {
- sum(w * devi)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
}
}, list( .location = location, .earg = earg ))),
initialize = eval(substitute(expression({
-
temp5 <-
w.y.check(w = w, y = y,
ncol.w.max = Inf,
@@ -2515,15 +2901,13 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
y <- temp5$y
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
-
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$Loc <- matrix( .location , n, ncoly, byrow = TRUE)
-
if (any(y <= extra$Loc))
stop("all responses must be greater than ", extra$Loc)
@@ -2534,10 +2918,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (length(mustart) + length(etastart) == 0)
mustart <- matrix(colSums(y * w) / colSums(w),
n, M, byrow = TRUE) * .sinit +
- (1 - .sinit) * y +
- 1 / 8
-
-
+ (1 - .sinit) * y + 1 / 8
if (!length(etastart))
etastart <- theta2eta(1 / (mustart - extra$Loc),
.link , earg = .earg )
@@ -2556,17 +2937,28 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
for (ii in 1:M) {
misc$earg[[ii]] <- .earg
}
-
misc$location <- .location
misc$expected <- .expected
misc$multipleResponses <- TRUE
- misc$Musual <- Musual
+ misc$M1 <- M1
}), list( .link = link, .earg = earg,
.expected = expected, .location = location ))),
linkfun = eval(substitute(function(mu, extra = NULL)
theta2eta(1 / (mu - extra$Loc), .link , earg = .earg ),
list( .link = link, .earg = earg ))),
vfamily = c("exponential"),
+ simslot = eval(substitute(
+ function(object, nsim) {
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ mu <- fitted(object)
+ extra <- object@extra
+ rate <- 1 / (mu - extra$Loc)
+ extra$Loc + rexp(nsim * length(rate), rate = rate) # support is y > Loc
+ }, list( .link = link, .earg = earg ))),
deriv = eval(substitute(expression({
rate <- 1 / (mu - extra$Loc)
dl.drate <- mu - y
@@ -2609,12 +3001,13 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
"Variance: mu (=shape)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -2633,7 +3026,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
M <- if (is.matrix(y)) ncol(y) else 1
- Musual <- 1
+ M1 <- 1
mynames1 <- if (M == 1) "shape" else paste("shape", 1:M, sep = "")
predictors.names <-
@@ -2656,17 +3049,45 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
misc$expected <- TRUE
misc$multipleResponses <- TRUE
- misc$Musual <- Musual
+ misc$M1 <- M1
}), list( .link = link, .earg = earg ))),
linkfun = eval(substitute(function(mu, extra = NULL)
theta2eta(mu, .link , earg = .earg )),
list( .link = link, .earg = earg )),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgamma(x = y, shape = mu, scale = 1, log = TRUE))
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgamma(x = y, shape = mu, scale = 1, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
},
vfamily = c("gamma1"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ mu <- fitted(object) # mu is the shape parameter here
+ rgamma(nsim * length(mu), shape = mu, scale = 1)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
+
deriv = eval(substitute(expression({
shape <- mu
dl.dshape <- log(y) - digamma(shape)
@@ -2761,16 +3182,46 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
}), list( .lrate = lrate, .lshape = lshape,
.erate = erate, .eshape = eshape))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgamma(x = y, shape = shape, rate=rate, log = TRUE))
- }
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgamma(x = y, shape = shape, rate = rate, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lrate = lrate, .lshape = lshape,
.erate = erate, .eshape = eshape))),
vfamily = c("gamma2.ab"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ rgamma(nsim * length(shape), shape = shape, rate = rate)
+ }, list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape))),
+
+
+
+
+
deriv = eval(substitute(expression({
rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
@@ -2803,6 +3254,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
+
+
+
gamma2 <-
function(lmu = "loge", lshape = "loge",
imethod = 1, ishape = NULL,
@@ -2811,6 +3265,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
+ if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
+ stop("argument 'deviance.arg' must be TRUE or FALSE")
+
+
apply.parint <- FALSE
lmu <- as.list(substitute(lmu))
@@ -2859,20 +3317,21 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
apply.int = .apply.parint )
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
constraints <- cm.zero.vgam(constraints, x, z.Index, M)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
temp5 <-
w.y.check(w = w, y = y,
@@ -2892,7 +3351,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
is.Numeric( .zero , length.arg = 1) && .zero != -2)
stop("argument zero = -2 is required")
- M <- Musual * ncol(y)
+ M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
@@ -2903,7 +3362,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
predictors.names <-
c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
namesof(temp2.names, .lshape , earg = .eshape, tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
@@ -2934,16 +3393,16 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
cbind(theta2eta(mymu, .lmu , earg = .emu ),
theta2eta(init.shape, .lshape , earg = .eshape ))
etastart <-
- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
.emu = emu, .eshape = eshape,
.parallel = parallel, .apply.parint = apply.parint,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
@@ -2961,11 +3420,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .emu
- misc$earg[[Musual*ii ]] <- .eshape
+ misc$earg[[M1*ii-1]] <- .emu
+ misc$earg[[M1*ii ]] <- .eshape
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$parallel <- .parallel
@@ -2979,29 +3438,62 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
}, list( .lmu = lmu, .emu = emu ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
mymu <- mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
- shapemat <- eta2theta(eta[, Musual * (1:NOS), drop = FALSE],
+ shapemat <- eta2theta(eta[, M1 * (1:NOS), drop = FALSE],
.lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgamma(x = y,
- shape = c(shapemat),
- scale = c(mymu/shapemat),
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dgamma(x = y,
+ shape = c(shapemat),
+ scale = c(mymu / shapemat),
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lmu = lmu, .lshape = lshape,
.emu = emu, .eshape = eshape))),
vfamily = c("gamma2"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmu , earg = .emu )
+ shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+ rgamma(nsim * length(shape),
+ shape = c(shape),
+ scale = c(mymu/shape))
+ }, list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
- NOS <- ncol(eta) / Musual
+ M1 <- 2
+ NOS <- ncol(eta) / M1
- mymu <- eta2theta(eta[, Musual * (1:NOS) - 1],
+ mymu <- eta2theta(eta[, M1 * (1:NOS) - 1],
.lmu , earg = .emu )
- shape <- eta2theta(eta[, Musual * (1:NOS)],
+ shape <- eta2theta(eta[, M1 * (1:NOS)],
.lshape , earg = .eshape )
dl.dmu <- shape * (y / mymu - 1) / mymu
@@ -3013,7 +3505,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dshape * dshape.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lmu = lmu, .lshape = lshape,
.emu = emu, .eshape = eshape))),
weight = eval(substitute(expression({
@@ -3021,8 +3513,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
ned2l.dshape2 <- trigamma(shape) - 1 / shape
wz <- matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
- wz[, Musual*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
- wz[, Musual*(1:NOS) ] <- ned2l.dshape2 * dshape.deta^2
+ wz[, M1*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
+ wz[, M1*(1:NOS) ] <- ned2l.dshape2 * dshape.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
@@ -3031,24 +3523,32 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- if (deviance.arg) ans@deviance <- eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (deviance.arg)
+ ans@deviance <- eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
stop("cannot handle matrix 'w' yet")
- Musual <- 2
+ M1 <- 2
NOS <- ncol(eta) / 2
temp300 <- eta[, 2*(1:NOS), drop = FALSE]
shape <- eta2theta(temp300, .lshape , earg = .eshape )
devi <- -2 * (log(y/mu) - y/mu + 1)
if (residuals) {
- warning("not 100% sure about these deviance residuals!")
- sign(y - mu) * sqrt(abs(devi) * w)
- } else
- sum(c(w) * devi)
+ warning("not 100% sure about these deviance residuals!")
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
}, list( .lshape = lshape )))
ans
}
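gamma2's simslot converts the (mu, shape) parameterisation to rgamma()'s scale = mu / shape, so the simulated mean matches mu; a hedged check with arbitrary values:

## E(Y) = shape * (mu / shape) = mu under this reparameterisation.
mu <- 5; shp <- 2
y <- rgamma(100000, shape = shp, scale = mu / shp)
c(mean(y), mu) # sample mean close to mu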
@@ -3088,12 +3588,13 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
"Variance: mu * (1 + mu) = (1 - prob) / prob^2"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -3115,10 +3616,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("prob", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -3151,7 +3652,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -3161,7 +3662,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$expected <- .expected
@@ -3171,14 +3672,39 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.iprob = iprob,
.expected = expected, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
prob <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgeom(x = y, prob = prob, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dgeom(x = y, prob = prob, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = c("geometric"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ prob <- eta2theta(eta, .link , earg = .earg )
+ rgeom(nsim * length(prob), prob = prob)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
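rgeom() counts failures before the first success, consistent with the moments quoted in this family's blurb; a hedged check with an arbitrary prob:

## E(Y) = (1 - prob) / prob and Var(Y) = (1 - prob) / prob^2.
p <- 0.25
y <- rgeom(100000, prob = p)
c(mean(y), (1 - p) / p, var(y), (1 - p) / p^2)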
deriv = eval(substitute(expression({
prob <- eta2theta(eta, .link , earg = .earg )
@@ -3274,6 +3800,10 @@ rbetageom <- function(n, shape1, shape2) {
+
+
+
+
negbinomial.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -3301,7 +3831,12 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- alternate.derivs = FALSE # 20130823; added for 'nbcanlink'
+ alternate.derivs <- FALSE # 20130823; added for 'nbcanlink'
+
+
+ if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
+ stop("argument 'deviance.arg' must be TRUE or FALSE")
+
lmuuu <- as.list(substitute(lmu))
@@ -3367,7 +3902,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
if ( .parallel && ncol(cbind(y)) > 1)
@@ -3377,13 +3912,20 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
constraints = constraints)
}), list( .parallel = parallel, .zero = zero ))),
+
+
+
+
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero)
}, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
temp5 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
@@ -3395,6 +3937,10 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
y <- temp5$y
+
+
+
+
assign("CQO.FastAlgorithm",
( .lmuuu == "loge") && ( .lsize == "loge"),
envir = VGAMenv)
@@ -3413,14 +3959,14 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
stop("number of columns of prior-'weights' is greater than ",
"the number of responses")
- M <- Musual * ncol(y)
+ M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
predictors.names <-
c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
.lmuuu, earg = .emuuu, tag = FALSE),
namesof(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
.lsize , earg = .esize , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
if (is.null( .nsimEIM )) {
save.weight <- control$save.weight <- FALSE
@@ -3451,7 +3997,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
}
- } # of for (iii)
+ } # of for (iii)
if ( is.Numeric( .k.init )) {
kay.init <- matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
@@ -3485,19 +4031,20 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
cbind(theta2eta(mu.init , link = .lmuuu , earg = newemu ),
theta2eta(kay.init, link = .lsize , earg = .esize ))
etastart <-
- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
.mu.init = imu,
+ .deviance.arg = deviance.arg,
.k.init = isize, .probs.y = probs.y,
.sinit = shrinkage.init, .nsimEIM = nsimEIM,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- eta.k <- eta[, Musual * (1:NOS) , drop = FALSE]
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -3510,7 +4057,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- eta2theta(eta[, Musual * (1:NOS) - 1, drop = FALSE], .lmuuu ,
+ eta2theta(eta[, M1 * (1:NOS) - 1, drop = FALSE], .lmuuu ,
earg = newemu)
}, list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize))),
@@ -3530,8 +4077,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- newemu
- misc$earg[[Musual*ii ]] <- .esize
+ misc$earg[[M1*ii-1]] <- newemu
+ misc$earg[[M1*ii ]] <- .esize
}
misc$cutoff <- .cutoff
@@ -3548,7 +4095,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
.imethod = imethod ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- Musual <- 2
+ M1 <- 2
newemu <- .emuuu
@@ -3568,23 +4115,25 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
eta.temp <- cbind(eta.temp, eta.kayy)
- eta.temp[, interleave.VGAM(ncol(eta.temp), M = Musual), drop = FALSE]
+ eta.temp[, interleave.VGAM(ncol(eta.temp), M = M1), drop = FALSE]
}, list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
.isize = isize ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
-
- eta.k <- eta[, Musual*(1:NOS), drop = FALSE]
- if ( .lsize == "loge") {
- bigval <- 68
- eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
- eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
- }
- kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+
+ eta.k <- eta[, M1*(1:NOS), drop = FALSE]
+ if ( .lsize == "loge") {
+ bigval <- 68
+ eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
+ eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
+ }
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -3599,19 +4148,81 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lsize = lsize,
.lmuuu = lmuuu, .emuuu = emuuu, .esize = esize))),
vfamily = c("negbinomial"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ muuuu <- cbind(eta2theta(eta[, c(TRUE, FALSE)], .lmuuu , earg = .emuuu ))
+ kmat <- cbind(eta2theta(eta[, c(FALSE, TRUE)], .lsize , earg = .esize ))
+ rnbinom(nsim * length(muuuu), mu = muuuu, size = kmat)
+ }, list( .lmuuu = lmuuu, .lsize = lsize,
+ .emuuu = emuuu, .esize = esize ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
- NOS <- ncol(eta) / Musual
+
+
+
+
+ if ( iter == 1 && .deviance.arg ) {
+ if (control$criterion != "coefficients" ||
+ control$half.step)
+ warning("Argument 'criterion' should be 'coefficients' ",
+ "or 'half.step' should be 'FALSE' when ",
+ "'deviance.arg = TRUE'")
+
+
+
+
+
+ low.index <- ifelse(names(constraints)[1] == "(Intercept)", 2, 1)
+ if (low.index <= length(constraints))
+ for (iii in low.index:length(constraints)) {
+ conmat <- constraints[[iii]]
+ if (any(conmat[c(FALSE, TRUE), ] != 0))
+ stop("argument 'deviance.arg' should only be TRUE for NB-2 models; ",
+ "non-zero elements detected for the 'size' parameter." )
+ }
+ }
+
+
+
+
+
+
+
+
+
+
+
+ M1 <- 2
+ NOS <- ncol(eta) / M1
M <- ncol(eta)
- eta.k <- eta[, Musual*(1:NOS) , drop = FALSE]
+ eta.k <- eta[, M1*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
@@ -3666,7 +4277,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
}
- myderiv <- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv <- myderiv[, interleave.VGAM(M, M = M1)]
if ( .alternate.derivs || ( .lmuuu == "nbcanlink")) { # 20130823 added
@@ -3676,6 +4287,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
myderiv
}), list( .lmuuu = lmuuu, .lsize = lsize,
.alternate.derivs = alternate.derivs,
+ .deviance.arg = deviance.arg,
.emuuu = emuuu, .esize = esize))),
weight = eval(substitute(expression({
@@ -3687,12 +4299,12 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
as.double(kmat), as.double(mu), as.double( .cutoff ),
as.integer(n), ok = as.integer(1), as.integer(NOS),
sumpdf = double(1), as.double( .Machine$double.eps ),
- as.integer( .Maxiter ), PACKAGE = "VGAM")
+ as.integer( .Maxiter ))
if (fred2$ok != 1)
stop("error in Fortran subroutine exnbin9")
dim(fred2$ans) <- c(n, NOS)
ned2l.dk2 <- -fred2$ans - 1/kmat + 1/(kmat+mu)
- wz[, Musual*(1:NOS)] <- dk.deta2^2 * ned2l.dk2
+ wz[, M1*(1:NOS)] <- dk.deta2^2 * ned2l.dk2
@@ -3707,19 +4319,19 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
(ysim + kmat) / (mu + kmat) +
1 + log(kmat / (kmat + mu))
run.varcov <- run.varcov + dl.dk^2
- } # end of for loop
+ } # end of for loop
run.varcov <- cbind(run.varcov / .nsimEIM )
ned2l.dk2 <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz[, Musual*(1:NOS)] <- ned2l.dk2 * dk.deta2^2
- } # end of else
+ wz[, M1*(1:NOS)] <- ned2l.dk2 * dk.deta2^2
+ } # end of else
ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
- wz[, Musual*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
+ wz[, M1*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
@@ -3727,7 +4339,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if ( .lmuuu == "nbcanlink") {
if ( iter%% 2 == 0) {
- wz[, Musual*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
+ wz[, M1*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
} else {
}
}
@@ -3738,8 +4350,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if ( FALSE && .lmuuu == "nbcanlink") { # 20130823 FALSE added
if ( iter%% 2 == 1)
- wz[, Musual*(1:NOS)-1] <-
- wz[, Musual*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2 * 1 # 20130823
+ wz[, M1*(1:NOS)-1] <-
+ wz[, M1*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2 * 1 # 20130823
if (FALSE)
wz <- cbind(wz,
@@ -3762,33 +4374,46 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- if (deviance.arg) ans@deviance = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
+ if (deviance.arg) {
+ ans@deviance <- eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
- if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
- stop("cannot handle matrix 'w' yet")
- temp300 <- eta[, Musual*(1:NOS), drop = FALSE]
- if ( .lsize == "loge") {
- bigval <- 68
- temp300[temp300 > bigval] <- bigval
- temp300[temp300 < -bigval] <- -bigval
- } else stop("can only handle the 'loge' link")
- kmat <- eta2theta(temp300, .lsize , earg = .esize )
- devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
- (y + kmat) * log((mu + kmat) / (kmat + y)))
- if (residuals) {
- sign(y - mu) * sqrt(abs(devi) * w)
+
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+
+ if (residuals) {
+ stop("this part of the function has not been written yet.")
+ } else {
+ size <- kmat
+ dev.elts <- 2 * c(w) *
+ (y * log(pmax(1, y) / mu) -
+ (y + size) * log((y + size) / (mu + size)))
+ if (summation) {
+ sum(dev.elts)
} else {
- sum(c(w) * devi)
+ dev.elts
}
- }, list( .lsize = lsize, .emuuu = emuuu,
- .esize = esize)))
+ }
+ }, list( .lsize = lsize, .esize = esize,
+ .lmuuu = lmuuu, .emuuu = emuuu )))
+
+
+
+
+
+ }
+
+
+
+
ans
}
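The NB-2 restriction enforced in the deriv slot reflects the mu/size parameterisation, whose variance is quadratic in the mean; a hedged check with arbitrary values:

## Var(Y) = mu + mu^2 / size for rnbinom()'s mu/size form (NB-2).
mu <- 4; k <- 2
y <- rnbinom(100000, mu = mu, size = k)
c(var(y), mu + mu^2 / k) # sample variance near theory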
@@ -3810,10 +4435,13 @@ polya.control <- function(save.weight = TRUE, ...) {
iprob = NULL, isize = NULL,
probs.y = 0.75,
nsimEIM = 100,
- deviance.arg = FALSE, imethod = 1,
+ imethod = 1,
shrinkage.init = 0.95, zero = -2) {
+ deviance.arg <- FALSE # 20131212; for now
+
+
if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
stop("bad input for argument 'iprob'")
@@ -3858,18 +4486,19 @@ polya.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(function.name == c("cqo", "cao")))
stop("polya() does not work with cqo() or cao(). ",
"Try negbinomial()")
@@ -3887,7 +4516,7 @@ polya.control <- function(save.weight = TRUE, ...) {
- M <- Musual * ncol(y)
+ M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
predictors.names <-
@@ -3961,7 +4590,7 @@ polya.control <- function(save.weight = TRUE, ...) {
cbind(theta2eta(prob.init, .lprob , earg = .eprob),
theta2eta(kayy.init, .lsize , earg = .esize))
etastart <-
- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
}
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
@@ -3970,11 +4599,11 @@ polya.control <- function(save.weight = TRUE, ...) {
.sinit = shrinkage.init, .nsimEIM = nsimEIM, .zero = zero,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
.lprob , earg = .eprob)
- kmat <- eta2theta(eta[, Musual*(1:NOS)- 0, drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS)- 0, drop = FALSE],
.lsize , earg = .esize)
kmat / (kmat + pmat)
}, list( .lprob = lprob, .eprob = eprob,
@@ -3991,8 +4620,8 @@ polya.control <- function(save.weight = TRUE, ...) {
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .eprob
- misc$earg[[Musual*ii ]] <- .esize
+ misc$earg[[M1*ii-1]] <- .eprob
+ misc$earg[[M1*ii ]] <- .esize
}
misc$isize <- .isize
@@ -4000,7 +4629,7 @@ polya.control <- function(save.weight = TRUE, ...) {
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
misc$shrinkage.init <- .sinit
- misc$Musual <- 2
+ misc$M1 <- 2
misc$multipleResponses <- TRUE
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
@@ -4010,32 +4639,61 @@ polya.control <- function(save.weight = TRUE, ...) {
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
- .lprob , earg = .eprob)
- temp300 <- eta[, Musual*(1:NOS) , drop = FALSE]
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lprob , earg = .eprob)
+ temp300 <- eta[, M1*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
temp300 <- ifelse(temp300 > bigval, bigval, temp300)
temp300 <- ifelse(temp300 < -bigval, -bigval, temp300)
}
kmat <- eta2theta(temp300, .lsize , earg = .esize)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lsize = lsize, .lprob = lprob,
.esize = esize, .eprob = eprob ))),
vfamily = c("polya"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+ rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
+ }, list( .lprob = lprob, .lsize = lsize,
+ .eprob = eprob, .esize = esize ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
- NOS <- ncol(eta) / Musual
+ M1 <- 2
+ NOS <- ncol(eta) / M1
M <- ncol(eta)
- pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
+ pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
.lprob , earg = .eprob)
- temp3 <- eta[, Musual*(1:NOS) , drop = FALSE]
+ temp3 <- eta[, M1*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
temp3 <- ifelse(temp3 > bigval, bigval, temp3)
@@ -4049,15 +4707,15 @@ polya.control <- function(save.weight = TRUE, ...) {
dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
dthetas.detas <- cbind(dprob.deta, dkayy.deta)
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
myderiv <- c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = Musual)]
+ myderiv[, interleave.VGAM(M, M = M1)]
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize))),
weight = eval(substitute(expression({
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
mumat <- as.matrix(mu)
@@ -4083,18 +4741,18 @@ polya.control <- function(save.weight = TRUE, ...) {
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+ dThetas.detas[, M1 * (spp. - 1) + ind1$col]
- for (jay in 1:Musual)
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
+ for (jay in 1:M1)
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay,
M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
}
- } # End of for (spp.) loop
+ } # End of for (spp.) loop
@@ -4104,11 +4762,13 @@ polya.control <- function(save.weight = TRUE, ...) {
- if (deviance.arg) ans@deviance <- eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS <- ncol(eta) / Musual
- temp300 <- eta[, Musual*(1:NOS), drop = FALSE]
+ if (deviance.arg)
+ ans@deviance <- eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ temp300 <- eta[, M1*(1:NOS), drop = FALSE]
@@ -4127,15 +4787,22 @@ polya.control <- function(save.weight = TRUE, ...) {
}
kayy <- eta2theta(temp300, .lsize , earg = .esize)
devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
- (y + kayy) * log((mu + kayy) / (kayy + y)))
- if (residuals)
- sign(y - mu) * sqrt(abs(devi) * w) else
- sum(c(w) * devi)
- }, list( .lsize = lsize, .eprob = eprob,
- .esize = esize)))
+ (y + kayy) * log((mu + kayy) / (kayy + y)))
+ if (residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else {
+ dev.elts <- c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
+ }, list( .lsize = lsize, .eprob = eprob,
+ .esize = esize)))
- ans
-} # End of polya()
+ ans
+} # End of polya()
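polya() works with rnbinom()'s prob/size pair rather than mu/size; a hedged reminder of the implied mean, with arbitrary values:

## For prob p and size k, E(Y) = k * (1 - p) / p.
p <- 0.4; k <- 3
y <- rnbinom(100000, prob = p, size = k)
c(mean(y), k * (1 - p) / p)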
@@ -4146,12 +4813,21 @@ polya.control <- function(save.weight = TRUE, ...) {
"Link: log(lambda)",
"\n",
"Variance: lambda"),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
nz <- y > 0
devi <- - (y - mu)
devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(c(w) * devi)
+ if (residuals) {
+ sign(y - mu) * sqrt(2 * abs(devi) * w)
+ } else {
+ dev.elts <- 2 * c(w) * devi
+ if (summation) {
+ sum(dev.elts)
+ } else {
+ dev.elts
+ }
+ }
},
initialize = expression({
if (ncol(cbind(w)) != 1)
@@ -4232,7 +4908,7 @@ polya.control <- function(save.weight = TRUE, ...) {
namesof("df", ldof, earg = edof), "\n",
"Variance: df / (df - 2) if df > 2\n"),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
tol1 = .tol1 )
}, list( .tol1 = tol1 ))),
initialize = eval(substitute(expression({
@@ -4276,14 +4952,44 @@ polya.control <- function(save.weight = TRUE, ...) {
}), list( .ldof = ldof,
.edof = edof, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Dof <- eta2theta(eta, .ldof , earg = .edof )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dt(x = y, df = Dof, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dt(x = y, df = Dof, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .ldof = ldof, .edof = edof ))),
vfamily = c("studentt"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
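+    # Simulation slot: draws nsim responses per observation from the
+    # fitted Student-t model, for use by VGAM's simulate() methods.
+    # e.g. (sketch): simulate(vglm(y ~ 1, studentt, data = d), nsim = 8)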
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Dof <- eta2theta(eta, .ldof , earg = .edof )
+ rt(nsim * length(Dof), df = Dof)
+ }, list( .ldof = ldof, .edof = edof ))),
+
+
+
+
+
+
deriv = eval(substitute(expression({
Dof <- eta2theta(eta, .ldof , earg = .edof )
ddf.deta <- dtheta.deta(theta = Dof, .ldof , earg = .edof )
@@ -4338,7 +5044,7 @@ polya.control <- function(save.weight = TRUE, ...) {
- studentt3 <- function(llocation = "identity",
+ studentt3 <- function(llocation = "identitylink",
lscale = "loge",
ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
@@ -4392,15 +5098,15 @@ polya.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
- infos = eval(substitute(function(...) {
- list(Musual = 3,
- zero = .zero)
- }, list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ zero = .zero)
+ }, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
@@ -4415,8 +5121,8 @@ polya.control <- function(save.weight = TRUE, ...) {
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$Musual <- Musual
- M <- Musual * ncoly #
+ extra$M1 <- M1
+ M <- M1 * ncoly #
mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
@@ -4426,7 +5132,7 @@ polya.control <- function(save.weight = TRUE, ...) {
namesof(mynames2, .lsca , earg = .esca , tag = FALSE),
namesof(mynames3, .ldof , earg = .edof , tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
if (!length(etastart)) {
init.loc <- if (length( .iloc )) .iloc else {
@@ -4460,7 +5166,7 @@ polya.control <- function(save.weight = TRUE, ...) {
mat3 <- matrix(theta2eta(init.dof, .ldof , earg = .edof ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2, mat3)
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
.lsca = lsca, .esca = esca, .isca = isca,
@@ -4468,33 +5174,33 @@ polya.control <- function(save.weight = TRUE, ...) {
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
NOS <- extra$NOS
- Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
+ M1 <- extra$M1
+ Loc <- eta2theta(eta[, M1*(1:NOS)-2], .lloc , earg = .eloc )
+ Dof <- eta2theta(eta[, M1*(1:NOS)-0], .ldof , earg = .edof )
Loc[Dof <= 1] <- NA
Loc
}, list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.ldof = ldof, .edof = edof ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lloc , length = NOS),
rep( .lsca , length = NOS),
rep( .ldof , length = NOS))
- misc$link <- misc$link[interleave.VGAM(Musual * NOS, M = Musual)]
+ misc$link <- misc$link[interleave.VGAM(M1 * NOS, M = M1)]
temp.names <- c(mynames1, mynames2, mynames3)
- temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1 * NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .eloc
- misc$earg[[Musual*ii-1]] <- .esca
- misc$earg[[Musual*ii ]] <- .edof
+ misc$earg[[M1*ii-2]] <- .eloc
+ misc$earg[[M1*ii-1]] <- .esca
+ misc$earg[[M1*ii ]] <- .edof
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -4503,27 +5209,60 @@ polya.control <- function(save.weight = TRUE, ...) {
.ldof = ldof, .edof = edof,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
- Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca , earg = .esca )
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
+ M1 <- extra$M1
+ Loc <- eta2theta(eta[, M1*(1:NOS)-2], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, M1*(1:NOS)-1], .lsca , earg = .esca )
+ Dof <- eta2theta(eta[, M1*(1:NOS)-0], .ldof , earg = .edof )
zedd <- (y - Loc) / Sca
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.ldof = ldof, .edof = edof ))),
vfamily = c("studentt3"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
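+    # eta has interleaved columns (loc1, sca1, dof1, loc2, ...), so the
+    # recycled logical indices below pick out each parameter's columns.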
+ Loc <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lsca , earg = .esca )
+ Dof <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .ldof , earg = .edof )
+
+ Loc + Sca * rt(nsim * length(Dof), df = Dof)
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .ldof = ldof, .edof = edof ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
NOS <- extra$NOS
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
- Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca , earg = .esca )
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
+ Loc <- eta2theta(eta[, M1*(1:NOS)-2], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, M1*(1:NOS)-1], .lsca , earg = .esca )
+ Dof <- eta2theta(eta[, M1*(1:NOS)-0], .ldof , earg = .edof )
dloc.deta <- cbind(dtheta.deta(theta = Loc, .lloc , earg = .eloc ))
dsca.deta <- cbind(dtheta.deta(theta = Sca, .lsca , earg = .esca ))
@@ -4541,7 +5280,7 @@ polya.control <- function(save.weight = TRUE, ...) {
ans <- c(w) * cbind(dl.dloc * dloc.deta,
dl.dsca * dsca.deta,
dl.ddof * ddof.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -4576,23 +5315,23 @@ polya.control <- function(save.weight = TRUE, ...) {
c(w) * ned2l.dshape2 * 0,
c(w) * ned2l.dshape.dscale * dsca.deta * ddof.deta,
c(w) * ned2l.dshape.dlocat * dloc.deta * ddof.deta),
- dim = c(n, M / Musual, 6))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 6))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
if (FALSE) {
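+  # Never executed: an element-by-element construction of wz, retained
+  # for reference alongside the arwz2wz() call above.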
wz <- matrix(0.0, n, dimm(M))
- wz[, Musual*(1:NOS) - 2] <- ned2l.dlocat2 * dloc.deta^2
- wz[, Musual*(1:NOS) - 1] <- ned2l.dscale2 * dsca.deta^2
- wz[, Musual*(1:NOS) - 0] <- ned2l.dshape2 * ddof.deta^2
+ wz[, M1*(1:NOS) - 2] <- ned2l.dlocat2 * dloc.deta^2
+ wz[, M1*(1:NOS) - 1] <- ned2l.dscale2 * dsca.deta^2
+ wz[, M1*(1:NOS) - 0] <- ned2l.dshape2 * ddof.deta^2
for (ii in ((1:NOS) - 1)) {
ind3 <- 1 + ii
- wz[, iam(ii*Musual + 1, ii*Musual + 3, M = M)] <-
+ wz[, iam(ii*M1 + 1, ii*M1 + 3, M = M)] <-
ned2l.dshape.dlocat[, ind3] *
dloc.deta[, ind3] * ddof.deta[, ind3]
- wz[, iam(ii*Musual + 2, ii*Musual + 3, M = M)] <-
+ wz[, iam(ii*M1 + 2, ii*M1 + 3, M = M)] <-
ned2l.dshape.dscale[, ind3] *
dsca.deta[, ind3] * ddof.deta[, ind3]
}
@@ -4614,7 +5353,7 @@ polya.control <- function(save.weight = TRUE, ...) {
studentt2 <- function(df = Inf,
- llocation = "identity",
+ llocation = "identitylink",
lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
@@ -4660,15 +5399,15 @@ polya.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
- infos = eval(substitute(function(...) {
- list(Musual = 2,
- zero = .zero)
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ zero = .zero)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
temp5 <-
@@ -4682,8 +5421,8 @@ polya.control <- function(save.weight = TRUE, ...) {
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$Musual <- Musual
- M <- Musual * ncoly #
+ extra$M1 <- M1
+ M <- M1 * ncoly #
mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
@@ -4691,7 +5430,7 @@ polya.control <- function(save.weight = TRUE, ...) {
c(namesof(mynames1, .lloc , earg = .eloc , tag = FALSE),
namesof(mynames2, .lsca , earg = .esca , tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
if (!length(etastart)) {
@@ -4711,7 +5450,7 @@ polya.control <- function(save.weight = TRUE, ...) {
mat2 <- matrix(theta2eta(init.sca, .lsca , earg = .esca ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2)
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
.lsca = lsca, .esca = esca, .isca = isca,
@@ -4719,8 +5458,8 @@ polya.control <- function(save.weight = TRUE, ...) {
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
NOS <- extra$NOS
- Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS) - 1], .lloc , earg = .eloc )
+ M1 <- extra$M1
+ Loc <- eta2theta(eta[, M1*(1:NOS) - 1], .lloc , earg = .eloc )
Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
Loc[Dof <= 1] <- NA
Loc
@@ -4728,20 +5467,20 @@ polya.control <- function(save.weight = TRUE, ...) {
.lsca = lsca, .esca = esca,
.doff = doff ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lloc , length = NOS),
rep( .lsca , length = NOS))
temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1 * NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .eloc
- misc$earg[[Musual*ii-0]] <- .esca
+ misc$earg[[M1*ii-1]] <- .eloc
+ misc$earg[[M1*ii-0]] <- .esca
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$simEIM <- TRUE
misc$df <- .doff
misc$imethod <- .imethod
@@ -4752,26 +5491,61 @@ polya.control <- function(save.weight = TRUE, ...) {
.doff = doff,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc , earg = .eloc )
- Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca , earg = .esca )
+ M1 <- extra$M1
+ Loc <- eta2theta(eta[, M1*(1:NOS)-1], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, M1*(1:NOS)-0], .lsca , earg = .esca )
Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
zedd <- (y - Loc) / Sca
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.doff = doff ))),
vfamily = c("studentt2"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+    extra <- object@extra
+ NOS <- extra$NOS
+ Loc <- eta2theta(eta[, c(TRUE, FALSE)], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, c(FALSE, TRUE)], .lsca , earg = .esca )
+ Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
+
+ Loc + Sca * rt(nsim * length(Sca), df = Dof)
+ }, list( .lloc = lloc, .eloc = eloc,
+ .lsca = lsca, .esca = esca,
+ .doff = doff ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
NOS <- extra$NOS
- Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc , earg = .eloc )
- Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca , earg = .esca )
+ Loc <- eta2theta(eta[, M1*(1:NOS)-1], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, M1*(1:NOS)-0], .lsca , earg = .esca )
Dof <- matrix( .doff , n, NOS, byrow = TRUE)
dlocat.deta <- dtheta.deta(theta = Loc, .lloc , earg = .eloc )
@@ -4786,7 +5560,7 @@ polya.control <- function(save.weight = TRUE, ...) {
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -4804,8 +5578,8 @@ polya.control <- function(save.weight = TRUE, ...) {
ned2l.dscale2 <- 2.0 * const2 / Sca^2 # 2.0 seems to work
    wz <- matrix(as.numeric(NA), n, M)  # Here M == 2; wz is diagonal
- wz[, Musual*(1:NOS) - 1] <- ned2l.dlocat2 * dlocat.deta^2
- wz[, Musual*(1:NOS) ] <- ned2l.dscale2 * dscale.deta^2
+ wz[, M1*(1:NOS) - 1] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, M1*(1:NOS) ] <- ned2l.dscale2 * dscale.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lloc = lloc, .eloc = eloc,
@@ -4837,12 +5611,13 @@ polya.control <- function(save.weight = TRUE, ...) {
namesof("df", link, earg = earg, tag = FALSE)),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -4863,10 +5638,10 @@ polya.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$ncoly <- NOS <- ncoly # Number of species
@@ -4882,7 +5657,7 @@ polya.control <- function(save.weight = TRUE, ...) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -4892,7 +5667,7 @@ polya.control <- function(save.weight = TRUE, ...) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
@@ -4901,13 +5676,40 @@ polya.control <- function(save.weight = TRUE, ...) {
theta2eta(mu, .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
mydf <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * dchisq(x = y, df = mydf, ncp = 0, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dchisq(x = y, df = mydf, ncp = 0, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link = link, .earg = earg ))),
vfamily = "chisq",
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ Dof <- eta2theta(eta, .link , earg = .earg )
+ rchisq(nsim * length(Dof), df = Dof, ncp = 0)
+ }, list( .link = link, .earg = earg ))),
+
+
+
+
deriv = eval(substitute(expression({
mydf <- eta2theta(eta, .link , earg = .earg )
dl.dv <- (log(y / 2) - digamma(mydf / 2)) / 2
@@ -5024,7 +5826,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ stop("argument 'imethod' must be 1 or 2 or 3")
if (!is.Numeric(shrinkage.init, length.arg = 1) ||
shrinkage.init < 0 ||
shrinkage.init > 1)
@@ -5042,7 +5844,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
"(1-y) * mu^2 * (1-mu)^2)],\n",
" 0 < y < 1, 0 < mu < 1, sigma > 0\n\n",
"Links: ",
- namesof("mu", lmu, earg = emu), ", ",
+ namesof("mu", lmu, earg = emu), ", ",
namesof("sigma", lsigma, earg = esigma), "\n\n",
"Mean: mu\n",
"Variance function: V(mu) = mu^3 * (1 - mu)^3"),
@@ -5050,93 +5852,123 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (any(y <= 0.0 | y >= 1.0))
- stop("all 'y' values must be in (0,1)")
+ if (any(y <= 0.0 | y >= 1.0))
+ stop("all 'y' values must be in (0,1)")
w.y.check(w = w, y = y,
Is.positive.y = TRUE)
- predictors.names <- c(
- namesof("mu", .lmu , earg = .emu , tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+ predictors.names <- c(
+ namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("sigma", .lsigma , earg = .esigma, tag = FALSE))
- deeFun <- function(y, mu)
- (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
+ deeFun <- function(y, mu)
+ (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- if (!length(etastart)) {
+ if (!length(etastart)) {
- use.this =
- if ( .imethod == 3) weighted.mean(y, w = w) else
- if ( .imethod == 1) median(y) else
- mean(y, trim = 0.1)
+      use.this <-
+ if ( .imethod == 3) weighted.mean(y, w = w) else
+ if ( .imethod == 1) median(y) else
+ mean(y, trim = 0.1)
- init.mu <- (1 - .sinit) * y + .sinit * use.this
- mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
- sigma.init <- if (length( .isigma )) rep( .isigma, leng = n) else {
- use.this <- deeFun(y, mu=init.mu)
- rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
- if ( .imethod == 1) median(use.this) else
- mean(use.this, trim = 0.1)),
- length = n)
- }
- etastart <-
- cbind(theta2eta(mu.init, .lmu , earg = .emu),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ init.mu <- (1 - .sinit) * y + .sinit * use.this
+ mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
+      sigma.init <- if (length( .isigma )) rep( .isigma , length = n) else {
+        use.this <- deeFun(y, mu = init.mu)
+ rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
+ if ( .imethod == 1) median(use.this) else
+ mean(use.this, trim = 0.1)),
+ length = n)
+ }
+ etastart <-
+ cbind(theta2eta(mu.init, .lmu , earg = .emu ),
+ theta2eta(sigma.init, .lsigma , earg = .esigma ))
}
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma,
.imu = imu, .isigma = isigma,
.sinit = shrinkage.init, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu , earg = .emu)
+ eta2theta(eta[, 1], .lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link <- c(mu = .lmu ,
- sigma = .lsigma)
- misc$earg <- list(mu = .emu ,
- sigma = .esigma)
- misc$imu <- .imu
- misc$isigma <- .isigma
- misc$imethod <- .imethod
- misc$shrinkage.init <- .sinit
+ misc$link <- c(mu = .lmu ,
+ sigma = .lsigma )
+ misc$earg <- list(mu = .emu ,
+ sigma = .esigma )
+ misc$imu <- .imu
+ misc$isigma <- .isigma
+ misc$imethod <- .imethod
+ misc$shrinkage.init <- .sinit
}), list( .lmu = lmu, .lsigma = lsigma,
.imu = imu, .isigma = isigma,
.emu = emu, .esigma = esigma,
.sinit = shrinkage.init, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .lsigma = lsigma, .emu = emu,
.esigma = esigma ))),
vfamily = c("simplex"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ mymu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+ rsimplex(nsim * length(sigma), mu = mymu, dispersion = sigma)
+ }, list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma ))),
+
+
+
+
deriv = eval(substitute(expression({
- deeFun <- function(y, mu)
- (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
- dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
- dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
+ deeFun <- function(y, mu)
+ (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
+ sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu )
+ dsigma.deta <- dtheta.deta(sigma, .lsigma , earg = .esigma )
- dl.dmu <- (y - mu) * (deeFun(y, mu) +
+ dl.dmu <- (y - mu) * (deeFun(y, mu) +
1 / (mu * (1 - mu))^2) / (mu * (1 - mu) * sigma^2)
- dl.dsigma <- (deeFun(y, mu) / sigma^2 - 1) / sigma
- cbind(dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta)
+ dl.dsigma <- (deeFun(y, mu) / sigma^2 - 1) / sigma
+ cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M) # Diagonal!!
- eim11 <- 3 / (mu * (1 - mu)) + 1 / (sigma^2 * (mu * (1 - mu))^3)
- wz[, iam(1, 1, M)] <- eim11 * dmu.deta^2
- wz[, iam(2, 2, M)] <- (2 / sigma^2) * dsigma.deta^2
- c(w) * wz
+ wz <- matrix(0.0, n, M) # Diagonal!!
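+    # eim11: the (1, 1) element of the expected information matrix,
+    # i.e. the expected information for mu.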
+ eim11 <- 3 / (mu * (1 - mu)) + 1 / (sigma^2 * (mu * (1 - mu))^3)
+ wz[, iam(1, 1, M)] <- eim11 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- (2 / sigma^2) * dsigma.deta^2
+ c(w) * wz
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma ))))
}
@@ -5148,7 +5980,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
- rigff <- function(lmu = "identity", llambda = "loge",
+ rigff <- function(lmu = "identitylink", llambda = "loge",
imu = NULL, ilambda = 1) {
@@ -5193,14 +6025,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
lambda.init <- rep(if (length( .ilambda )) .ilambda else
sqrt(var(y)), length = n)
etastart <-
- cbind(theta2eta(mu.init, .lmu , earg = .emu),
+ cbind(theta2eta(mu.init, .lmu , earg = .emu ),
theta2eta(lambda.init, .llambda , earg = .elambda ))
}
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
.imu = imu, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu , earg = .emu)
+ eta2theta(eta[, 1], .lmu , earg = .emu )
}, list( .lmu = lmu,
.emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
@@ -5211,12 +6043,22 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-0.5 * log(y) + 0.5 * log(lambda) -
- (0.5 * lambda/y) * (y - mu)^2))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (-0.5 * log(y) + 0.5 * log(lambda) -
+ (0.5 * lambda/y) * (y - mu)^2)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .llambda = llambda,
.elambda = elambda,
.emu = emu ))),
@@ -5233,7 +6075,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
eval.d3 <- eval(d3)
dl.dthetas <- attr(eval.d3, "gradient")
- dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu )
dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
dtheta.detas <- cbind(dmu.deta, dlambda.deta)
@@ -5249,7 +6091,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
dtheta.detas[, 2]
if (! .expected ) {
- d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
+ d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu )
d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
@@ -5317,11 +6159,20 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
misc$expected <- TRUE
}), list( .link.theta = link.theta , .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
theta <- eta2theta(eta, .link.theta , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
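+        # Log-density, up to an additive constant, of the natural
+        # exponential family generated by the hyperbolic secant.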
+ ll.elts <- c(w) * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link.theta = link.theta , .earg = earg ))),
vfamily = c("hypersecant"),
deriv = eval(substitute(expression({
@@ -5392,12 +6243,22 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
misc$expected <- TRUE
}), list( .link.theta = link.theta , .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
theta <- eta2theta(eta, .link.theta , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
- (-0.5-theta/pi)*log1p(-y )))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(cos(theta)) + (-0.5 + theta/pi) * log(y) +
+ (-0.5 - theta/pi) * log1p(-y ))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link.theta = link.theta , .earg = earg ))),
vfamily = c("hypersecant.1"),
deriv = eval(substitute(expression({
@@ -5475,7 +6336,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda,
.imu = imu, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu , earg = .emu)
+ eta2theta(eta[, 1], .lmu , earg = .emu )
}, list( .lmu = lmu,
.emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
@@ -5487,13 +6348,23 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (-0.5*log(y*(1-y)) - 0.5 * lambda *
- log1p((y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
- lgamma(1+ lambda/2 )))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (-0.5*log(y*(1-y)) - 0.5 * lambda *
+ log1p((y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
+ lgamma(1+ lambda/2 ))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .llambda = llambda,
.emu = emu, .elambda = elambda ))),
vfamily = c("leipnik"),
@@ -5505,7 +6376,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
0.5*digamma((lambda+1)/2) +
0.5*digamma(1+lambda/2))
- dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu )
dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
dtheta.detas <- cbind(dmu.deta, dlambda.deta)
@@ -5527,20 +6398,21 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
dtheta.detas[, 2]
if (!.expected) {
- d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
+ d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu )
d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
}
if (intercept.only) {
- sumw <- sum(w)
- for (ii in 1:ncol(wz))
- wz[, ii] <- sum(wz[, ii]) / sumw
- pooled.weight <- TRUE
- wz <- c(w) * wz # Put back the weights
- } else
- pooled.weight <- FALSE
+ sumw <- sum(w)
+ for (ii in 1:ncol(wz))
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
+ } else {
+ pooled.weight <- FALSE
+ }
wz
}), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
@@ -5626,27 +6498,37 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .llambda = llambda, .lrho = lrho,
.elambda = elambda, .erho = erho ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rho <- eta2theta(eta[, 1], .lrho, earg = .erho)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ rho <- eta2theta(eta[, 1], .lrho , earg = .erho )
lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w) * (log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
- lgamma(y+lambda+1) + y*log(rho) + y*log1p(-rho) +
- lambda*log(rho)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
+ lgamma(y+lambda+1) + y*log(rho) + y*log1p(-rho) +
+ lambda*log(rho))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .llambda = llambda, .lrho = lrho,
.elambda = elambda, .erho = erho ))),
vfamily = c("invbinomial"),
deriv = eval(substitute(expression({
- rho <- eta2theta(eta[, 1], .lrho, earg = .erho)
+ rho <- eta2theta(eta[, 1], .lrho , earg = .erho )
lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
dl.drho <- (y + lambda)/rho - y/(1-rho)
dl.dlambda <- 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
log(rho)
- drho.deta <- dtheta.deta(rho, .lrho, earg = .erho)
+ drho.deta <- dtheta.deta(rho, .lrho , earg = .erho )
dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
c(w) * cbind(dl.drho * drho.deta,
@@ -5781,16 +6663,26 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.use.approx = use.approx,
.etheta = etheta, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
theta <- eta2theta(eta[, 2], .ltheta , earg = .etheta )
index <- (y == 0)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w[index] * (-theta[index])) +
- sum(w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
- (y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
- log(theta[!index]) - lgamma(y[!index]+1)) )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
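+        # Per-observation elements are assembled separately for the
+        # y == 0 and y > 0 cases so that the lengths agree.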
+        ll.elts <- numeric(length(y))
+        ll.elts[ index] <- w[ index] * (-theta[ index])
+        ll.elts[!index] <- w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
+          (y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
+          log(theta[!index]) - lgamma(y[!index]+1))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .ltheta = ltheta, .llambda = llambda,
.etheta = etheta, .elambda = elambda ))),
vfamily = c("genpoisson"),
@@ -5928,8 +6820,8 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}
}), list( .link = link, .earg = earg, .init.k = init.k ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- k <- eta2theta(eta, .link , earg = .earg )
- digamma(k)
+ kay <- eta2theta(eta, .link , earg = .earg )
+ digamma(kay)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- c(k = .link )
@@ -5937,15 +6829,41 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
misc$expected <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kk <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dlgamma(x = y, location = 0, scale = 1,
- k = kk, log = TRUE))
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ kay <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dlgamma(x = y, location = 0, scale = 1,
+ k = kay, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
- }, list( .link = link, .earg = earg ))),
+ }
+ }, list( .link = link, .earg = earg ))),
vfamily = c("lgammaff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ kay <- eta2theta(eta, .link , earg = .earg )
+ rlgamma(nsim * length(kay), location = 0, scale = 1, k = kay)
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
kk <- eta2theta(eta, .link , earg = .earg )
dl.dk <- y - digamma(kk)
@@ -5966,7 +6884,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
lgamma3ff <-
- function(llocation = "identity", lscale = "loge", lshape = "loge",
+ function(llocation = "identitylink", lscale = "loge", lshape = "loge",
ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL) {
if (length(zero) &&
@@ -6056,17 +6974,50 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dlgamma(x = y, locat=aa, scale=bb, k=kk, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dlgamma(x = y, locat = aa, scale = bb, k = kk,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
vfamily = c("lgamma3ff"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ rlgamma(nsim * length(kk), location = aa, scale = bb, k = kk)
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+
+
+
+
deriv = eval(substitute(expression({
a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
b <- eta2theta(eta[, 2], .lscale , earg = .escale )
@@ -6110,7 +7061,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
prentice74 <-
- function(llocation = "identity", lscale = "loge", lshape = "identity",
+ function(llocation = "identitylink", lscale = "loge", lshape = "identitylink",
ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
if (length(zero) &&
@@ -6199,16 +7150,26 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
b <- eta2theta(eta[, 2], .lscale , earg = .escale )
k <- eta2theta(eta[, 3], .lshape , earg = .eshape )
tmp55 <- k^(-2)
doubw <- (y-a)*k/b + digamma(tmp55)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(c(w)*(log(abs(k)) - log(b) - lgamma(tmp55) +
- doubw * tmp55 - exp(doubw )))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(abs(k)) - log(b) - lgamma(tmp55) +
+ doubw * tmp55 - exp(doubw))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
vfamily = c("prentice74"),
@@ -6411,18 +7372,51 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
}), list( .lscale = lscale, .ld = ld, .lk = lk,
.escale = escale, .ed = ed, .ek = ek ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
b <- eta2theta(eta[, 1], .lscale , earg = .escale )
d <- eta2theta(eta[, 2], .ld , earg = .ed )
k <- eta2theta(eta[, 3], .lk , earg = .ek )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dgengamma(x = y, scale = b, d = d, k = k, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dgengamma(x = y, scale = b, d = d, k = k, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale, .ld = ld, .lk = lk,
.escale = escale, .ed = ed, .ek = ek ))),
vfamily = c("gengamma"),
+
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ bbb <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ ddd <- eta2theta(eta[, 2], .ld , earg = .ed )
+ kkk <- eta2theta(eta[, 3], .lk , earg = .ek )
+ rgengamma(nsim * length(kkk), scale = bbb, d = ddd, k = kkk)
+ }, list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+
+
+
+
+
deriv = eval(substitute(expression({
b <- eta2theta(eta[, 1], .lscale , earg = .escale )
d <- eta2theta(eta[, 2], .ld , earg = .ed )
@@ -6528,7 +7522,7 @@ plog <- function(q, prob, log.p = FALSE) {
rlist <- .C("tyee_C_cum8sum",
as.double(onevector), answer = double(N),
as.integer(N), as.double(seqq),
- as.integer(length(onevector)), notok=integer(1), PACKAGE = "VGAM")
+ as.integer(length(onevector)), notok=integer(1))
if (rlist$notok != 0) stop("error in 'cum8sum'")
ans <- if (log.p) log(rlist$answer) else rlist$answer
if (specialCase)
@@ -6564,10 +7558,10 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
mean <- a*prob/(1-prob) # E(Y)
sigma <- sqrt(a * prob * (1 - a * prob)) / (1 - prob) # sd(Y)
ymax <- dlog(x = 1, prob)
- while(ptr2 < use.n) {
+ while (ptr2 < use.n) {
Lower <- 0.5 # A continuity correction is used = 1 - 0.5.
Upper <- mean + 5 * sigma
- while(plog(q = Upper, prob) < 1 - Smallno)
+ while (plog(q = Upper, prob) < 1 - Smallno)
Upper <- Upper + sigma
Upper <- Upper + 0.5
x <- round(runif(2 * use.n, min = Lower, max = Upper))
@@ -6613,12 +7607,13 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
"Mean: a * c / (1 - c)", "\n"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -6638,10 +7633,10 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("c", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -6675,7 +7670,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
names(misc$link) <- mynames1
@@ -6685,23 +7680,49 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
misc$earg[[ii]] <- .earg
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
cc <- eta2theta(eta, .link , earg = .earg )
aa <- -1 / log1p(-cc)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dlog(x = y, prob = -expm1(-1/aa), log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlog(x = y, prob = -expm1(-1/aa), log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
}
+ }
}, list( .link = link, .earg = earg ))),
vfamily = c("logff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ cc <- eta2theta(eta, .link , earg = .earg )
+ aa <- -1 / log1p(-cc)
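+    # Algebraically, -expm1(-1/aa) equals cc, so the simulation matches
+    # dlog()'s 'prob' parameterization exactly.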
+ rlog(nsim * length(aa), prob = -expm1(-1/aa))
+ }, list( .link = link, .earg = earg ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
cc <- eta2theta(eta, .link , earg = .earg )
aa <- -1 / log1p(-cc)
dl.dc <- 1 / ((1 - cc) * log1p(-cc)) + y / cc
@@ -6741,7 +7762,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
if (delta.known) "Link: " else "Links: ",
namesof("gamma", link.gamma, earg = earg),
if (! delta.known)
- c(", ", namesof("delta", "identity", earg = list())),
+ c(", ", namesof("delta", "identitylink", earg = list())),
"\n\n",
"Mean: NA",
"\n"),
@@ -6757,7 +7778,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
predictors.names <-
c(namesof("gamma", .link.gamma, earg = .earg , tag = FALSE),
if ( .delta.known) NULL else
- namesof("delta", "identity", earg = list(), tag = FALSE))
+ namesof("delta", "identitylink", earg = list(), tag = FALSE))
if (!length(etastart)) {
@@ -6794,7 +7815,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
.delta.known = delta.known,
.delta = delta ))),
last = eval(substitute(expression({
- misc$link <- if ( .delta.known) NULL else c(delta = "identity")
+ misc$link <- if ( .delta.known) NULL else c(delta = "identitylink")
misc$link <- c(gamma = .link.gamma, misc$link)
misc$earg <- if ( .delta.known) list(gamma = .earg ) else
list(gamma = .earg , delta = list())
@@ -6804,14 +7825,24 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
.delta.known = delta.known,
.delta = delta ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
eta <- as.matrix(eta)
mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
delta <- if ( .delta.known) .delta else eta[, 2]
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * 0.5 * (log(mygamma) -3 * log(y - delta) -
- mygamma / (y - delta)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+          c(w) * 0.5 * (log(mygamma) - 3 * log(y - delta) -
+ mygamma / (y - delta))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link.gamma = link.gamma, .earg = earg,
.delta.known = delta.known,
.delta = delta ))),
@@ -6979,18 +8010,49 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
- sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dlino(y, shape1 = sh1, shape2 = sh2,
- lambda = lambda, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dlino(y, shape1 = shape1, shape2 = shape2,
+ lambda = lambda, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
vfamily = c("lino"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
+ lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
+ rlino(nsim * length(shape1),
+ shape1 = shape1, shape2 = shape2, lambda = lambda)
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+
+
+
+
deriv = eval(substitute(expression({
sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
@@ -7096,13 +8158,22 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
misc$earg <- list(shape1 = .earg , shape2 = .earg )
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shapes <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * ((shapes[, 1]-1) * log(y) -
- lbeta(shapes[, 1], shapes[, 2]) -
- (shapes[, 2]+shapes[, 1]) * log1p(y)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * ((shapes[, 1]-1) * log(y) -
+ lbeta(shapes[, 1], shapes[, 2]) -
+ (shapes[, 2]+shapes[, 1]) * log1p(y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
vfamily = "betaprime",
@@ -7208,7 +8279,8 @@ rmaxwell <- function(n, a) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -7228,10 +8300,10 @@ rmaxwell <- function(n, a) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("a", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -7251,7 +8323,7 @@ rmaxwell <- function(n, a) {
}, list( .link = link,
.earg = earg ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$earg <- vector("list", M)
names(misc$earg) <- mynames1
@@ -7262,20 +8334,49 @@ rmaxwell <- function(n, a) {
misc$link <- rep( .link , length = ncoly)
names(misc$link) <- mynames1
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
aa <- eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * dmaxwell(x = y, a = aa, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dmaxwell(x = y, a = aa, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link = link,
.earg = earg ))),
vfamily = c("maxwell"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ aa <- eta2theta(eta, .link , earg = .earg )
+ rmaxwell(nsim * length(aa), a = c(aa))
+ }, list( .link = link,
+ .earg = earg ))),
+
+
+
+
deriv = eval(substitute(expression({
aa <- eta2theta(eta, .link , earg = .earg )
@@ -7356,8 +8457,8 @@ qnaka <- function(p, shape, scale = 1, ...) {
EY <- sqrt(scale[ii]/shape[ii]) *
gamma(shape[ii] + 0.5) / gamma(shape[ii])
Upper <- 5 * EY
- while(pnaka(q = Upper, shape = shape[ii],
- scale = scale[ii]) < p[ii])
+ while (pnaka(q = Upper, shape = shape[ii],
+ scale = scale[ii]) < p[ii])
Upper <- Upper + scale[ii]
ans[ii] <- uniroot(f = myfun, lower = 0, upper = Upper,
shape = shape[ii], scale = scale[ii],
@@ -7387,11 +8488,11 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
ptr1 <- 1
ptr2 <- 0
ymax <- dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
- shape = shape, scale = scale)
- while(ptr2 < use.n) {
+ shape = shape, scale = scale)
+ while (ptr2 < use.n) {
EY <- sqrt(scale / shape) * gamma(shape + 0.5) / gamma(shape)
Upper <- EY + 5 * scale
- while(pnaka(q = Upper, shape = shape, scale = scale) < 1 - Smallno)
+ while (pnaka(q = Upper, shape = shape, scale = scale) < 1 - Smallno)
Upper <- Upper + scale
x <- runif(2*use.n, min = 0, max = Upper)
index <- runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
@@ -7466,49 +8567,66 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.ishape = ishape, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
- }, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- last = eval(substitute(expression({
- misc$link <- c(shape = .lshape , scale = .lscale)
- misc$earg <- list(shape = .eshape, scale = .escale )
- misc$expected = TRUE
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * dnaka(x = y, shape = shape, scale = scale, log = TRUE))
- }, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- vfamily = c("nakagami"),
- deriv = eval(substitute(expression({
- shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
- Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- dl.dshape <- 1 + log(shape/Scale) - digamma(shape) +
- 2 * log(y) - y^2 / Scale
- dl.dscale <- -shape/Scale + shape * (y/Scale)^2
- dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
- dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
- c(w) * cbind(dl.dshape * dshape.deta,
- dl.dscale * dscale.deta)
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- weight = eval(substitute(expression({
- d2l.dshape2 <- trigamma(shape) - 1/shape
- d2l.dscale2 <- shape / Scale^2
- wz <- matrix(as.numeric(NA), n, M) # diagonal
- wz[, iam(1, 1, M)] <- d2l.dshape2 * dshape.deta^2
- wz[, iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
- c(w) * wz
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))))
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape = .lshape , scale = .lscale)
+ misc$earg <- list(shape = .eshape, scale = .escale )
+    misc$expected <- TRUE
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dnaka(x = y, shape = shape, scale = scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ vfamily = c("nakagami"),
+
+
+
+
+
+
+
+ deriv = eval(substitute(expression({
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
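+    # Scores of log f(y) = log(2) + shape * log(shape/Scale) - lgamma(shape)
+    #   + (2*shape - 1) * log(y) - shape * y^2 / Scale.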
+ dl.dshape <- 1 + log(shape/Scale) - digamma(shape) +
+ 2 * log(y) - y^2 / Scale
+ dl.dscale <- -shape/Scale + shape * (y/Scale)^2
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ c(w) * cbind(dl.dshape * dshape.deta,
+ dl.dscale * dscale.deta)
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ d2l.dshape2 <- trigamma(shape) - 1/shape
+ d2l.dscale2 <- shape / Scale^2
+ wz <- matrix(as.numeric(NA), n, M) # diagonal
+ wz[, iam(1, 1, M)] <- d2l.dshape2 * dshape.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
+ c(w) * wz
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))))
}
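For illustration only (not part of the upstream patch): a minimal R sketch of what the refactored loglikelihood slot returns, assuming VGAM is attached and using its dnaka()/rnaka() Nakagami functions; the vector w stands in for the family's prior weights.

  library(VGAM)
  set.seed(1)
  y <- rnaka(50, scale = 2, shape = 1.5)   # Nakagami(shape, scale) draws
  w <- rep(1, 50)                          # prior weights
  ll.elts <- c(w) * dnaka(y, scale = 2, shape = 1.5, log = TRUE)
  sum(ll.elts)    # summation = TRUE: a single scalar log-likelihood
  head(ll.elts)   # summation = FALSE: per-observation contributions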
@@ -7590,12 +8708,13 @@ rrayleigh <- function(n, scale = 1) {
"Mean: scale * sqrt(pi / 2)"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 1
+ M1 <- 1
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -7615,10 +8734,10 @@ rrayleigh <- function(n, scale = 1) {
ncoly <- ncol(y)
- Musual <- 1
+ M1 <- 1
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -7638,7 +8757,7 @@ rrayleigh <- function(n, scale = 1) {
}, list( .lscale = lscale, .escale = escale))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <- c(rep( .lscale , length = ncoly))
names(misc$link) <- mynames1
@@ -7648,22 +8767,47 @@ rrayleigh <- function(n, scale = 1) {
misc$earg[[ii]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$multipleResponses <- TRUE
misc$nrfs <- .nrfs
}), list( .lscale = lscale,
.escale = escale, .nrfs = nrfs ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
Scale <- eta2theta(eta, .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * drayleigh(x = y, scale = Scale, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * drayleigh(x = y, scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lscale = lscale, .escale = escale))),
vfamily = c("rayleigh"),
+
+
+
+ simslot =
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+
+ Scale <- fitted(object) / sqrt(pi / 2)
+ rrayleigh(nsim * length(Scale), scale = c(Scale))
+ },
+
+
+
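For illustration only (not part of the upstream patch): the simslot above recovers the scale parameter from the fitted mean, since E(Y) = scale * sqrt(pi/2) for the Rayleigh distribution. A hedged sketch ('rdata' is a hypothetical data frame):

  rdata <- data.frame(y = rrayleigh(100, scale = 2))
  fit <- vglm(y ~ 1, rayleigh, data = rdata)
  fitted(fit)[1] / sqrt(pi / 2)    # roughly the estimated scale (about 2)
  head(simulate(fit, nsim = 2))    # rrayleigh() draws via the simslot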
deriv = eval(substitute(expression({
Scale <- eta2theta(eta, .lscale , earg = .escale )
@@ -7681,8 +8825,11 @@ rrayleigh <- function(n, scale = 1) {
((1 - .nrfs) * d2l.dScale2 + .nrfs * ned2l.dScale2)
+
+
if (intercept.only && .oim.mean ) {
- ave.oim <- weighted.mean(d2l.dScale2, w)
+ ave.oim <- weighted.mean(d2l.dScale2,
+ rep(c(w), length = length(d2l.dScale2)))
if (ave.oim > 0) {
wz <- c(w) * dScale.deta^2 * ave.oim
}
@@ -7942,76 +9089,85 @@ rparetoI <- function(n, scale = 1, shape = 1)
location + Scale * NA
}, list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
.escale = escale, .einequ = einequ, .eshape = eshape))),
- last = eval(substitute(expression({
- misc$link <- c("scale" = .lscale ,
- "inequality" = .linequ,
- "shape" = .lshape)
- misc$earg <- list("scale" = .escale ,
- "inequality" = .einequ,
- "shape" = .eshape )
- misc$location = extra$location # Use this for prediction
- }), list( .lscale = lscale, .linequ = linequ,
- .escale = escale, .einequ = einequ,
- .lshape = lshape,
- .eshape = eshape))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- zedd <- (y - location) / Scale
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dparetoIV(x = y, location = location, scale = Scale,
- inequ = inequ, shape = shape,
- log = TRUE))
- }
- }, list( .lscale = lscale, .linequ = linequ,
- .escale = escale, .einequ = einequ,
- .lshape = lshape,
- .eshape = eshape))),
- vfamily = c("paretoIV"),
- deriv = eval(substitute(expression({
- location = extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- zedd <- (y - location) / Scale
- temp100 <- 1 + zedd^(1/inequ)
- dl.dscale <- (shape - (1+shape) / temp100) / (inequ * Scale)
- dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
- inequ - 1) / inequ
- dl.dshape <- -log(temp100) + 1/shape
- dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
- dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
- dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
- c(w) * cbind(dl.dscale * dscale.deta,
- dl.dinequ * dinequ.deta,
- dl.dshape * dshape.deta)
- }), list( .lscale = lscale, .linequ = linequ,
- .lshape = lshape,
- .escale = escale, .einequ = einequ,
- .eshape = eshape))),
- weight = eval(substitute(expression({
- temp200 <- digamma(shape) - digamma(1) - 1
- d2scale.deta2 <- shape / ((inequ*Scale)^2 * (shape+2))
- d2inequ.deta2 <- (shape * (temp200^2 + trigamma(shape) + trigamma(1)
- ) + 2*(temp200+1)) / (inequ^2 * (shape+2))
- d2shape.deta2 <- 1 / shape^2
- d2si.deta2 <- (shape*(-temp200) -1) / (inequ^2 * Scale * (shape+2))
- d2ss.deta2 <- -1 / ((inequ*Scale) * (shape+1))
- d2is.deta2 <- temp200 / (inequ*(shape+1))
- wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
- wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
- wz[, iam(3, 3, M)] <- dshape.deta^2 * d2shape.deta2
- wz[, iam(1, 2, M)] <- dscale.deta * dinequ.deta * d2si.deta2
- wz[, iam(1, 3, M)] <- dscale.deta * dshape.deta * d2ss.deta2
- wz[, iam(2, 3, M)] <- dinequ.deta * dshape.deta * d2is.deta2
+ last = eval(substitute(expression({
+ misc$link <- c("scale" = .lscale ,
+ "inequality" = .linequ,
+ "shape" = .lshape)
+ misc$earg <- list("scale" = .escale ,
+ "inequality" = .einequ,
+ "shape" = .eshape )
+ misc$location = extra$location # Use this for prediction
+ }), list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ,
+ .lshape = lshape,
+ .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dparetoIV(x = y, location = location, scale = Scale,
+ inequ = inequ, shape = shape,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ,
+ .lshape = lshape,
+ .eshape = eshape))),
+ vfamily = c("paretoIV"),
+ deriv = eval(substitute(expression({
+ location = extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ temp100 <- 1 + zedd^(1/inequ)
+ dl.dscale <- (shape - (1+shape) / temp100) / (inequ * Scale)
+ dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
+ inequ - 1) / inequ
+ dl.dshape <- -log(temp100) + 1/shape
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ c(w) * cbind(dl.dscale * dscale.deta,
+ dl.dinequ * dinequ.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lscale = lscale, .linequ = linequ,
+ .lshape = lshape,
+ .escale = escale, .einequ = einequ,
+ .eshape = eshape))),
+ weight = eval(substitute(expression({
+ temp200 <- digamma(shape) - digamma(1) - 1
+ d2scale.deta2 <- shape / ((inequ*Scale)^2 * (shape+2))
+ d2inequ.deta2 <- (shape * (temp200^2 + trigamma(shape) + trigamma(1)
+ ) + 2*(temp200+1)) / (inequ^2 * (shape+2))
+ d2shape.deta2 <- 1 / shape^2
+ d2si.deta2 <- (shape*(-temp200) -1) / (inequ^2 * Scale * (shape+2))
+ d2ss.deta2 <- -1 / ((inequ*Scale) * (shape+1))
+ d2is.deta2 <- temp200 / (inequ*(shape+1))
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
+ wz[, iam(3, 3, M)] <- dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] <- dscale.deta * dinequ.deta * d2si.deta2
+ wz[, iam(1, 3, M)] <- dscale.deta * dshape.deta * d2ss.deta2
+ wz[, iam(2, 3, M)] <- dinequ.deta * dshape.deta * d2is.deta2
c(w) * wz
- }), list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
- .escale = escale, .einequ = einequ, .eshape = eshape))))
+ }), list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
+ .escale = escale, .einequ = einequ, .eshape = eshape))))
}
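For illustration only (not part of the upstream patch): a minimal intercept-only fit of the paretoIV family, assuming rparetoIV() follows its documented signature; 'pdata' is hypothetical.

  pdata <- data.frame(y = rparetoIV(200, location = 0, scale = 1,
                                    inequality = 1, shape = 2))
  fit <- vglm(y ~ 1, paretoIV(location = 0), data = pdata, trace = TRUE)
  coef(fit, matrix = TRUE)   # scale, inequality, shape on their link scales
  logLik(fit)                # the loglikelihood slot with summation = TRUE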
@@ -8106,16 +9262,25 @@ rparetoI <- function(n, scale = 1, shape = 1)
}), list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
- zedd <- (y - location) / Scale
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dparetoIII(x = y, location = location, scale=Scale,
- inequ=inequ, log = TRUE))
- }
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ , earg = .einequ )
+ zedd <- (y - location) / Scale
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dparetoIII(x = y, location = location, scale = Scale,
+ inequ = inequ, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))),
vfamily = c("paretoIII"),
@@ -8190,92 +9355,101 @@ rparetoI <- function(n, scale = 1, shape = 1)
- predictors.names <-
- c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape", .lshape , earg = .eshape , tag = FALSE))
-
- extra$location <- location <- .location
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape , tag = FALSE))
+
+ extra$location <- location <- .location
+
+ if (any(y <= location))
+ stop("the response must have values > than the 'location' argument")
+
+ if (!length(etastart)) {
+ scale.init <- if (length( .iscale )) .iscale else NULL
+ shape.init <- if (length( .ishape )) .ishape else NULL
+ if (!length(shape.init) || !length(scale.init)) {
+ probs <- (1:4)/5
+ scale.init.0 <- 1
+ ytemp <- quantile(x = log(y-location+scale.init.0),
+ probs = probs)
+ fittemp <- lsfit(x = log1p(-probs), y = ytemp,
+ intercept = TRUE)
+ if (!length(shape.init))
+ shape.init <- max(-1/fittemp$coef["X"], 0.01)
+ if (!length(scale.init))
+ scale.init <- exp(fittemp$coef["Intercept"])
+ }
+ etastart <- cbind(theta2eta(rep(scale.init, length.out = n),
+ .lscale , earg = .escale ),
+ theta2eta(rep(shape.init, length.out = n),
+ .lshape , earg = .eshape ))
+ }
+ }), list( .location = location, .lscale = lscale,
+ .escale = escale, .eshape = eshape,
+ .lshape = lshape, .iscale = iscale, .ishape = ishape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ location + Scale * NA
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ last = eval(substitute(expression({
+ misc$link <- c("scale" = .lscale , "shape" = .lshape)
- if (any(y <= location))
- stop("the response must have values > than the 'location' argument")
+ misc$earg <- list("scale" = .escale , "shape" = .eshape )
- if (!length(etastart)) {
- scale.init <- if (length( .iscale )) .iscale else NULL
- shape.init <- if (length( .ishape )) .ishape else NULL
- if (!length(shape.init) || !length(scale.init)) {
- probs <- (1:4)/5
- scale.init.0 <- 1
- ytemp <- quantile(x = log(y-location+scale.init.0),
- probs = probs)
- fittemp <- lsfit(x = log1p(-probs), y = ytemp,
- intercept = TRUE)
- if (!length(shape.init))
- shape.init <- max(-1/fittemp$coef["X"], 0.01)
- if (!length(scale.init))
- scale.init <- exp(fittemp$coef["Intercept"])
- }
- etastart <- cbind(theta2eta(rep(scale.init, length.out = n),
- .lscale , earg = .escale ),
- theta2eta(rep(shape.init, length.out = n),
- .lshape , earg = .eshape ))
- }
- }), list( .location = location, .lscale = lscale,
- .escale = escale, .eshape = eshape,
- .lshape = lshape, .iscale = iscale, .ishape = ishape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location <- extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- location + Scale * NA
- }, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- last = eval(substitute(expression({
- misc$link <- c("scale" = .lscale , "shape" = .lshape)
-
- misc$earg <- list("scale" = .escale , "shape" = .eshape )
-
- misc$location <- extra$location # Use this for prediction
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- zedd <- (y - location) / Scale
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dparetoII(x = y, location = location, scale=Scale,
- shape = shape, log = TRUE))
- }
- }, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- vfamily = c("paretoII"),
- deriv = eval(substitute(expression({
- location <- extra$location
- Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
- shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- zedd <- (y - location) / Scale
- temp100 <- 1 + zedd
- dl.dscale <- (shape - (1+shape) / temp100) / (1 * Scale)
- dl.dshape <- -log(temp100) + 1/shape
- dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
- dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
- c(w) * cbind(dl.dscale * dscale.deta,
- dl.dshape * dshape.deta)
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))),
- weight = eval(substitute(expression({
- d2scale.deta2 <- shape / (Scale^2 * (shape+2))
- d2shape.deta2 <- 1 / shape^2
- d2ss.deta2 <- -1 / (Scale * (shape+1))
- wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
- wz[, iam(2, 2, M)] <- dshape.deta^2 * d2shape.deta2
- wz[, iam(1, 2, M)] <- dscale.deta * dshape.deta * d2ss.deta2
- c(w) * wz
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape))))
+ misc$location <- extra$location # Use this for prediction
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dparetoII(x = y, location = location, scale = Scale,
+ shape = shape, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ vfamily = c("paretoII"),
+ deriv = eval(substitute(expression({
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ temp100 <- 1 + zedd
+ dl.dscale <- (shape - (1+shape) / temp100) / (1 * Scale)
+ dl.dshape <- -log(temp100) + 1/shape
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ c(w) * cbind(dl.dscale * dscale.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ d2scale.deta2 <- shape / (Scale^2 * (shape+2))
+ d2shape.deta2 <- 1 / shape^2
+ d2ss.deta2 <- -1 / (Scale * (shape+1))
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] <- dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] <- dscale.deta * dshape.deta * d2ss.deta2
+ c(w) * wz
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))))
}
@@ -8396,14 +9570,22 @@ rpareto <- function(n, location, shape) {
misc$location <- extra$location # Use this for prediction
}), list( .lshape = lshape, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
k <- eta2theta(eta, .lshape , earg = .earg )
location <- extra$location
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
- sum(c(w) * (log(k) + k * log(location) - (k+1) * log(y)))
+ ll.elts <- c(w) * (log(k) + k * log(location) - (k+1) * log(y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape, .earg = earg ))),
vfamily = c("paretoff"),
@@ -8609,14 +9791,21 @@ rtruncpareto <- function(n, lower, upper, shape) {
}), list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta, .lshape , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- ans <- sum(c(w) * dtruncpareto(x = y, lower = .lower ,
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dtruncpareto(x = y, lower = .lower ,
upper = .upper ,
- shape = shape, log = TRUE))
- ans
+ shape = shape, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
@@ -8695,11 +9884,21 @@ rtruncpareto <- function(n, lower, upper, shape) {
}), list( .link.lambda = link.lambda, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .link.lambda = link.lambda, .earg = earg ))),
vfamily = "waldff",
deriv = eval(substitute(expression({
@@ -8801,13 +10000,23 @@ rtruncpareto <- function(n, lower, upper, shape) {
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale))),
loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * (log(shape) + log(scale) +
- (shape-1)*log1p(-exp(-scale*y)) - scale*y))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(shape) + log(scale) +
+ (shape-1)*log1p(-exp(-scale*y)) - scale*y)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .lshape = lshape,
.eshape = eshape, .escale = escale))),
vfamily = c("expexp"),
@@ -8940,14 +10149,24 @@ rtruncpareto <- function(n, lower, upper, shape) {
misc$pooled.weight <- pooled.weight
}), list( .lscale = lscale, .escale = escale))),
loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
scale <- eta2theta(eta, .lscale , earg = .escale )
temp7 <- -expm1(-scale*y)
shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * (log(shape) + log(scale) +
- (shape-1)*log1p(-exp(-scale*y)) - scale*y))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * (log(shape) + log(scale) +
+ (shape-1)*log1p(-exp(-scale*y)) - scale*y)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
}, list( .lscale = lscale, .escale = escale))),
vfamily = c("expexp1"),
deriv = eval(substitute(expression({
@@ -8990,7 +10209,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
- logistic2 <- function(llocation = "identity",
+ logistic2 <- function(llocation = "identitylink",
lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1, zero = -2) {
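For illustration only (not part of the upstream patch): "identitylink" replaces the old link name "identity" throughout this release, presumably so the link function no longer masks base::identity().

  identitylink(c(-1, 0, 2.5))   # the identity link: returns its argument unchanged
  identity(c(-1, 0, 2.5))       # base R's identity() is left alone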
@@ -9031,12 +10250,13 @@ rtruncpareto <- function(n, lower, upper, shape) {
"Variance: (pi * scale)^2 / 3"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
zero = .zero )
}, list( .zero = zero ))),
@@ -9055,10 +10275,10 @@ rtruncpareto <- function(n, lower, upper, shape) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
@@ -9067,7 +10287,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
predictors.names <-
c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE),
namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
@@ -9094,7 +10314,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
etastart <- cbind(
theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
}
}), list( .imethod = imethod,
.elocat = elocat, .escale = escale,
@@ -9102,29 +10322,29 @@ rtruncpareto <- function(n, lower, upper, shape) {
.ilocat = ilocat, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
M <- ncol(eta)
- Musual <- 2
- ncoly <- M / Musual
- eta2theta(eta[, (1:ncoly) * Musual - 1], .llocat , earg = .elocat )
+ M1 <- 2
+ ncoly <- M / M1
+ eta2theta(eta[, (1:ncoly) * M1 - 1], .llocat , earg = .elocat )
}, list( .llocat = llocat,
.elocat = elocat ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .llocat , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
temp.names <- c(mynames1, mynames2)[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .elocat
- misc$earg[[Musual*ii ]] <- .escale
+ misc$earg[[M1*ii-1]] <- .elocat
+ misc$earg[[M1*ii ]] <- .escale
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -9132,27 +10352,55 @@ rtruncpareto <- function(n, lower, upper, shape) {
.llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
M <- ncol(eta)
- Musual <- 2
- ncoly <- M / Musual
+ M1 <- 2
+ ncoly <- M / M1
- locat <- eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
- Scale <- eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dlogis(x = y, location = locat,
- scale = Scale, log = TRUE))
+ locat <- eta2theta(eta[, (1:ncoly)*M1-1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, (1:ncoly)*M1 ], .lscale , earg = .escale )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dlogis(x = y, location = locat,
+ scale = Scale, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale))),
vfamily = c("logistic2"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+ rlogis(nsim * length(Scale),
+ location = locat, scale = Scale)
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
- ncoly <- M / Musual
+ M1 <- 2
+ ncoly <- M / M1
- locat <- eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
- Scale <- eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, (1:ncoly)*M1-1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, (1:ncoly)*M1 ], .lscale , earg = .escale )
zedd <- (y - locat) / Scale
ezedd <- exp(-zedd)
@@ -9164,7 +10412,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)[, interleave.VGAM(M, M = Musual)]
+ dl.dscale * dscale.deta)[, interleave.VGAM(M, M = M1)]
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale))),
weight = eval(substitute(expression({
@@ -9172,8 +10420,8 @@ rtruncpareto <- function(n, lower, upper, shape) {
ned2l.dscale2 <- (3 + pi^2) / (9 * Scale^2)
wz <- matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
- wz[, (1:ncoly) * Musual - 1] <- ned2l.dlocat2 * dlocat.deta^2
- wz[, (1:ncoly) * Musual ] <- ned2l.dscale2 * dscale.deta^2
+ wz[, (1:ncoly) * M1 - 1] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, (1:ncoly) * M1 ] <- ned2l.dscale2 * dscale.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .llocat = llocat, .lscale = lscale,
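For illustration only (not part of the upstream patch): the new simslot above unpacks location and scale from alternating eta columns via the c(TRUE, FALSE) / c(FALSE, TRUE) recycling trick. A hedged end-to-end sketch ('ldata' is hypothetical):

  ldata <- data.frame(y = rlogis(100, location = 5, scale = 2))
  fit <- vglm(y ~ 1, logistic2, data = ldata)
  coef(fit, matrix = TRUE)          # location (identitylink), scale (loge)
  sims <- simulate(fit, nsim = 3)   # rlogis() draws via the simslot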
@@ -9239,18 +10487,19 @@ rtruncpareto <- function(n, lower, upper, shape) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 1,
+ list(M1 = 1,
+ Q1 = 1,
zero = .zero)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual <- 1
+ M1 <- 1
if (any(y < 0))
stop("negative values not allowed for the 'negbinomial.size' family")
@@ -9269,7 +10518,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
- M <- Musual * ncol(y)
+ M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
mynames1 <- paste("mu", if (NOS > 1) 1:NOS else "", sep = "")
predictors.names <-
@@ -9303,7 +10552,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
}
- } # of for (iii)
+ } # end of for (iii)
kmat <- matrix( .size , n, NOS, byrow = TRUE)
@@ -9329,9 +10578,9 @@ rtruncpareto <- function(n, lower, upper, shape) {
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 1
+ M1 <- 1
eta <- cbind(eta)
- NOS <- ncol(eta) / Musual
+ NOS <- ncol(eta) / M1
n <- nrow(eta)
kmat <- matrix( .size , n, NOS, byrow = TRUE)
@@ -9373,7 +10622,9 @@ rtruncpareto <- function(n, lower, upper, shape) {
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
mu <- cbind(mu)
y <- cbind(y)
w <- cbind(w)
@@ -9382,27 +10633,52 @@ rtruncpareto <- function(n, lower, upper, shape) {
n <- nrow(eta)
kmat <- matrix( .size , n, NOS, byrow = TRUE)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
ind1 <- is.finite(kmat)
ans1 <- ans2 <- 0
for (kk in 1:NOS) {
ind1 <- is.finite(kmat[, kk])
ans1 <- ans1 +
- sum(w[ind1] * dnbinom(x = y[ind1, kk], mu = mu[ind1, kk],
- size = kmat[ind1, kk], log = TRUE))
+ sum(w[ind1] * dnbinom(x = y[ind1, kk], mu = mu[ind1, kk],
+ size = kmat[ind1, kk], log = TRUE))
ans2 <- ans2 +
- sum(w[!ind1] * dpois(x = y[!ind1, kk], lambda = mu[!ind1, kk],
- log = TRUE))
+ sum(w[!ind1] * dpois(x = y[!ind1, kk],
+ lambda = mu[!ind1, kk],
+ log = TRUE))
}
ans <- ans1 + ans2
- ans
+ ll.elts <- ans
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .size = size ))),
vfamily = c("negbinomial.size"),
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ muuu <- fitted(object)
+ n <- nrow(as.matrix(muuu))
+ NOS <- ncol(as.matrix(muuu))
+ kmat <- matrix( .size , n, NOS, byrow = TRUE)
+ rnbinom(nsim * length(muuu), mu = muuu, size = kmat)
+ }, list( .size = size ))),
+
+
+
+
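For illustration only (not part of the upstream patch): negbinomial.size() fixes the size parameter at a known value so that only mu is estimated; its simslot then draws from rnbinom() with that fixed size. 'ndata' is hypothetical.

  ndata <- data.frame(y = rnbinom(200, mu = 4, size = 1.5))
  fit <- vglm(y ~ 1, negbinomial.size(size = 1.5), data = ndata)
  Coef(fit)                       # estimated mu
  head(simulate(fit, nsim = 2))   # rnbinom() draws via the simslot above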
deriv = eval(substitute(expression({
eta <- cbind(eta)
NOS <- M <- ncol(eta)
diff --git a/R/family.vglm.R b/R/family.vglm.R
index c7ffc2d..5c693c0 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index baae15c..fd9fde4 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,6 +12,7 @@
+
dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
log = FALSE) {
if (length(munb)) {
@@ -435,14 +436,23 @@ rzipois <- function(n, lambda, pstr0 = 0) {
}
}), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE,
- eta, extra = NULL) {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
lambda <- eta2theta(eta, .link)
temp5 <- exp(-lambda)
pstr0 <- (1 - temp5 - extra$sumw / extra$narg) / (1 - temp5)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) *
+ dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .link = link, .earg = earg ))),
@@ -497,12 +507,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -510,7 +521,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -539,7 +550,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
predictors.names <-
c(namesof(mynames1, .lpobs.0, earg = .epobs.0, tag = FALSE),
namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
etastart <-
@@ -552,7 +563,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
.llambda, earg = .elambda )
}
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda,
@@ -568,12 +579,12 @@ rzipois <- function(n, lambda, pstr0 = 0) {
c("mean", "pobs0", "onempobs0"))[1]
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- pobs.0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ pobs.0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs.0, earg = .epobs.0 ))
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.llambda, earg = .elambda ))
@@ -585,7 +596,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -600,47 +612,74 @@ rzipois <- function(n, lambda, pstr0 = 0) {
temp.names <- c(rep( .lpobs.0 , len = NOS),
rep( .llambda , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
names(misc$link) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .epobs.0
- misc$earg[[Musual*ii ]] <- .elambda
+ misc$earg[[M1*ii-1]] <- .epobs.0
+ misc$earg[[M1*ii ]] <- .elambda
}
}), list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- pobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ pobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs.0, earg = .epobs.0))
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.llambda, earg = .elambda ))
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dzapois(x = y, pobs0 = pobs0, lambda = lambda,
- log = TRUE))
+ ll.elts <- c(w) * dzapois(x = y, pobs0 = pobs0, lambda = lambda,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda ))),
vfamily = c("zapoisson"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ pobs0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpobs.0 , earg = .epobs.0 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ rzapois(nsim * length(lambda), lambda = lambda, pobs0 = pobs0)
+ }, list( .lpobs.0 = lpobs.0, .llambda = llambda,
+ .epobs.0 = epobs.0, .elambda = elambda ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
NOS <- extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
- phimat <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ phimat <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs.0, earg = .epobs.0 ))
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.llambda, earg = .elambda ))
dl.dlambda <- y / lambda + 1 / expm1(-lambda)
@@ -662,13 +701,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- cbind(temp3,
c(w) * dl.dlambda * dlambda.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual * NOS)
+ wz <- matrix(0.0, n, M1 * NOS)
@@ -698,7 +737,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
wz[, 1:NOS] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
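For illustration only (not part of the upstream patch): a hedged zero-altered Poisson round trip using rzapois() and the new simslot; 'zdata' is hypothetical.

  zdata <- data.frame(y = rzapois(500, lambda = 3, pobs0 = 0.2))
  fit <- vglm(y ~ 1, zapoisson, data = zdata)
  Coef(fit)                       # estimates of pobs0 and lambda
  head(simulate(fit, nsim = 2))   # rzapois() draws via the simslot above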
@@ -734,19 +773,22 @@ rzipois <- function(n, lambda, pstr0 = 0) {
blurb = c("Zero-altered Poisson ",
"(Bernoulli and positive-Poisson conditional model)\n\n",
"Links: ",
- namesof("lambda", llambda, earg = elambda, tag = FALSE), ", ",
- namesof("onempobs0", lonempobs0, earg = eonempobs0, tag = FALSE), "\n",
+ namesof("lambda", llambda, earg = elambda,
+ tag = FALSE), ", ",
+ namesof("onempobs0", lonempobs0, earg = eonempobs0,
+ tag = FALSE), "\n",
"Mean: onempobs0 * lambda / (1 - exp(-lambda))"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -754,7 +796,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -785,7 +827,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
predictors.names <-
c(namesof(mynames1, .llambda, earg = .elambda , tag = FALSE),
namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
etastart <-
@@ -798,7 +840,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
.llambda, earg = .elambda )
}
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda,
@@ -814,11 +856,11 @@ rzipois <- function(n, lambda, pstr0 = 0) {
c("mean", "pobs0", "onempobs0"))[1]
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.llambda , earg = .elambda ))
- onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0 , earg = .eonempobs0 ))
@@ -830,7 +872,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -845,47 +888,77 @@ rzipois <- function(n, lambda, pstr0 = 0) {
temp.names <- c(rep( .llambda , len = NOS),
rep( .lonempobs0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
names(misc$link) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- names(misc$link)
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .elambda
- misc$earg[[Musual*ii ]] <- .eonempobs0
+ misc$earg[[M1*ii-1]] <- .elambda
+ misc$earg[[M1*ii ]] <- .eonempobs0
}
}), list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.llambda , earg = .elambda ))
- onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0 , earg = .eonempobs0 ))
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(c(w) * dzapois(x = y, lambda = lambda, pobs0 = 1 - onempobs0,
- log = TRUE))
+ ll.elts <-
+ c(w) * dzapois(x = y, lambda = lambda, pobs0 = 1 - onempobs0,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda ))),
vfamily = c("zapoissonff"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempobs0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempobs0 ,
+ earg = .eonempobs0 )
+ rzapois(nsim * length(lambda), lambda = lambda, pobs0 = 1 - onempobs0)
+ }, list( .lonempobs0 = lonempobs0, .llambda = llambda,
+ .eonempobs0 = eonempobs0, .elambda = elambda ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
NOS <- extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
- lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.llambda, earg = .elambda ))
- omphimat <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ omphimat <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0, earg = .eonempobs0 ))
phimat <- 1 - omphimat
@@ -908,13 +981,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- cbind(c(w) * dl.dlambda * dlambda.deta,
temp3)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual * NOS)
+ wz <- matrix(0.0, n, M1 * NOS)
temp5 <- expm1(lambda)
@@ -936,7 +1009,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
wz[, 1 * NOS + (1:NOS)] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
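For illustration only (not part of the upstream patch): zapoissonff models onempobs0 = 1 - pobs0, which is why its simslot passes pobs0 = 1 - onempobs0 to rzapois(); e.g. with onempobs0 = 0.8 about 20% of draws are zero.

  onempobs0 <- 0.8
  mean(rzapois(10000, lambda = 3, pobs0 = 1 - onempobs0) == 0)   # about 0.2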
@@ -1025,13 +1098,14 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -1039,7 +1113,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
))),
initialize = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
if (any(y < 0))
stop("the response must not have negative values")
@@ -1057,7 +1131,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- M <- Musual * ncoly
+ M <- M1 * ncoly
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
@@ -1069,7 +1143,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
@@ -1133,7 +1207,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
etastart <- cbind(theta2eta(pnb0, .lpobs0 , earg = .epobs0 ),
theta2eta(mu.init, .lmunb , earg = .emunb ),
theta2eta(kmat0, .lsize , earg = .esize ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
} # End of if (!length(etastart))
@@ -1152,11 +1226,11 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
c("mean", "pobs0"))[1]
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
- phi0 <- eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
- munb <- eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
+ phi0 <- eta2theta(eta[, M1*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, M1*(1:NOS)-1], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, M1*(1:NOS) ], .lsize , earg = .esize )
pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
@@ -1167,7 +1241,8 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -1180,19 +1255,19 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
misc$link =
c(rep( .lpobs0 , length = NOS),
rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
- M = Musual)]
+ rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS,
+ M = M1)]
temp.names <- c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ mynames3)[interleave.VGAM(M1*NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual*NOS)
+ misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .epobs0
- misc$earg[[Musual*ii-1]] <- .emunb
- misc$earg[[Musual*ii ]] <- .esize
+ misc$earg[[M1*ii-2]] <- .epobs0
+ misc$earg[[M1*ii-1]] <- .emunb
+ misc$earg[[M1*ii ]] <- .esize
}
misc$nsimEIM <- .nsimEIM
@@ -1206,30 +1281,63 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
.nsimEIM = nsimEIM,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 3
- phi0 <- eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
- munb <- eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
- log = TRUE))
+ M1 <- 3
+ phi0 <- eta2theta(eta[, M1*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, M1*(1:NOS)-1], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, M1*(1:NOS) ], .lsize , earg = .esize )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
vfamily = c("zanegbinomial"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ phi0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lsize , earg = .esize )
+ rzanegbin(nsim * length(munb),
+ pobs0 = phi0, munb = munb, size = kmat)
+ }, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
y0 <- extra$y0
- phi0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ phi0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lpobs0 , earg = .epobs0 )
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
skip <- extra$skip.these
@@ -1273,23 +1381,23 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
c(w) * dphi0.deta * (y0 / muphi0 - 1) / (1 - muphi0)
}
ans <- cbind(dl.deta1, dl.deta23)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lpobs0 = lpobs0 , .lmunb = lmunb , .lsize = lsize ,
.epobs0 = epobs0 , .emunb = emunb , .esize = esize ))),
weight = eval(substitute(expression({
- six <- dimm(Musual)
+ six <- dimm(M1)
wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
- Musualm1 <- Musual - 1
+ M1m1 <- M1 - 1
- ind2 <- iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
+ ind2 <- iam(NA, NA, M = M1 - 1, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
@@ -1328,12 +1436,12 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
- run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] <-
- run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] +
- c(small.varcov[, 1:Musualm1])
- run.varcov[, M + (kk-1)*Musual + 2] <-
- run.varcov[, M + (kk-1)*Musual + 2] +
- c(small.varcov[, Musualm1 + 1])
+ run.varcov[, ((kk-1)*M1+2):(kk*M1)] <-
+ run.varcov[, ((kk-1)*M1+2):(kk*M1)] +
+ c(small.varcov[, 1:M1m1])
+ run.varcov[, M + (kk-1)*M1 + 2] <-
+ run.varcov[, M + (kk-1)*M1 + 2] +
+ c(small.varcov[, M1m1 + 1])
} # kk; end of NOS
} # ii; end of nsimEIM
@@ -1346,9 +1454,9 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
- wzind1 <- sort(c( Musual*(1:NOS) - 1,
- Musual*(1:NOS) - 0,
- M + Musual*(1:NOS) - 1))
+ wzind1 <- sort(c( M1*(1:NOS) - 1,
+ M1*(1:NOS) - 0,
+ M + M1*(1:NOS) - 1))
wz[, wzind1] <- c(w) * run.varcov[, wzind1]
@@ -1366,7 +1474,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad
}
}
- wz[, Musual*(1:NOS)-2] <- tmp200
+ wz[, M1*(1:NOS)-2] <- tmp200
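For illustration only (not part of the upstream patch): the M1*(1:NOS)-k indexing used throughout selects each parameter's columns from the interleaved eta matrix; with M1 = 3 linear predictors per response and NOS = 2 responses the columns run (pobs0.1, munb.1, size.1, pobs0.2, munb.2, size.2).

  M1 <- 3; NOS <- 2
  M1 * (1:NOS) - 2   # 1 4 : the pobs0 columns
  M1 * (1:NOS) - 1   # 2 5 : the munb columns
  M1 * (1:NOS) - 0   # 3 6 : the size columns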
@@ -1450,13 +1558,14 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -1464,7 +1573,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
))),
initialize = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
if (any(y < 0))
stop("the response must not have negative values")
@@ -1482,7 +1591,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- M <- Musual * ncoly
+ M <- M1 * ncoly
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
@@ -1496,7 +1605,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
namesof(mynames2, .lsize , earg = .esize , tag = FALSE),
namesof(mynames3, .lonempobs0 , earg = .eonempobs0 ,
tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
@@ -1561,7 +1670,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
cbind(theta2eta(mu.init , .lmunb , earg = .emunb ),
theta2eta(kmat0 , .lsize , earg = .esize ),
theta2eta(1 - pnb0, .lonempobs0 , earg = .eonempobs0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
} # End of if (!length(etastart))
@@ -1580,11 +1689,11 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
c("mean", "pobs0", "onempobs0"))[1]
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
- munb <- eta2theta(eta[, Musual*(1:NOS)-2], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1], .lsize , earg = .esize )
- onempobs0 <- eta2theta(eta[, Musual*(1:NOS) ], .lonempobs0 ,
+ munb <- eta2theta(eta[, M1*(1:NOS)-2], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1], .lsize , earg = .esize )
+ onempobs0 <- eta2theta(eta[, M1*(1:NOS) ], .lonempobs0 ,
earg = .eonempobs0 )
pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
@@ -1597,7 +1706,8 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -1611,18 +1721,18 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS),
rep( .lonempobs0 , length = NOS))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
temp.names <- c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ mynames3)[interleave.VGAM(M1*NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual*NOS)
+ misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .emunb
- misc$earg[[Musual*ii-1]] <- .esize
- misc$earg[[Musual*ii ]] <- .eonempobs0
+ misc$earg[[M1*ii-2]] <- .emunb
+ misc$earg[[M1*ii-1]] <- .esize
+ misc$earg[[M1*ii ]] <- .eonempobs0
}
misc$nsimEIM <- .nsimEIM
@@ -1636,32 +1746,65 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
.nsimEIM = nsimEIM,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 3
- munb <- eta2theta(eta[, Musual*(1:NOS)-2], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1], .lsize , earg = .esize )
- onempobs0 <- eta2theta(eta[, Musual*(1:NOS) ], .lonempobs0 ,
+ M1 <- 3
+ munb <- eta2theta(eta[, M1*(1:NOS)-2], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1], .lsize , earg = .esize )
+ onempobs0 <- eta2theta(eta[, M1*(1:NOS) ], .lonempobs0 ,
earg = .eonempobs0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzanegbin(x = y, pobs0 = 1 - onempobs0,
- munb = munb, size = kmat,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzanegbin(x = y, pobs0 = 1 - onempobs0,
+ munb = munb, size = kmat,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
.eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
vfamily = c("zanegbinomialff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ munb <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lsize , earg = .esize )
+ onempobs0 <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lonempobs0 ,
+ earg = .eonempobs0 )
+
+ rzanegbin(nsim * length(munb),
+ pobs0 = 1 - onempobs0, munb = munb, size = kmat)
+ }, list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
+ .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
y0 <- extra$y0
- munb <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lsize , earg = .esize )
- onempobs0 <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ onempobs0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lonempobs0 , earg = .eonempobs0 )
skip <- extra$skip.these
phi0 <- 1 - onempobs0
@@ -1709,22 +1852,22 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
c(w) * donempobs0.deta * dl.donempobs0
}
ans <- cbind(dl.deta12, dl.deta3)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lonempobs0 = lonempobs0 , .lmunb = lmunb , .lsize = lsize ,
.eonempobs0 = eonempobs0 , .emunb = emunb , .esize = esize ))),
weight = eval(substitute(expression({
- six <- dimm(Musual)
+ six <- dimm(M1)
wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
- Musualm1 <- Musual - 1
+ M1m1 <- M1 - 1
- ind2 <- iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
+ ind2 <- iam(NA, NA, M = M1 - 1, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
@@ -1759,12 +1902,12 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
temp2[, ind2$col.index]
- run.varcov[, ((kk-1)*Musual+2-1):(kk*Musual-1)] <-
- run.varcov[, ((kk-1)*Musual+2-1):(kk*Musual-1)] +
- c(small.varcov[, 1:Musualm1])
- run.varcov[, M + (kk-1)*Musual + 2-1] <-
- run.varcov[, M + (kk-1)*Musual + 2-1] +
- c(small.varcov[, Musualm1 + 1])
+ run.varcov[, ((kk-1)*M1+2-1):(kk*M1-1)] <-
+ run.varcov[, ((kk-1)*M1+2-1):(kk*M1-1)] +
+ c(small.varcov[, 1:M1m1])
+ run.varcov[, M + (kk-1)*M1 + 2-1] <-
+ run.varcov[, M + (kk-1)*M1 + 2-1] +
+ c(small.varcov[, M1m1 + 1])
} # kk; end of NOS
} # ii; end of nsimEIM
@@ -1776,9 +1919,9 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
- wzind1 <- sort(c( Musual*(1:NOS) - 1 - 1,
- Musual*(1:NOS) - 0 - 1,
- M + Musual*(1:NOS) - 1 - 1))
+ wzind1 <- sort(c( M1*(1:NOS) - 1 - 1,
+ M1*(1:NOS) - 0 - 1,
+ M + M1*(1:NOS) - 1 - 1))
wz[, wzind1] <- c(w) * run.varcov[, wzind1]
@@ -1794,7 +1937,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad
}
}
- wz[, Musual*(1:NOS) ] <- tmp200
+ wz[, M1*(1:NOS) ] <- tmp200
@@ -1821,7 +1964,7 @@ rposnegbin <- function(n, munb, size) {
munb <- rep(munb, length = n)
size <- rep(size, length = n)
index <- ans == 0
- while(any(index)) {
+ while (any(index)) {
more <- rnbinom(n = sum(index), mu = munb[index], size = size[index])
ans[index] <- more
index <- ans == 0
@@ -1852,11 +1995,12 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
- zipoisson <- function(lpstr0 = "logit", llambda = "loge",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, ilambda = NULL,
- imethod = 1,
- shrinkage.init = 0.8, zero = NULL) {
+ zipoisson <-
+ function(lpstr0 = "logit", llambda = "loge",
+ type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ ipstr0 = NULL, ilambda = NULL,
+ imethod = 1,
+ shrinkage.init = 0.8, zero = NULL) {
ipstr00 <- ipstr0
@@ -1903,12 +2047,13 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -1930,11 +2075,11 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
+ extra$M1 <- M1
extra$dimnamesy <- dimnames(y)
- M <- Musual * ncoly
+ M <- M1 * ncoly
extra$type.fitted <- .type.fitted
@@ -1947,7 +2092,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
predictors.names <-
c(namesof(mynames1, .lpstr00 , earg = .epstr00 , tag = FALSE),
namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
@@ -1997,13 +2142,13 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
matP[, spp.] <- Phimat.init
if (!length( .ilambda ))
matL[, spp.] <- Lambda.init
- } # spp.
+ } # spp.
etastart <- cbind(theta2eta(matP, .lpstr00, earg = .epstr00 ),
theta2eta(matL, .llambda, earg = .elambda ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
mustart <- NULL # Since etastart has been computed.
- } # End of !length(etastart)
+ } # End of !length(etastart)
}), list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda,
.ipstr00 = ipstr00, .ilambda = ilambda,
@@ -2033,7 +2178,8 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -2045,21 +2191,21 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
.type.fitted = type.fitted
))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .lpstr00 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .epstr00
- misc$earg[[Musual*ii ]] <- .elambda
+ misc$earg[[M1*ii-1]] <- .epstr00
+ misc$earg[[M1*ii ]] <- .elambda
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -2075,19 +2221,47 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
.epstr00 = epstr00, .elambda = elambda,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda ))),
vfamily = c("zipoisson"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ rzipois(nsim * length(lambda), lambda = lambda, pstr0 = phimat)
+ }, list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
earg = .epstr00 )
lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
@@ -2108,13 +2282,13 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
ans <- c(w) * cbind(dl.dphimat * dphimat.deta,
dl.dlambda * dlambda.deta)
- ans <- ans[, interleave.VGAM(M, M = Musual)]
+ ans <- ans[, interleave.VGAM(M, M = M1)]
if ( .llambda == "loge" && is.empty.list( .elambda ) &&
any(lambda[!index0] < .Machine$double.eps)) {
- for (spp. in 1:(M / Musual)) {
- ans[!index0[, spp.], Musual * spp.] <-
+ for (spp. in 1:(M / M1)) {
+ ans[!index0[, spp.], M1 * spp.] <-
w[!index0[, spp.]] *
(y[!index0[, spp.], spp.] - lambda[!index0[, spp.], spp.])
}
@@ -2136,8 +2310,8 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
c(w) * ned2l.dlambda2 * dlambda.deta^2,
c(w) * ned2l.dphimatlambda * dphimat.deta * dlambda.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
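
Taken together, the zipoisson edits above rename Musual to M1, report Q1 = 1
in infos(), give the loglikelihood slot a summation argument, and add a
simslot.  A minimal sketch of what the simslot enables, assuming the new
simulate() method for "vglm" objects that this release adds in
R/simulate.vglm.R (data and variable names are illustrative):

  library(VGAM)
  set.seed(123)
  zdata <- data.frame(x2 = runif(100))
  zdata <- transform(zdata,
                     y1 = rzipois(100, lambda = exp(1 + x2), pstr0 = 0.3))
  fit <- vglm(y1 ~ x2, zipoisson, data = zdata)
  sims <- simulate(fit, nsim = 5)  # dispatches to the simslot shown above
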
@@ -2198,7 +2372,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -2341,13 +2515,22 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
}, list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(dzibinom(x = round(w * y), size = w, prob = mubin,
- log = TRUE, pstr0 = pstr0))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ dzibinom(x = round(w * y), size = w, prob = mubin,
+ log = TRUE, pstr0 = pstr0)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob ))),
@@ -2470,7 +2653,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -2616,13 +2799,22 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
}, list( .lonempstr0 = lonempstr0, .lprob = lprob,
.eonempstr0 = eonempstr0, .eprob = eprob ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(dzibinom(x = round(w * y), size = w, prob = mubin,
- log = TRUE, pstr0 = 1 - onempstr0))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ dzibinom(x = round(w * y), size = w, prob = mubin,
+ log = TRUE, pstr0 = 1 - onempstr0)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempstr0 = lonempstr0, .lprob = lprob,
.eonempstr0 = eonempstr0, .eprob = eprob ))),
@@ -3061,13 +3253,14 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -3076,7 +3269,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
initialize = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
temp5 <-
w.y.check(w = w, y = y,
@@ -3105,7 +3298,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
mum.init <- if ( .imethod == 3) {
@@ -3172,7 +3365,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
theta2eta(mum.init, .lmunb , earg = .emunb ),
theta2eta(kay.init, .lsize , earg = .esize ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
@@ -3191,15 +3384,15 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
c("mean", "pobs0", "pstr0", "onempstr0"))[1]
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
- pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lpstr0 , earg = .epstr0 )
if (type.fitted %in% c("mean", "pobs0"))
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
if (type.fitted %in% c("pobs0"))
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
ans <- switch(type.fitted,
@@ -3212,7 +3405,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -3227,26 +3421,26 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
misc$link <-
c(rep( .lpstr0 , length = NOS),
rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
- M = Musual)]
+ rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS,
+ M = M1)]
temp.names <-
c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ mynames3)[interleave.VGAM(M1*NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual*NOS)
+ misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .epstr0
- misc$earg[[Musual*ii-1]] <- .emunb
- misc$earg[[Musual*ii ]] <- .esize
+ misc$earg[[M1*ii-2]] <- .epstr0
+ misc$earg[[M1*ii-1]] <- .emunb
+ misc$earg[[M1*ii ]] <- .esize
}
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$ipstr0 <- .ipstr0
misc$isize <- .isize
misc$multipleResponses <- TRUE
@@ -3257,32 +3451,66 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
.ipstr0 = ipstr0, .isize = isize,
.nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 3
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 3
NOS <- extra$NOS
- pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
- .lpstr0 , earg = .epstr0 )
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzinegbin(x = y, size = kmat, munb = munb,
- pstr0 = pstr0, log = TRUE))
+ pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzinegbin(x = y, size = kmat, munb = munb,
+ pstr0 = pstr0, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
vfamily = c("zinegbinomial"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
+ .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lsize , earg = .esize )
+ rzinegbin(nsim * length(munb),
+ size = kmat, munb = munb, pstr0 = pstr0)
+ }, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
- pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lpstr0 , earg = .epstr0 )
- munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
@@ -3291,7 +3519,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
dthetas.detas <-
(cbind(dpstr0.deta,
dmunb.deta,
- dsize.deta))[, interleave.VGAM(Musual*NOS, M = Musual)]
+ dsize.deta))[, interleave.VGAM(M1*NOS, M = M1)]
@@ -3329,7 +3557,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
dl.dthetas <-
cbind(dl.dpstr0,
dl.dmunb,
- dl.dsize)[, interleave.VGAM(Musual*NOS, M = Musual)]
+ dl.dsize)[, interleave.VGAM(M1*NOS, M = M1)]
c(w) * dl.dthetas * dthetas.detas
@@ -3340,9 +3568,9 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
- wz <- matrix(0, n, Musual*M - Musual)
+ wz <- matrix(0, n, M1*M - M1)
- ind3 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind3 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
@@ -3418,23 +3646,23 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
for (spp. in 1:NOS) {
wz1[,, spp.] <- wz1[,, spp.] *
- dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
- dthetas.detas[, Musual * (spp. - 1) + ind3$col]
+ dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
+ dthetas.detas[, M1 * (spp. - 1) + ind3$col]
}
for (spp. in 1:NOS) {
- for (jay in 1:Musual) {
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay, M = M)
+ for (jay in 1:M1) {
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay, M = M)
temp.wz1 <- wz1[,, spp.]
- wz[, cptr] <- temp.wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- temp.wz1[, iam(jay, kay, M = M1)]
}
}
}
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lpstr0 = lpstr0,
.epstr0 = epstr0, .nsimEIM = nsimEIM ))))
} # End of zinegbinomial
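
The summation = TRUE default keeps the old behaviour (a scalar
log-likelihood), while summation = FALSE returns the per-observation elements
ll.elts.  A hedged sketch, assuming logLik() for "vglm" objects passes
summation through to this slot (the logLik.vlm.q changes in this release
suggest it does; the data are simulated for illustration):

  library(VGAM)
  set.seed(1)
  zdata <- data.frame(x2 = runif(200))
  zdata <- transform(zdata,
                     y1 = rzinegbin(200, munb = exp(1 + x2),
                                    size = 2, pstr0 = 0.2))
  fit <- vglm(y1 ~ x2, zinegbinomial, data = zdata)
  ll.i <- logLik(fit, summation = FALSE)  # per-observation contributions
  all.equal(sum(ll.i), as.numeric(logLik(fit)))
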
@@ -3515,13 +3743,14 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 3
+ M1 <- 3
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 3,
+ list(M1 = 3,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -3530,7 +3759,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
initialize = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
temp5 <-
w.y.check(w = w, y = y,
@@ -3558,7 +3787,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
c(namesof(mynames1, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames2, .lsize , earg = .esize , tag = FALSE),
namesof(mynames3, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
mum.init <- if ( .imethod == 3) {
@@ -3626,7 +3855,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
theta2eta(onempstr0.init, .lonempstr0 ,
earg = .eonempstr0 ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
@@ -3645,15 +3874,15 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
c("mean", "pobs0", "pstr0", "onempstr0"))[1]
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
if (type.fitted %in% c("mean", "pobs0"))
- munb <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
if (type.fitted %in% c("pobs0"))
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lsize , earg = .esize )
- onempstr0 <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ onempstr0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lonempstr0 , earg = .eonempstr0 )
ans <- switch(type.fitted,
@@ -3666,7 +3895,8 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -3681,26 +3911,26 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
misc$link <-
c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS),
- rep( .lonempstr0 , length = NOS))[interleave.VGAM(Musual*NOS,
- M = Musual)]
+ rep( .lonempstr0 , length = NOS))[interleave.VGAM(M1*NOS,
+ M = M1)]
temp.names <-
c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ mynames3)[interleave.VGAM(M1*NOS, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual*NOS)
+ misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .emunb
- misc$earg[[Musual*ii-1]] <- .esize
- misc$earg[[Musual*ii ]] <- .eonempstr0
+ misc$earg[[M1*ii-2]] <- .emunb
+ misc$earg[[M1*ii-1]] <- .esize
+ misc$earg[[M1*ii ]] <- .eonempstr0
}
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$ionempstr0 <- .ionempstr0
misc$isize <- .isize
misc$multipleResponses <- TRUE
@@ -3710,32 +3940,64 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
.ionempstr0 = ionempstr0, .isize = isize,
.nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 3
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 3
NOS <- extra$NOS
- munb <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lsize , earg = .esize )
- onempstr0 <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ onempstr0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lonempstr0 , earg = .eonempstr0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzinegbin(x = y, size = kmat, munb = munb,
- pstr0 = 1 - onempstr0, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzinegbin(x = y, size = kmat, munb = munb,
+ pstr0 = 1 - onempstr0, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
vfamily = c("zinegbinomialff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ munb <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lsize , earg = .esize )
+ onempstr0 <- eta2theta(eta[, c(FALSE, FALSE, TRUE)],
+ .lonempstr0 , earg = .eonempstr0 )
+ rzinegbin(nsim * length(munb),
+ size = kmat, munb = munb, pstr0 = 1 - onempstr0)
+ }, list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+ .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 3
+ M1 <- 3
NOS <- extra$NOS
- munb <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ kmat <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lsize , earg = .esize )
- onempstr0 <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ onempstr0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lonempstr0 , earg = .eonempstr0 )
donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
@@ -3745,8 +4007,8 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
dthetas.detas <-
(cbind(dmunb.deta,
dsize.deta,
- donempstr0.deta))[, interleave.VGAM(Musual*NOS,
- M = Musual)]
+ donempstr0.deta))[, interleave.VGAM(M1*NOS,
+ M = M1)]
@@ -3784,7 +4046,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
dl.dthetas <-
cbind(dl.dmunb,
dl.dsize,
- dl.donempstr0)[, interleave.VGAM(Musual*NOS, M = Musual)]
+ dl.donempstr0)[, interleave.VGAM(M1*NOS, M = M1)]
c(w) * dl.dthetas * dthetas.detas
@@ -3795,9 +4057,9 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
- wz <- matrix(0, n, Musual*M - Musual)
+ wz <- matrix(0, n, M1*M - M1)
- ind3 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind3 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
@@ -3873,23 +4135,23 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
for (spp. in 1:NOS) {
wz1[,, spp.] <- wz1[,, spp.] *
- dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
- dthetas.detas[, Musual * (spp. - 1) + ind3$col]
+ dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
+ dthetas.detas[, M1 * (spp. - 1) + ind3$col]
}
for (spp. in 1:NOS) {
- for (jay in 1:Musual) {
- for (kay in jay:Musual) {
- cptr <- iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay, M = M)
+ for (jay in 1:M1) {
+ for (kay in jay:M1) {
+ cptr <- iam((spp. - 1) * M1 + jay,
+ (spp. - 1) * M1 + kay, M = M)
temp.wz1 <- wz1[,, spp.]
- wz[, cptr] <- temp.wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- temp.wz1[, iam(jay, kay, M = M1)]
}
}
}
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lonempstr0 = lonempstr0,
.eonempstr0 = eonempstr0, .nsimEIM = nsimEIM ))))
} # End of zinegbinomialff
@@ -3954,12 +4216,13 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
"Mean: onempstr0 * lambda"),
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -3984,10 +4247,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
ncoly <- ncol(y)
- Musual <- 2
+ M1 <- 2
extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
extra$type.fitted <- .type.fitted
extra$dimnamesy <- dimnames(y)
@@ -3997,7 +4260,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
predictors.names <-
c(namesof(mynames1, .llambda , earg = .elambda , tag = FALSE),
namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
if (!length(etastart)) {
@@ -4049,7 +4312,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
etastart <-
cbind(theta2eta( matL, .llambda , earg = .elambda ),
theta2eta(1 - matP, .lonempstr0 , earg = .eonempstr0 ))[,
- interleave.VGAM(M, M = Musual)]
+ interleave.VGAM(M, M = M1)]
mustart <- NULL # Since etastart has been computed.
}
@@ -4069,11 +4332,11 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
c("mean", "pobs0", "pstr0", "onempstr0"))[1]
- Musual <- 2
+ M1 <- 2
ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda ,
+ lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
earg = .elambda )
- onempstr0 <- eta2theta(eta[, Musual*(1:ncoly) ], .lonempstr0 ,
+ onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
earg = .eonempstr0 )
@@ -4086,7 +4349,8 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
@@ -4097,22 +4361,22 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
.eonempstr0 = eonempstr0, .elambda = elambda,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- Musual <- extra$Musual
+ M1 <- extra$M1
misc$link <-
c(rep( .llambda , length = ncoly),
- rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M = Musual)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * ncoly)
+ misc$earg <- vector("list", M1 * ncoly)
names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .elambda
- misc$earg[[Musual*ii ]] <- .eonempstr0
+ misc$earg[[M1*ii-1]] <- .elambda
+ misc$earg[[M1*ii ]] <- .eonempstr0
}
- misc$Musual <- Musual
+ misc$M1 <- M1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
@@ -4130,29 +4394,59 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
.eonempstr0 = eonempstr0, .elambda = elambda,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ M1 <- 2
ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda ,
+ lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
earg = .elambda )
- onempstr0 <- eta2theta(eta[, Musual*(1:ncoly) ], .lonempstr0 ,
+ onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
earg = .eonempstr0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzipois(x = y, pstr0 = 1 - onempstr0, lambda = lambda,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <- c(w) *
+ dzipois(x = y, pstr0 = 1 - onempstr0, lambda = lambda,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempstr0 = lonempstr0, .llambda = llambda,
.eonempstr0 = eonempstr0, .elambda = elambda ))),
vfamily = c("zipoissonff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ rzipois(nsim * length(lambda), lambda = lambda, pstr0 = 1 - onempstr0)
+ }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda ))),
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda ,
+ lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
earg = .elambda )
- onempstr0 <- eta2theta(eta[, Musual*(1:ncoly) ], .lonempstr0 ,
+ onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
earg = .eonempstr0 )
@@ -4170,13 +4464,13 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
dl.donempstr0 * donempstr0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
if ( .llambda == "loge" && is.empty.list( .elambda ) &&
any(lambda[!ind0] < .Machine$double.eps)) {
for (spp. in 1:ncoly) {
- ans[!ind0[, spp.], Musual * spp.] <-
+ ans[!ind0[, spp.], M1 * spp.] <-
w[!ind0[, spp.]] *
(y[!ind0[, spp.], spp.] - lambda[!ind0[, spp.], spp.])
}
@@ -4199,8 +4493,8 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
wz <- array(c(c(w) * ned2l.dlambda2 * dlambda.deta^2,
c(w) * ned2l.donempstr0.2 * donempstr0.deta^2,
c(w) * ned2l.dphilambda * donempstr0.deta * dlambda.deta),
- dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
}), list( .llambda = llambda ))))
@@ -4394,19 +4688,20 @@ rzigeom <- function(n, prob, pstr0 = 0) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -4434,7 +4729,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
predictors.names <-
c(namesof(mynames1, .lpstr0, earg = .epstr0, tag = FALSE),
namesof(mynames2, .lprob, earg = .eprob, tag = FALSE))[
- interleave.VGAM(Musual * NOS, M = Musual)]
+ interleave.VGAM(M1 * NOS, M = M1)]
if (!length(etastart)) {
@@ -4477,7 +4772,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
etastart <-
cbind(theta2eta(psze.init, .lpstr0, earg = .epstr0),
theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0,
@@ -4520,18 +4815,18 @@ rzigeom <- function(n, prob, pstr0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lpstr0 , len = NOS),
rep( .lprob , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .epstr0
- misc$earg[[Musual*ii ]] <- .eprob
+ misc$earg[[M1*ii-1]] <- .epstr0
+ misc$earg[[M1*ii ]] <- .eprob
}
@@ -4558,19 +4853,48 @@ rzigeom <- function(n, prob, pstr0 = 0) {
.bias.red = bias.red,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
vfamily = c("zigeometric"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+ rzigeom(nsim * length(pstr0), prob = prob, pstr0 = pstr0)
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
@@ -4592,7 +4916,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
dl.deta12 <- c(w) * cbind(dl.dpstr0 * dpstr0.deta,
dl.dprob * dprob.deta)
- dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = Musual)]
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = M1)]
dl.deta12
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
@@ -4621,8 +4945,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
c(c(w) * od2l.dpstr02 * dpstr0.deta^2,
c(w) * od2l.dprob2 * dprob.deta^2,
c(w) * od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
- wz <- array(allvals, dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ wz <- array(allvals, dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
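
For reference, rzigeom() (whose definition is touched above) mixes structural
zeros in with ordinary geometric draws, so the zero proportion should be
pstr0 + (1 - pstr0) * prob.  A quick check of that identity:

  library(VGAM)
  set.seed(2)
  y <- rzigeom(10000, prob = 0.4, pstr0 = 0.2)
  mean(y == 0)   # approximately 0.2 + 0.8 * 0.4 = 0.52
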
@@ -4694,19 +5018,20 @@ rzigeom <- function(n, prob, pstr0 = 0) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -4734,7 +5059,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
predictors.names <-
c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
@@ -4777,7 +5102,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
etastart <-
cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
theta2eta(1 - psze.init, .lonempstr0 , earg = .eonempstr0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lprob = lprob, .lonempstr0 = lonempstr0,
.eprob = eprob, .eonempstr0 = eonempstr0,
@@ -4822,18 +5147,18 @@ rzigeom <- function(n, prob, pstr0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lprob , len = NOS),
rep( .lonempstr0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .eprob
- misc$earg[[Musual*ii ]] <- .eonempstr0
+ misc$earg[[M1*ii-1]] <- .eprob
+ misc$earg[[M1*ii ]] <- .eonempstr0
}
@@ -4858,22 +5183,54 @@ rzigeom <- function(n, prob, pstr0 = 0) {
.bias.red = bias.red,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
earg = .eprob )
onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
earg = .eonempstr0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = 1 - onempstr0,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzigeom(x = y, prob = prob, pstr0 = 1 - onempstr0,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob = lprob, .lonempstr0 = lonempstr0,
.eprob = eprob, .eonempstr0 = eonempstr0 ))),
vfamily = c("zigeometricff"),
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ rzigeom(nsim * length(onempstr0), prob = prob, pstr0 = 1 - onempstr0)
+ }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
earg = .eprob )
onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
@@ -4900,7 +5257,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
dl.deta12 <- c(w) * cbind(dl.dprob * dprob.deta,
dl.donempstr0 * donempstr0.deta)
- dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = Musual)]
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = M1)]
dl.deta12
}), list( .lprob = lprob, .lonempstr0 = lonempstr0,
.eprob = eprob, .eonempstr0 = eonempstr0 ))),
@@ -4931,8 +5288,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
c(w) * od2l.donempstr02 * donempstr0.deta^2,
c(w) * od2l.donempstr0.prob * dprob.deta *
donempstr0.deta)
- wz <- array(allvals, dim = c(n, M / Musual, 3))
- wz <- arwz2wz(wz, M = M, Musual = Musual)
+ wz <- array(allvals, dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
wz
@@ -5183,7 +5540,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5332,17 +5689,26 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
Size <- new.w / orig.w
pobs0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(orig.w * dzabinom(x = round(y * Size), size = Size,
- prob = prob, pobs0 = pobs0,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ orig.w * dzabinom(x = round(y * Size), size = Size,
+ prob = prob, pobs0 = pobs0,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob = lprob, .lpobs0 = lpobs0,
.eprob = eprob, .epobs0 = epobs0 ))),
@@ -5350,7 +5716,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
deriv = eval(substitute(expression({
NOS <- if (length(extra$NOS)) extra$NOS else 1
- Musual <- 2
+ M1 <- 2
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
@@ -5390,7 +5756,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual)
+ wz <- matrix(0.0, n, M1)
usualmeanY <- prob
meanY <- (1 - phi0) * usualmeanY / oneminusf0
@@ -5478,7 +5844,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5626,17 +5992,26 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
Size <- new.w / orig.w
prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(orig.w * dzabinom(x = round(y * Size), size = Size,
- prob = prob, pobs0 = 1 - onempobs0,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ orig.w * dzabinom(x = round(y * Size), size = Size,
+ prob = prob, pobs0 = 1 - onempobs0,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lprob = lprob, .lonempobs0 = lonempobs0,
.eprob = eprob, .eonempobs0 = eonempobs0 ))),
@@ -5644,7 +6019,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
deriv = eval(substitute(expression({
NOS <- if (length(extra$NOS)) extra$NOS else 1
- Musual <- 2
+ M1 <- 2
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
@@ -5687,7 +6062,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual)
+ wz <- matrix(0.0, n, M1)
usualmeanY <- prob
meanY <- (1 - phi0) * usualmeanY / oneminusf0
@@ -5770,12 +6145,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5783,7 +6159,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -5816,7 +6192,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
@@ -5844,7 +6220,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
etastart <- cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob,
@@ -5862,11 +6238,11 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
c("mean", "pobs0", "onempobs0"))[1]
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lprob , earg = .eprob ))
@@ -5890,18 +6266,18 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lpobs0 , len = NOS),
rep( .lprob , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M <- Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .epobs0
- misc$earg[[Musual*ii ]] <- .eprob
+ misc$earg[[M1*ii-1]] <- .epobs0
+ misc$earg[[M1*ii ]] <- .eprob
}
@@ -5915,31 +6291,63 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
.ipobs0 = ipobs0, .iprob = iprob,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lprob , earg = .eprob ))
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
vfamily = c("zageometric"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ phi0 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lprob , earg = .eprob ))
+ rzageom(nsim * length(prob), prob = prob, pobs0 = phi0)
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
NOS <- extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
- phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lprob , earg = .eprob ))
@@ -5957,13 +6365,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
dl.dprob * dprob.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual*NOS)
+ wz <- matrix(0.0, n, M1*NOS)
ned2l.dprob2 <- (1 - phi0) / (prob^2 * (1 - prob))
@@ -5981,7 +6389,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
wz[, 1:NOS] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
wz
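
In contrast to the zero-inflated families, the zero-altered (hurdle) families
model pobs0 directly, so P(Y = 0) equals pobs0 exactly rather than being a
mixture.  A small sketch with rzageom():

  library(VGAM)
  set.seed(3)
  y <- rzageom(10000, prob = 0.4, pobs0 = 0.25)
  mean(y == 0)   # approximately 0.25
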
@@ -6038,12 +6446,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
constraints = eval(substitute(expression({
dotzero <- .zero
- Musual <- 2
+ M1 <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
- list(Musual = 2,
+ list(M1 = 2,
+ Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -6051,7 +6460,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
))),
initialize = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
if (any(y < 0))
stop("the response must not have negative values")
@@ -6084,7 +6493,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
predictors.names <-
c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
- interleave.VGAM(Musual*NOS, M = Musual)]
+ interleave.VGAM(M1*NOS, M = M1)]
if (!length(etastart)) {
@@ -6114,7 +6523,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
}
}), list( .lonempobs0 = lonempobs0, .lprob = lprob,
.eonempobs0 = eonempobs0, .eprob = eprob,
@@ -6132,11 +6541,11 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
c("mean", "pobs0", "onempobs0"))[1]
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0 , earg = .eonempobs0 ))
@@ -6160,18 +6569,18 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lprob , len = NOS),
rep( .lonempobs0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
misc$link <- temp.names
- misc$earg <- vector("list", Musual * NOS)
+ misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
for (ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .eprob
- misc$earg[[Musual*ii ]] <- .eonempobs0
+ misc$earg[[M1*ii-1]] <- .eprob
+ misc$earg[[M1*ii ]] <- .eonempobs0
}
@@ -6185,32 +6594,64 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
.ionempobs0 = ionempobs0, .iprob = iprob,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
NOS <- extra$NOS
- Musual <- 2
+ M1 <- 2
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0 , earg = .eonempobs0 ))
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dzageom(x = y, pobs0 = 1 - onempobs0, prob = prob,
- log = TRUE))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzageom(x = y, pobs0 = 1 - onempobs0, prob = prob,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
}
}, list( .lonempobs0 = lonempobs0, .lprob = lprob,
.eonempobs0 = eonempobs0, .eprob = eprob ))),
vfamily = c("zageometricff"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object@prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ onempobs0 <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 ))
+ prob <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lprob , earg = .eprob ))
+ rzageom(nsim * length(prob), pobs0 = 1 - onempobs0, prob = prob)
+ }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
+
+
+
+
deriv = eval(substitute(expression({
- Musual <- 2
+ M1 <- 2
NOS <- extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
- prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
.lonempobs0 , earg = .eonempobs0 ))
pobs0 <- 1 - onempobs0
@@ -6230,13 +6671,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- c(w) * cbind(dl.dprob * dprob.deta,
dl.donempobs0 * donempobs0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
ans
}), list( .lonempobs0 = lonempobs0, .lprob = lprob,
.eonempobs0 = eonempobs0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, Musual*NOS)
+ wz <- matrix(0.0, n, M1*NOS)
ned2l.dprob2 <- (1 - pobs0) / (prob^2 * (1 - prob))
@@ -6257,7 +6698,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
wz[, NOS+(1:NOS)] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
wz
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 5422ccd..50a10db 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -55,10 +55,6 @@ fittedvlm <- function(object, matrix.arg = TRUE,
-if (!isGeneric("fitted"))
- setGeneric("fitted",
- function(object, ...)
- standardGeneric("fitted"))
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 50d1e3b..b5c1d58 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,7 +8,7 @@
-formulavlm = function(x, fnumber=1, ...) {
+formulavlm <- function(x, fnumber = 1, ...) {
if (!is.Numeric(fnumber, integer.valued = TRUE,
length.arg = 1, positive = TRUE) ||
fnumber > 2)
@@ -22,8 +22,8 @@ formulavlm = function(x, fnumber=1, ...) {
-formulaNA.VGAM = function(x, ...) {
- stop("a formula does not make sense for object 'x'")
+formulaNA.VGAM <- function(x, ...) {
+ stop("a formula does not make sense for object 'x'")
}
@@ -66,17 +66,17 @@ setMethod("formula", "grc",
variable.namesvlm <- function(object, full = FALSE, ...) {
- qrslot <- object@qr
- if (!length(qrslot$qr)) {
- use.this <- object@x
- if (!length(use.this))
- stop("argument 'object' has empty 'qr' and 'x' slots.")
- } else {
- use.this = qrslot$qr
- }
- if (full) dimnames(use.this)[[2]] else
- if (object@rank) dimnames(use.this)[[2]][seq_len(object@rank)] else
- character(0)
+ qrslot <- object@qr
+ if (!length(qrslot$qr)) {
+ use.this <- object@x
+ if (!length(use.this))
+ stop("argument 'object' has empty 'qr' and 'x' slots.")
+ } else {
+ use.this <- qrslot$qr
+ }
+ if (full) dimnames(use.this)[[2]] else
+ if (object@rank) dimnames(use.this)[[2]][seq_len(object@rank)] else
+ character(0)
}
@@ -84,15 +84,15 @@ variable.namesvlm <- function(object, full = FALSE, ...) {
variable.namesrrvglm <- function(object, ...) {
- qrslot <- object@qr
- if (!length(qrslot$qr)) {
- use.this <- object@x
- if (!length(use.this))
- stop("argument 'object' has empty 'qr' and 'x' slots.")
- } else {
- use.this = qrslot$qr
- }
- dimnames(use.this)[[2]]
+ qrslot <- object@qr
+ if (!length(qrslot$qr)) {
+ use.this <- object@x
+ if (!length(use.this))
+ stop("argument 'object' has empty 'qr' and 'x' slots.")
+ } else {
+ use.this <- qrslot$qr
+ }
+ dimnames(use.this)[[2]]
}
@@ -102,42 +102,43 @@ variable.namesrrvglm <- function(object, ...) {
case.namesvlm <- function(object, full = FALSE, ...) {
- w <- weights(object, type="prior")
- use.this <- residuals(object, type="working")
- if (!length(use.this))
- use.this <- object@x
- if (!length(use.this))
- use.this <- object@y
- if (!length(use.this))
- stop("argument 'object' has empty 'x' and 'y' slots.")
- dn <- dimnames(use.this)[[1]]
- if (full || is.null(w) || ncol(cbind(w)) != 1) dn else dn[w!=0]
+ w <- weights(object, type = "prior")
+ use.this <- residuals(object, type = "working")
+ if (!length(use.this))
+ use.this <- object@x
+ if (!length(use.this))
+ use.this <- object@y
+ if (!length(use.this))
+ stop("argument 'object' has empty 'x' and 'y' slots.")
+ dn <- dimnames(use.this)[[1]]
+ if (full || is.null(w) || ncol(cbind(w)) != 1)
+ dn else dn[w != 0]
}
setMethod("variable.names", "vlm",
function(object, ...)
- variable.namesvlm(object=object, ...))
+ variable.namesvlm(object = object, ...))
setMethod("variable.names", "vglm",
function(object, ...)
- variable.namesvlm(object=object, ...))
+ variable.namesvlm(object = object, ...))
setMethod("variable.names", "vgam",
function(object, ...)
- variable.namesvlm(object=object, ...))
+ variable.namesvlm(object = object, ...))
setMethod("variable.names", "rrvglm",
function(object, ...)
- variable.namesrrvglm(object=object, ...))
+ variable.namesrrvglm(object = object, ...))
setMethod("variable.names", "qrrvglm",
function(object, ...)
- variable.namesvlm(object=object, ...))
+ variable.namesvlm(object = object, ...))
setMethod("variable.names", "grc",
function(object, ...)
- variable.namesvlm(object=object, ...))
+ variable.namesvlm(object = object, ...))
@@ -146,27 +147,27 @@ setMethod("variable.names", "grc",
setMethod("case.names", "vlm",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
setMethod("case.names", "vglm",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
setMethod("case.names", "vgam",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
setMethod("case.names", "rrvglm",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
setMethod("case.names", "qrrvglm",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
setMethod("case.names", "grc",
function(object, ...)
- case.namesvlm(object=object, ...))
+ case.namesvlm(object = object, ...))
diff --git a/R/generic.q b/R/generic.q
index 7b71020..cea5d02 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/links.q b/R/links.q
index 515fd11..655e0b0 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,8 +20,8 @@ ToString <- function(x)
TypicalVGAMfamilyFunction <-
function(lsigma = "loge",
isigma = NULL,
- link.list = list("(Default)" = "identity",
- x2 = "loge",
+ link.list = list("(Default)" = "identitylink",
+ x2 = "loge",
x3 = "logoff",
x4 = "mlogit",
x5 = "mlogit"),
@@ -186,7 +186,7 @@ care.exp <- function(x,
- identity <- function(theta,
+ identitylink <- function(theta,
inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
@@ -245,10 +245,11 @@ care.exp <- function(x,
- logit <- function(theta,
- bvalue = NULL, # .Machine$double.eps is an alternative
- inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
+ logit <-
+ function(theta,
+ bvalue = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
if (is.character(theta)) {
string <- if (short)
paste("logit(", theta, ")", sep = "") else
@@ -264,8 +265,7 @@ care.exp <- function(x,
}
if (inverse) {
if (deriv > 0) {
- 1 / Recall(theta = theta,
- bvalue = bvalue,
+ 1 / Recall(theta = theta, bvalue = bvalue,
inverse = FALSE, deriv = deriv)
} else {
exp(theta - log1p(exp(theta)))
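[Editorial note: for orientation, a minimal sketch of calling the renamed link, with the signature exactly as in the hunk above; VGAM attached.]

    library(VGAM)
    identitylink(theta = c(-1, 0.5))                  # eta = theta, unchanged
    identitylink(theta = c(-1, 0.5), inverse = TRUE)  # the inverse map is also the identity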
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index acd9c61..460df6f 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,8 +10,30 @@
-logLik.vlm <- function(object, ...)
- object@criterion$loglikelihood
+logLik.vlm <- function(object,
+ summation = TRUE,
+ ...) {
+
+ if (summation) {
+ object@criterion$loglikelihood
+ } else {
+
+
+ Args <- formals(args(object@family@loglikelihood))
+ if (length(Args$summation) == 0)
+ stop("there is no 'summation' argument for the function in the ",
+ "'loglikelihood' slot of the object.")
+
+
+ object@family@loglikelihood(mu = fitted(object),
+ y = depvar(object),
+ w = weights(object, type = "prior"),
+ residuals = FALSE,
+ eta = predict(object),
+ extra = object@extra,
+ summation = summation)
+ }
+}
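[Editorial note: a sketch of what the new 'summation' argument enables; the fit, data, and family choice are hypothetical, and the family's 'loglikelihood' slot must itself accept 'summation', as added across many families in this patch.]

    fit <- vglm(y ~ x2, family = poissonff, data = pdata)
    ll.each <- logLik(fit, summation = FALSE)  # per-observation contributions
    all.equal(sum(ll.each), c(logLik(fit)))    # they sum to the usual total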
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index 2ef246b..4ee7635 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 35208b9..a6839c9 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -31,21 +31,22 @@ attrassignlm <- function(object, ...)
- vlabel <- function(xn, ncolBlist, M, separator = ":") {
+ vlabel <- function(xn, ncolHlist, M, separator = ":", colon = FALSE) {
- if (length(xn) != length(ncolBlist))
+ if (length(xn) != length(ncolHlist))
stop("length of first two arguments not equal")
- n1 <- rep(xn, ncolBlist)
+ n1 <- rep(xn, ncolHlist)
if (M == 1)
return(n1)
- n2 <- as.list(ncolBlist)
+ n2 <- as.list(ncolHlist)
n2 <- lapply(n2, seq)
n2 <- unlist(n2)
n2 <- as.character(n2)
n2 <- paste(separator, n2, sep = "")
- n3 <- rep(ncolBlist, ncolBlist)
- n2[n3 == 1] <- ""
+ n3 <- rep(ncolHlist, ncolHlist)
+ if (!colon)
+ n2[n3 == 1] <- ""
n1n2 <- paste(n1, n2, sep = "")
n1n2
}
@@ -54,7 +55,7 @@ attrassignlm <- function(object, ...)
vlm2lm.model.matrix <-
- function(x.vlm, Blist = NULL,
+ function(x.vlm, Hlist = NULL,
which.linpred = 1,
M = NULL) {
@@ -65,14 +66,14 @@ attrassignlm <- function(object, ...)
if (is.numeric(M)) {
- if (M != nrow(Blist[[1]]))
- stop("argument 'M' does not match argument 'Blist'")
+ if (M != nrow(Hlist[[1]]))
+ stop("argument 'M' does not match argument 'Hlist'")
} else {
- M <- nrow(Blist[[1]])
+ M <- nrow(Hlist[[1]])
}
- Hmatrices <- matrix(c(unlist(Blist)), nrow = M)
+ Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
if (ncol(Hmatrices) != ncol(x.vlm))
stop("ncol(Hmatrices) != ncol(x.vlm)")
@@ -92,14 +93,14 @@ attrassignlm <- function(object, ...)
lm2vlm.model.matrix <-
- function(x, Blist = NULL, assign.attributes = TRUE,
+ function(x, Hlist = NULL, assign.attributes = TRUE,
M = NULL, xij = NULL, Xm2 = NULL) {
- if (length(Blist) != ncol(x))
- stop("length(Blist) != ncol(x)")
+ if (length(Hlist) != ncol(x))
+ stop("length(Hlist) != ncol(x)")
if (length(xij)) {
if (inherits(xij, "formula"))
@@ -109,18 +110,18 @@ attrassignlm <- function(object, ...)
}
if (!is.numeric(M))
- M <- nrow(Blist[[1]])
+ M <- nrow(Hlist[[1]])
nrow.X.lm <- nrow(x)
- if (all(trivial.constraints(Blist) == 1)) {
+ if (all(trivial.constraints(Hlist) == 1)) {
X.vlm <- if (M > 1) kronecker(x, diag(M)) else x
- ncolBlist <- rep(M, ncol(x))
+ ncolHlist <- rep(M, ncol(x))
} else {
- allB <- matrix(unlist(Blist), nrow = M)
- ncolBlist <- unlist(lapply(Blist, ncol))
- Rsum <- sum(ncolBlist)
+ allB <- matrix(unlist(Hlist), nrow = M)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ Rsum <- sum(ncolHlist)
- X1 <- rep(c(t(x)), rep(ncolBlist, nrow.X.lm))
+ X1 <- rep(c(t(x)), rep(ncolHlist, nrow.X.lm))
dim(X1) <- c(Rsum, nrow.X.lm)
X.vlm <- kronecker(t(X1), matrix(1, M, 1)) *
kronecker(matrix(1, nrow.X.lm, 1), allB)
@@ -131,7 +132,7 @@ attrassignlm <- function(object, ...)
yn <- dn[[1]]
xn <- dn[[2]]
dimnames(X.vlm) <- list(vlabel(yn, rep(M, nrow.X.lm), M),
- vlabel(xn, ncolBlist, M))
+ vlabel(xn, ncolHlist, M))
if (assign.attributes) {
attr(X.vlm, "contrasts") <- attr(x, "contrasts")
@@ -144,7 +145,7 @@ attrassignlm <- function(object, ...)
nasgn <- oasgn <- attr(x, "assign")
lowind <- 0
for (ii in 1:length(oasgn)) {
- mylen <- length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
+ mylen <- length(oasgn[[ii]]) * ncolHlist[oasgn[[ii]][1]]
nasgn[[ii]] <- (lowind+1):(lowind+mylen)
lowind <- lowind + mylen
} # End of ii
@@ -166,7 +167,7 @@ attrassignlm <- function(object, ...)
names(vasgn) <- vlabel(names(oasgn), fred, M)
attr(X.vlm, "vassign") <- vasgn
- attr(X.vlm, "constraints") <- Blist
+ attr(X.vlm, "constraints") <- Hlist
} # End of if (assign.attributes)
@@ -209,7 +210,7 @@ attrassignlm <- function(object, ...)
allXk <- Xm2[,use.cols.Xm2,drop=FALSE]
cmat.no <- (at.x[[name.term.y]])[1] # First one will do (all the same).
- cmat <- Blist[[cmat.no]]
+ cmat <- Hlist[[cmat.no]]
Rsum.k <- ncol(cmat)
tmp44 <- kronecker(matrix(1, nrow.X.lm, 1), t(cmat)) *
kronecker(allXk, matrix(1,ncol(cmat), 1)) # n*Rsum.k x M
@@ -300,8 +301,8 @@ attrassignlm <- function(object, ...)
M <- object@misc$M
- Blist <- object@constraints # == constraints(object, type = "lm")
- X.vlm <- lm2vlm.model.matrix(x = x, Blist = Blist,
+ Hlist <- object@constraints # == constraints(object, type = "lm")
+ X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist,
xij = object@control$xij, Xm2 = Xm2)
if (type == "vlm") {
@@ -314,7 +315,7 @@ attrassignlm <- function(object, ...)
stop("argument 'linpred.index' should have ",
"a single value from the set 1:", M)
- Hlist <- Blist
+ Hlist <- Hlist
n.lm <- nobs(object) # Number of rows of the LM matrix
M <- object@misc$M # Number of linear/additive predictors
Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
@@ -525,7 +526,7 @@ hatvaluesvlm <-
type = c("diagonal", "matrix", "centralBlocks"), ...) {
- if(!missing(type))
+ if (!missing(type))
type <- as.character(substitute(type))
type.arg <- match.arg(type, c("diagonal", "matrix", "centralBlocks"))[1]
@@ -579,22 +580,20 @@ hatvaluesvlm <-
all.mat
} else {
ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- MM12 <- M * (M + 1) / 2
- all.rows.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$row.index
- all.cols.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$col.index
+ MMp1d2 <- M * (M + 1) / 2
+ all.rows.index <- rep((0:(nn-1)) * M, rep(MMp1d2, nn)) + ind1$row.index
+ all.cols.index <- rep((0:(nn-1)) * M, rep(MMp1d2, nn)) + ind1$col.index
H.ss <- rowSums(Q.S3[all.rows.index, ] *
Q.S3[all.cols.index, ])
- H.ss <- matrix(H.ss, nn, MM12, byrow = TRUE)
+ H.ss <- matrix(H.ss, nn, MMp1d2, byrow = TRUE)
H.ss
}
}
-if (!isGeneric("hatvalues"))
- setGeneric("hatvalues", function(model, ...)
- standardGeneric("hatvalues"), package = "VGAM")
+
setMethod("hatvalues", "vlm", function(model, ...)
@@ -768,19 +767,18 @@ dfbetavlm <-
-if (!isGeneric("dfbeta"))
- setGeneric("dfbeta", function(model, ...)
- standardGeneric("dfbeta"), package = "VGAM")
setMethod("dfbeta", "matrix", function(model, ...)
dfbetavlm(model, ...))
+
setMethod("dfbeta", "vlm", function(model, ...)
dfbetavlm(model, ...))
setMethod("dfbeta", "vglm", function(model, ...)
dfbetavlm(model, ...))
+
setMethod("dfbeta", "rrvglm", function(model, ...)
dfbetavlm(model, ...))
setMethod("dfbeta", "qrrvglm", function(model, ...)
diff --git a/R/mux.q b/R/mux.q
index 3825dec..dc7fe0e 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,13 +20,13 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
c( .C("VGAM_C_mux34", as.double(xmat), as.double(cc),
as.integer(nnn), as.integer(RRR),
as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
- NAOK = TRUE, PACKAGE = "VGAM")$ans)
+ NAOK = TRUE)$ans)
}
-if(FALSE)
+if (FALSE)
mux34 <- function(xmat, cc, symmetric = FALSE) {
if (!is.matrix(xmat))
@@ -41,7 +41,7 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
c( .Fortran("vgamf90mux34", as.double(xmat), as.double(cc),
as.integer(n), as.integer(R),
as.integer(symmetric), ans = as.double(rep(0.0, n)),
- NAOK = TRUE, PACKAGE = "VGAM")$ans)
+ NAOK = TRUE)$ans)
}
@@ -65,7 +65,7 @@ mux2 <- function(cc, xmat) {
ans <- rep(as.numeric(NA), n*M)
fred <- .C("mux2", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(p), as.integer(n),
- as.integer(M), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(M), NAOK = TRUE)
matrix(fred$ans, n, M, byrow = TRUE)
}
@@ -86,7 +86,7 @@ mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE) {
ans = as.double(ans), as.integer(dimm.value),
as.integer(index$row), as.integer(index$col),
as.integer(n), as.integer(M), wk = double(M*M),
- as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(as.numeric(upper)), NAOK = TRUE)
if (!as.matrix) fred$ans else {
dim(fred$ans) <- c(M, n)
t(fred$ans)
@@ -133,7 +133,7 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
double(M*M), double(r*r),
as.integer(index.M$row), as.integer(index.M$col),
as.integer(index.r$row), as.integer(index.r$col),
- ok3 = as.integer(1), NAOK = TRUE, PACKAGE = "VGAM")
+ ok3 = as.integer(1), NAOK = TRUE)
if (fred$ok3 == 0)
stop("can only handle matrix.arg == 1")
@@ -157,15 +157,15 @@ mux55 <- function(evects, evals, M) {
if (d[1] != M || d[2] != M || d[3] != n ||
nrow(evals)!= M || ncol(evals) != n)
stop("input nonconformable")
- MM12 <- M*(M+1)/2 # The answer is a full-matrix
+ MMp1d2 <- M*(M+1)/2 # The answer is a full-matrix
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
fred <- .C("mux55", as.double(evects), as.double(evals),
- ans = double(MM12 * n),
- double(M*M), double(M*M),
- as.integer(index$row), as.integer(index$col),
- as.integer(M), as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
- dim(fred$ans) <- c(MM12, n)
+ ans = double(MMp1d2 * n),
+ double(M*M), double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(M), as.integer(n), NAOK = TRUE)
+ dim(fred$ans) <- c(MMp1d2, n)
fred$ans
}
@@ -190,7 +190,7 @@ mux7 <- function(cc, x) {
fred <- .C("mux7", as.double(cc), as.double(x),
ans = as.double(ans),
as.integer(M), as.integer(qq), as.integer(n),
- as.integer(r), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(r), NAOK = TRUE)
array(fred$ans, c(M, r, n))
}
@@ -215,7 +215,7 @@ mux9 <- function(cc, xmat) {
ans <- matrix(as.numeric(NA), n, M)
fred <- .C("mux9", as.double(cc), as.double(xmat),
ans = as.double(ans),
- as.integer(M), as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(M), as.integer(n), NAOK = TRUE)
matrix(fred$ans, n, M)
}
@@ -257,7 +257,7 @@ mux111 <- function(cc, xmat, M, upper = TRUE) {
as.integer(R), as.integer(n), wk = double(M * M),
wk2 = double(M * R), as.integer(index$row),
as.integer(index$col), as.integer(dimm.value),
- as.integer(as.numeric(upper)), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(as.numeric(upper)), NAOK = TRUE)
ans <- fred$b
dim(ans) <- c(R, nrow(xmat))
@@ -283,7 +283,7 @@ mux15 <- function(cc, xmat) {
ans <- rep(as.numeric(NA), n*M*M)
fred <- .C("mux15", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(M),
- as.integer(n), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(n), NAOK = TRUE)
array(fred$ans, c(M, M, n))
}
@@ -302,7 +302,7 @@ vforsub <- function(cc, b, M, n) {
fred <- .C("vforsub", as.double(cc), b = as.double(t(b)),
as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
- as.integer(dimm.value), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(dimm.value), NAOK = TRUE)
dim(fred$b) <- c(M, n)
fred$b
@@ -320,7 +320,7 @@ vbacksub <- function(cc, b, M, n) {
fred <- .C("vbacksub", as.double(cc), b = as.double(b),
as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
- as.integer(dimm.value), NAOK = TRUE, PACKAGE = "VGAM")
+ as.integer(dimm.value), NAOK = TRUE)
if (M == 1) {
fred$b
@@ -347,7 +347,7 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
wk = double(M*M), as.integer(index$row),
as.integer(index$col),
as.integer(MM),
- NAOK = TRUE, PACKAGE = "VGAM")
+ NAOK = TRUE)
failed <- (fred$ok != 1)
if ((correction.needed <- any(failed))) {
@@ -451,7 +451,7 @@ myf <- function(x) {
.Fortran("VGAM_F90_fill9",
x = as.double(x), lenx = as.integer(length(x)),
answer = as.double(x),
- NAOK = TRUE, PACKAGE = "VGAM")$answer
+ NAOK = TRUE)$answer
}
diff --git a/R/nobs.R b/R/nobs.R
index 17e8535..7bf74eb 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 7871b9e..5085ebe 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -910,12 +910,12 @@ plotqrrvglm <- function(object,
ask = FALSE,
main = paste(Rtype, "residuals vs latent variable(s)"),
xlab = "Latent Variable",
- ITolerances = object@control$EqualTolerances,
+ I.tolerances = object@control$eq.tolerances,
...) {
M <- object@misc$M
n <- object@misc$n
Rank <- object@control$Rank
- Coef.object <- Coef(object, ITolerances = ITolerances)
+ Coef.object <- Coef(object, I.tolerances = I.tolerances)
rtype <- match.arg(rtype,
c("response", "pearson", "deviance", "working"))[1]
res <- resid(object, type = rtype)
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 329f123..e0f94d0 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -62,13 +62,13 @@ predict.vgam <-
stop("an intercept is assumed")
M <- object@misc$M
- Blist <- object@constraints
- ncolBlist <- unlist(lapply(Blist, ncol))
+ Hlist <- object@constraints
+ ncolHlist <- unlist(lapply(Hlist, ncol))
if (intercept)
- ncolBlist <- ncolBlist[-1]
+ ncolHlist <- ncolHlist[-1]
if (raw) {
- Blist <- canonical.Blist(Blist)
- object@constraints <- Blist
+ Hlist <- canonical.Hlist(Hlist)
+ object@constraints <- Hlist
}
if (!length(newdata)) {
@@ -153,18 +153,18 @@ predict.vgam <-
if (is.null(tmp6 <- attr(if (se.fit) predictor$fitted.values else
predictor, "vterm.assign"))) {
- Blist <- subconstraints(object@misc$orig.assign,
+ Hlist <- subconstraints(object@misc$orig.assign,
object@constraints)
- ncolBlist <- unlist(lapply(Blist, ncol))
+ ncolHlist <- unlist(lapply(Hlist, ncol))
if (intercept)
- ncolBlist <- ncolBlist[-1]
+ ncolHlist <- ncolHlist[-1]
- cs <- if (raw) cumsum(c(1, ncolBlist)) else
- cumsum(c(1, M + 0 * ncolBlist))
- tmp6 <- vector("list", length(ncolBlist))
+ cs <- if (raw) cumsum(c(1, ncolHlist)) else
+ cumsum(c(1, M + 0 * ncolHlist))
+ tmp6 <- vector("list", length(ncolHlist))
for (ii in 1:length(tmp6))
tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
- names(tmp6) <- names(ncolBlist)
+ names(tmp6) <- names(ncolHlist)
}
n.s.xargument <- names(s.xargument) # e.g., c("s(x)", "s(x2)")
@@ -181,7 +181,7 @@ predict.vgam <-
deriv = deriv.arg)$y
- eta.mat <- if (raw) rawMat else (rawMat %*% t(Blist[[ii]]))
+ eta.mat <- if (raw) rawMat else (rawMat %*% t(Hlist[[ii]]))
if (type == "terms") {
hhh <- tmp6[[ii]]
@@ -347,12 +347,12 @@ varassign <- function(constraints, n.s.xargument) {
ans <- vector("list", length(n.s.xargument))
- ncolBlist <- unlist(lapply(constraints, ncol))
+ ncolHlist <- unlist(lapply(constraints, ncol))
names(ans) <- n.s.xargument
ptr <- 1
for (ii in n.s.xargument) {
- temp <- ncolBlist[[ii]]
+ temp <- ncolHlist[[ii]]
ans[[ii]] <- ptr:(ptr + temp - 1)
ptr <- ptr + temp
}
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 895a6e0..634aaa8 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 62a7524..1490b62 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -95,10 +95,10 @@ predict.vlm <- function(object,
dx1 <- dimnames(X)[[1]]
M <- object@misc$M
- Blist <- object@constraints
- ncolBlist <- unlist(lapply(Blist, ncol))
+ Hlist <- object@constraints
+ ncolHlist <- unlist(lapply(Hlist, ncol))
if (hasintercept)
- ncolBlist <- ncolBlist[-1]
+ ncolHlist <- ncolHlist[-1]
xbar <- x2bar <- NULL
if (type == "terms" && hasintercept) {
@@ -123,13 +123,13 @@ predict.vlm <- function(object,
nn <- if (!is.null(newdata)) nrow(newdata) else object@misc$n
if (raw) {
- Blist <- canonical.Blist(Blist)
- object@constraints <- Blist
+ Hlist <- canonical.Hlist(Hlist)
+ object@constraints <- Hlist
}
- X_vlm <- lm2vlm.model.matrix(X, Blist = Blist, M = M,
+ X_vlm <- lm2vlm.model.matrix(X, Hlist = Hlist, M = M,
xij = object@control$xij, Xm2 = Xm2)
@@ -196,12 +196,12 @@ predict.vlm <- function(object,
if (type == "terms") {
- Blist <- subconstraints(object@misc$orig.assign, object@constraints)
- ncolBlist <- unlist(lapply(Blist, ncol))
+ Hlist <- subconstraints(object@misc$orig.assign, object@constraints)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
if (hasintercept)
- ncolBlist <- ncolBlist[-1]
+ ncolHlist <- ncolHlist[-1]
- cs <- cumsum(c(1, ncolBlist)) # Like a pointer
+ cs <- cumsum(c(1, ncolHlist)) # Like a pointer
for (ii in 1:(length(cs)-1))
if (cs[ii+1] - cs[ii] > 1)
for (kk in (cs[ii]+1):(cs[ii+1]-1))
@@ -239,7 +239,7 @@ predict.vlm <- function(object,
if (raw) {
kindex <- NULL
for (ii in 1:pp)
- kindex <- c(kindex, (ii-1)*M + (1:ncolBlist[ii]))
+ kindex <- c(kindex, (ii-1)*M + (1:ncolHlist[ii]))
if (se.fit) {
pred$fitted.values <- pred$fitted.values[, kindex, drop = FALSE]
pred$se.fit <- pred$se.fit[, kindex, drop = FALSE]
@@ -248,8 +248,8 @@ predict.vlm <- function(object,
}
}
- temp <- if (raw) ncolBlist else rep(M, length(ncolBlist))
- dd <- vlabel(names(ncolBlist), temp, M)
+ temp <- if (raw) ncolHlist else rep(M, length(ncolHlist))
+ dd <- vlabel(names(ncolHlist), temp, M)
if (se.fit) {
dimnames(pred$fitted.values) <-
dimnames(pred$se.fit) <- list(if (length(newdata))
@@ -269,11 +269,11 @@ predict.vlm <- function(object,
}
if (!raw)
- cs <- cumsum(c(1, M + 0 * ncolBlist))
- fred <- vector("list", length(ncolBlist))
+ cs <- cumsum(c(1, M + 0 * ncolHlist))
+ fred <- vector("list", length(ncolHlist))
for (ii in 1:length(fred))
fred[[ii]] <- cs[ii]:(cs[ii+1]-1)
- names(fred) <- names(ncolBlist)
+ names(fred) <- names(ncolHlist)
if (se.fit) {
attr(pred$fitted.values, "vterm.assign") <-
attr(pred$se.fit, "vterm.assign") <- fred
@@ -309,7 +309,7 @@ setMethod("predict", "vlm",
predict.vglm.se <- function(fit, ...) {
- H_ss <- hatvalues(fit, type = "centralBlocks") # diag = FALSE
+ H.ss <- hatvalues(fit, type = "centralBlocks") # diag = FALSE
M <- npred(fit)
nn <- nobs(fit, type = "lm")
@@ -317,8 +317,8 @@ predict.vglm.se <- function(fit, ...) {
Uarray <- array(0, c(M, M, nn))
ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- MM12 <- M * (M + 1) / 2
- for (jay in 1:MM12)
+ MMp1d2 <- M * (M + 1) / 2
+ for (jay in 1:MMp1d2)
Uarray[ind1$row.index[jay],
ind1$col.index[jay], ] <- U[jay, ]
@@ -334,15 +334,15 @@ predict.vglm.se <- function(fit, ...) {
}
}
- var.boldeta_i <- mux5(H_ss, Utinv.array, M = M,
+ var.boldeta.i <- mux5(H.ss, Utinv.array, M = M,
matrix.arg = TRUE) # First M cols are SE^2
- sqrt(var.boldeta_i[, 1:M]) # SE(linear.predictor)
+ sqrt(var.boldeta.i[, 1:M]) # SE(linear.predictor)
- sqrt(var.boldeta_i[, 1:M])
+ sqrt(var.boldeta.i[, 1:M])
}
@@ -379,13 +379,13 @@ is.linear.term <- function(ch) {
}
-canonical.Blist <- function(Blist) {
- for (ii in 1:length(Blist)) {
- temp <- Blist[[ii]] * 0
+canonical.Hlist <- function(Hlist) {
+ for (ii in 1:length(Hlist)) {
+ temp <- Hlist[[ii]] * 0
temp[cbind(1:ncol(temp), 1:ncol(temp))] <- 1
- Blist[[ii]] <- temp
+ Hlist[[ii]] <- temp
}
- Blist
+ Hlist
}
diff --git a/R/print.vglm.q b/R/print.vglm.q
index 2fdb2e1..d50fb3b 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/print.vlm.q b/R/print.vlm.q
index aa67391..715a058 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index ca32a97..f6cc5cc 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,16 +1,19 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
qrrvglm.control <- function(Rank = 1,
Bestof = if (length(Cinit)) 1 else 10,
checkwz = TRUE,
Cinit = NULL,
Crow1positive = TRUE,
epsilon = 1.0e-06,
- EqualTolerances = TRUE,
+ EqualTolerances = NULL,
+ eq.tolerances = TRUE, # 20140520; replaces EqualTolerances
Etamat.colmax = 10,
FastAlgorithm = TRUE,
GradientFunction = TRUE,
@@ -19,7 +22,8 @@ qrrvglm.control <- function(Rank = 1,
length = Rank),
iKvector = 0.1,
iShape = 0.1,
- ITolerances = FALSE,
+ ITolerances = NULL,
+ I.tolerances = FALSE, # 20140520; replaces ITolerances
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
@@ -27,7 +31,7 @@ qrrvglm.control <- function(Rank = 1,
noRRR = ~ 1,
Norrr = NA,
optim.maxit = 20,
- Parscale = if (ITolerances) 0.001 else 1.0,
+ Parscale = if (I.tolerances) 0.001 else 1.0,
sd.Cinit = 0.02,
SmallNo = 5.0e-13,
trace = TRUE,
@@ -39,6 +43,37 @@ qrrvglm.control <- function(Rank = 1,
+
+ if (!is.null(EqualTolerances)) {
+ warning("argument 'EqualTolerances' is depreciated. ",
+ "Use argument 'eq.tolerances'")
+ if (is.logical(EqualTolerances)) {
+ if (eq.tolerances != EqualTolerances)
+ stop("arguments 'eq.tolerances' and 'EqualTolerances' differ")
+ } else {
+ stop("argument 'EqualTolerances' is not a logical")
+ }
+ }
+
+
+
+
+ if (!is.null(ITolerances)) {
+ warning("argument 'ITolerances' is depreciated. ",
+ "Use argument 'I.tolerances'")
+ if (is.logical(ITolerances)) {
+ if (I.tolerances != ITolerances)
+ stop("arguments 'I.tolerances' and 'ITolerances' differ")
+ } else {
+ stop("argument 'ITolerances' is not a logical")
+ }
+ }
+
+
+
+
+
+
if (length(Norrr) != 1 || !is.na(Norrr)) {
warning("argument 'Norrr' has been replaced by 'noRRR'. ",
"Assigning the latter but using 'Norrr' will become an error in ",
@@ -90,8 +125,8 @@ qrrvglm.control <- function(Rank = 1,
if (!is.Numeric(sd.Cinit, positive = TRUE,
length.arg = 1))
stop("bad input for 'sd.Cinit'")
- if (ITolerances && !EqualTolerances)
- stop("'EqualTolerances' must be TRUE if 'ITolerances' is TRUE")
+ if (I.tolerances && !eq.tolerances)
+ stop("'eq.tolerances' must be TRUE if 'I.tolerances' is TRUE")
if (!is.Numeric(Bestof, positive = TRUE,
length.arg = 1, integer.valued = TRUE))
stop("bad input for 'Bestof'")
@@ -123,7 +158,7 @@ qrrvglm.control <- function(Rank = 1,
Corner = FALSE, # Needed for valt.1iter()
Dzero = NULL,
epsilon = epsilon,
- EqualTolerances = EqualTolerances,
+ eq.tolerances = eq.tolerances,
Etamat.colmax = Etamat.colmax,
FastAlgorithm = FastAlgorithm,
GradientFunction = GradientFunction,
@@ -131,7 +166,7 @@ qrrvglm.control <- function(Rank = 1,
isd.latvar = rep(isd.latvar, len = Rank),
iKvector = as.numeric(iKvector),
iShape = as.numeric(iShape),
- ITolerances = ITolerances,
+ I.tolerances = I.tolerances,
maxitl = maxitl,
imethod = imethod,
Maxit.optim = Maxit.optim,
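[Editorial note: a sketch of the transition behaviour coded above; argument values illustrative.]

    ctl <- qrrvglm.control(Rank = 1, EqualTolerances = TRUE)
    # warns: 'EqualTolerances' is deprecated; use 'eq.tolerances'
    ctl$eq.tolerances   # TRUE; the returned control list carries the new name
    # Contradictory old and new values are an error:
    # qrrvglm.control(Rank = 1, EqualTolerances = TRUE, eq.tolerances = FALSE)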
diff --git a/R/qtplot.q b/R/qtplot.q
index 404641a..ed13441 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 4efce8c..a31772b 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 2f220ca..e70983f 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index 51fd403..81cfb34 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -150,7 +150,7 @@ rrvglm.control <-
if (Quadratic) qrrvglm.control(Rank = Rank, ...) else NULL)
- if (Quadratic && ans$ITolerances) {
+ if (Quadratic && ans$I.tolerances) {
ans$Svd.arg <- FALSE
ans$Uncorrelated.latvar <- FALSE
ans$Corner <- FALSE
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 2062637..d225f3e 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -42,163 +42,8 @@ rrvglm.fit <-
n <- dim(x)[1]
- new.s.call <- expression({
- if (c.list$one.more) {
- fv <- c.list$fit
- new.coeffs <- c.list$coeff
- if (length(family@middle))
- eval(family@middle)
- eta <- fv + offset
-
- mu <- family@linkinv(eta, extra)
-
- if (length(family@middle2))
- eval(family@middle2)
-
- old.crit <- new.crit
- new.crit <-
- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu, y = y, w = w,
- res = FALSE, eta = eta, extra))
-
-
-
- if (trace && orig.stepsize == 1) {
- cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
- " linear loop ", iter, ": ", criterion, "= ")
- UUUU <- switch(criterion,
- coefficients =
- format(new.crit,
- dig = round(1 - log10(epsilon))),
- format(new.crit,
- dig = max(4,
- round(-0 - log10(epsilon) +
- log10(sqrt(eff.n))))))
-
- switch(criterion,
- coefficients = {if (length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
- take.half.step <- (control$half.stepsizing &&
- length(old.coeffs)) &&
- !control$Quadratic &&
- ((orig.stepsize != 1) ||
- (criterion != "coefficients" &&
- (if (minimize.criterion)
- new.crit > old.crit else
- new.crit < old.crit)))
- if (!is.logical(take.half.step))
- take.half.step <- TRUE
- if (take.half.step) {
- stepsize <- 2 * min(orig.stepsize, 2*stepsize)
- new.coeffs.save <- new.coeffs
- if (trace)
- cat("Taking a modified step")
- repeat {
- if (trace) {
- cat(".")
- flush.console()
- }
- stepsize <- stepsize / 2
- if (too.small <- stepsize < 0.001)
- break
- new.coeffs <- (1 - stepsize) * old.coeffs +
- stepsize * new.coeffs.save
-
- if (length(family@middle))
- eval(family@middle)
-
- fv <- X.vlm.save %*% new.coeffs
- if (M > 1)
- fv <- matrix(fv, n, M, byrow = TRUE)
-
- eta <- fv + offset
-
- mu <- family@linkinv(eta, extra)
-
- if (length(family@middle2))
- eval(family@middle2)
-
-
- new.crit <-
- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu,y = y,w = w,res = FALSE,
- eta = eta,extra))
-
- if ((criterion == "coefficients") ||
- ( minimize.criterion && new.crit < old.crit) ||
- (!minimize.criterion && new.crit > old.crit))
- break
- }
-
- if (trace)
- cat("\n")
- if (too.small) {
- warning("iterations terminated because ",
- "half-step sizes are very small")
- one.more <- FALSE
- } else {
- if (trace) {
- cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
- " linear loop ", iter, ": ", criterion, "= ")
- UUUU <-
- switch(criterion,
- coefficients =
- format(new.crit,
- dig = round(1 - log10(epsilon))),
- format(new.crit,
- dig = max(4,
- round(-0 - log10(epsilon) +
- log10(sqrt(eff.n))))))
-
- switch(criterion,
- coefficients = {if (length(new.crit) > 2)
- cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
- one.more <- eval(control$convergence)
- }
- } else {
- one.more <- eval(control$convergence)
- }
- flush.console()
-
- if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(family@deriv)
- wz <- eval(family@weight)
- if (control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
-
-
- wz <- matrix(wz, nrow = n)
- U <- vchol(wz, M = M, n = n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z <- eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
-
- rrr.expression <- get(RRR.expression)
- eval(rrr.expression)
-
- c.list$z <- z # contains \bI_{Rank} \bnu
- c.list$U <- U
- if (copy.X.vlm) c.list$X.vlm <- X.vlm.save
- }
-
- c.list$one.more <- one.more
- c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
- old.coeffs <- new.coeffs
- }
- c.list
- }) # end of new.s.call
@@ -343,38 +188,38 @@ rrvglm.fit <-
rrcontrol$Ainit <- control$Ainit <- Amat # Good for valt()
rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M,
+ Hlist <- process.constraints(constraints, x, M,
specialCM = specialCM)
nice31 <- control$Quadratic &&
- (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(Blist) == 1)
+ (!control$eq.tol || control$I.tolerances) &&
+ all(trivial.constraints(Hlist) == 1)
- Blist <- Blist.save <- replace.constraints(Blist, Amat, colx2.index)
+ Hlist <- Hlist.save <- replace.constraints(Hlist, Amat, colx2.index)
- ncolBlist <- unlist(lapply(Blist, ncol))
- dimB <- sum(ncolBlist)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ dimB <- sum(ncolHlist)
X.vlm.save <- if (control$Quadratic) {
- tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+ tmp500 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist,
C = Cmat, control = control)
xsmall.qrr <- tmp500$new.latvar.model.matrix
- B.list <- tmp500$constraints
+ H.list <- tmp500$constraints
if (FALSE && modelno == 3) {
- B.list[[1]] <- (B.list[[1]])[, c(TRUE, FALSE), drop = FALSE] # Amat
- B.list[[2]] <- (B.list[[2]])[, c(TRUE, FALSE), drop = FALSE] # D
+ H.list[[1]] <- (H.list[[1]])[, c(TRUE, FALSE), drop = FALSE] # Amat
+ H.list[[2]] <- (H.list[[2]])[, c(TRUE, FALSE), drop = FALSE] # D
}
latvar.mat <- tmp500$latvar.mat
if (length(tmp500$offset)) {
offset <- tmp500$offset
}
- lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
+ lm2vlm.model.matrix(xsmall.qrr, H.list, xij = control$xij)
} else {
latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat
- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+ lm2vlm.model.matrix(x, Hlist, xij = control$xij)
}
@@ -429,9 +274,9 @@ rrvglm.fit <-
if (nrow.X.vlm < ncol.X.vlm)
stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
- bf.call <- expression(vlm.wfit(xmat=X.vlm.save, zedd,
- Blist = if (control$Quadratic) B.list else Blist,
- ncolx=ncol(x), U=U,
+ bf.call <- expression(vlm.wfit(xmat = X.vlm.save, zedd,
+ Hlist = if (control$Quadratic) H.list else Hlist,
+ ncolx = ncol(x), U = U,
Eta.range = control$Eta.range,
matrix.out = if (control$Quadratic) FALSE else TRUE,
is.vlmX = TRUE, qr = qr.arg, xij = control$xij))
@@ -455,7 +300,8 @@ rrvglm.fit <-
rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
}
- if (!nice31) c.list$coeff <- tfit$coefficients
+ if (!nice31)
+ c.list$coeff <- tfit$coefficients
if (control$Quadratic) {
if (control$Corner)
@@ -467,9 +313,168 @@ rrvglm.fit <-
tfit$predictors <- tfit$fitted.values # Does not contain the offset
if (!nice31)
c.list$fit <- tfit$fitted.values
- c.list <- eval(new.s.call)
- NULL
- }
+
+
+ if (!c.list$one.more) {
+ break
+ }
+
+
+
+ fv <- c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if (length(family@middle))
+ eval(family@middle)
+
+ eta <- fv + offset
+
+ mu <- family@linkinv(eta, extra)
+
+ if (length(family@middle2))
+ eval(family@middle2)
+
+ old.crit <- new.crit
+ new.crit <-
+ switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+
+
+
+ if (trace && orig.stepsize == 1) {
+ cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+ " linear loop ", iter, ": ", criterion, "= ")
+ UUUU <- switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(1 - log10(epsilon))),
+ format(new.crit,
+ dig = max(4,
+ round(-0 - log10(epsilon) +
+ log10(sqrt(eff.n))))))
+
+ switch(criterion,
+ coefficients = {if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+ take.half.step <- (control$half.stepsizing &&
+ length(old.coeffs)) &&
+ !control$Quadratic &&
+ ((orig.stepsize != 1) ||
+ (criterion != "coefficients" &&
+ (if (minimize.criterion)
+ new.crit > old.crit else
+ new.crit < old.crit)))
+ if (!is.logical(take.half.step))
+ take.half.step <- TRUE
+ if (take.half.step) {
+ stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+ new.coeffs.save <- new.coeffs
+ if (trace)
+ cat("Taking a modified step")
+ repeat {
+ if (trace) {
+ cat(".")
+ flush.console()
+ }
+ stepsize <- stepsize / 2
+ if (too.small <- stepsize < 0.001)
+ break
+ new.coeffs <- (1 - stepsize) * old.coeffs +
+ stepsize * new.coeffs.save
+
+ if (length(family@middle))
+ eval(family@middle)
+
+ fv <- X.vlm.save %*% new.coeffs
+ if (M > 1)
+ fv <- matrix(fv, n, M, byrow = TRUE)
+
+ eta <- fv + offset
+
+ mu <- family@linkinv(eta, extra)
+
+ if (length(family@middle2))
+ eval(family@middle2)
+
+
+ new.crit <-
+ switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu,y = y,w = w,res = FALSE,
+ eta = eta,extra))
+
+ if ((criterion == "coefficients") ||
+ ( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))
+ break
+ }
+
+ if (trace)
+ cat("\n")
+ if (too.small) {
+ warning("iterations terminated because ",
+ "half-step sizes are very small")
+ one.more <- FALSE
+ } else {
+ if (trace) {
+ cat(if (control$Quadratic) "QRR-VGLM" else "RR-VGLM",
+ " linear loop ", iter, ": ", criterion, "= ")
+ UUUU <-
+ switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(1 - log10(epsilon))),
+ format(new.crit,
+ dig = max(4,
+ round(-0 - log10(epsilon) +
+ log10(sqrt(eff.n))))))
+
+ switch(criterion,
+ coefficients = {if (length(new.crit) > 2)
+ cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+ one.more <- eval(control$convergence)
+ }
+ } else {
+ one.more <- eval(control$convergence)
+ }
+ flush.console()
+
+ if (one.more) {
+ iter <- iter + 1
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if (control$checkwz)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+
+ wz <- matrix(wz, nrow = n)
+ U <- vchol(wz, M = M, n = n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
+
+ rrr.expression <- get(RRR.expression)
+ eval(rrr.expression)
+
+ c.list$z <- z # contains \bI_{Rank} \bnu
+ c.list$U <- U
+ if (copy.X.vlm) c.list$X.vlm <- X.vlm.save
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ old.coeffs <- new.coeffs
+
+ } # End of while()
if (maxit > 1 && iter >= maxit && !control$noWarning)
@@ -484,10 +489,10 @@ rrvglm.fit <-
ynrow.X.vlm <- dnrow.X.vlm[[1]]
if (length(family@fini))
- eval(family@fini)
+ eval(family@fini)
if (M > 1 && !nice31)
- tfit$predictors <- matrix(tfit$predictors, n, M)
+ tfit$predictors <- matrix(tfit$predictors, n, M)
asgn <- attr(X.vlm.save, "assign")
if (nice31) {
@@ -573,7 +578,7 @@ rrvglm.fit <-
fit <- list(assign = asgn,
coefficients = coefs,
- constraints = if (control$Quadratic) B.list else Blist,
+ constraints = if (control$Quadratic) H.list else Hlist,
df.residual = df.residual,
df.total = n*M,
effects = effects,
@@ -590,7 +595,7 @@ rrvglm.fit <-
}
if (M == 1) {
- wz <- as.vector(wz) # Convert wz into a vector
+ wz <- as.vector(wz) # Convert wz into a vector
} # else
fit$weights <- if (save.weight) wz else NULL
@@ -619,7 +624,7 @@ rrvglm.fit <-
if (criterion != "coefficients")
crit.list[[criterion]] <- fit[[criterion]] <- new.crit
- for (ii in names(.min.criterion.VGAM)) {
+ for (ii in names( .min.criterion.VGAM )) {
if (ii != criterion &&
any(slotNames(family) == ii) &&
length(body(slot(family, ii)))) {
diff --git a/R/s.q b/R/s.q
index 7d3fcdd..6859086 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/s.vam.q b/R/s.vam.q
index 34d0f84..e292c20 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,7 +12,7 @@
s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
bf.epsilon = 0.001, trace = FALSE, se.fit = TRUE,
- X.vlm.save, Blist, ncolBlist, M, qbig, Umat,
+ X.vlm.save, Hlist, ncolHlist, M, qbig, Umat,
all.knots = FALSE, nk = NULL,
sf.only = FALSE) {
nwhich <- names(which)
@@ -43,21 +43,21 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
if (!is.numeric(temp) || any(temp < 0)) {
stop("spar cannot be negative or non-numeric")
}
- if (length(temp) > ncolBlist[ii]) {
- warning("only the first ", ncolBlist[ii], " values of ",
+ if (length(temp) > ncolHlist[ii]) {
+ warning("only the first ", ncolHlist[ii], " values of ",
"'spar' are used for variable '", s.xargument, "'")
}
- osparv[[ii]] <- rep(temp, length = ncolBlist[ii]) # Recycle
+ osparv[[ii]] <- rep(temp, length = ncolHlist[ii]) # Recycle
temp <- odfvec[[ii]]
if (!is.numeric(temp) || any(temp < 1)) {
stop("argument 'df' is non-numeric or less than 1")
}
- if (length(temp) > ncolBlist[ii]) {
- warning("only the first ", ncolBlist[ii], " value(s) of 'df' ",
+ if (length(temp) > ncolHlist[ii]) {
+ warning("only the first ", ncolHlist[ii], " value(s) of 'df' ",
"are used for variable '", s.xargument, "'")
}
- odfvec[[ii]] <- rep(temp, length = ncolBlist[ii]) # Recycle
+ odfvec[[ii]] <- rep(temp, length = ncolHlist[ii]) # Recycle
if (max(temp) > smooth.frame$neffec[kk]-1) {
stop("'df' value too high for variable '", s.xargument, "'")
}
@@ -75,7 +75,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$odfvec <- odfvec # Original
if (sum(smooth.frame$dfvec[smooth.frame$osparv == 0]) + pbig >
- smooth.frame$n.lm * sum(ncolBlist[nwhich])) {
+ smooth.frame$n.lm * sum(ncolHlist[nwhich])) {
stop("too many parameters/dof for data on hand")
}
@@ -90,17 +90,17 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$s.xargument <- s.xargument # Stored here
smooth.frame$smap <-
- as.vector(cumsum(c(1, ncolBlist[nwhich]))[1:length(nwhich)])
+ as.vector(cumsum(c(1, ncolHlist[nwhich]))[1:length(nwhich)])
smooth.frame$try.sparv <- osparv
smooth.frame$bindex <-
- as.integer(cumsum(c(1, smooth.frame$nknots * ncolBlist[nwhich])))
+ as.integer(cumsum(c(1, smooth.frame$nknots * ncolHlist[nwhich])))
smooth.frame$lindex <-
- as.integer(cumsum(c(1, smooth.frame$neffec * ncolBlist[nwhich])))
+ as.integer(cumsum(c(1, smooth.frame$neffec * ncolHlist[nwhich])))
smooth.frame$kindex <-
@@ -118,7 +118,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
- ldk <- 3 * max(ncolBlist[nwhich]) + 1 # 20020711
+ ldk <- 3 * max(ncolHlist[nwhich]) + 1 # 20020711
which <- unlist(which)
p.lm <- smooth.frame$p.lm
@@ -127,15 +127,15 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
dim1U <- if (is.matrix(Umat)) nrow(Umat) else 1
- nBlist <- names(Blist)
- for (ii in length(nBlist):1) {
- if (!any(nBlist[ii] == nwhich)) {
- Blist[[ii]] <- NULL
+ nHlist <- names(Hlist)
+ for (ii in length(nHlist):1) {
+ if (!any(nHlist[ii] == nwhich)) {
+ Hlist[[ii]] <- NULL
}
}
- trivc <- trivial.constraints(Blist)
+ trivc <- trivial.constraints(Hlist)
- ncbvec <- ncolBlist[nwhich]
+ ncbvec <- ncolHlist[nwhich]
ncolbmax <- max(ncbvec)
@@ -168,7 +168,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
qr = as.double(X.vlm.save), qraux = double(pbig),
qpivot = as.integer(1:pbig),
as.double(Umat),
- as.double(unlist(Blist)),
+ as.double(unlist(Hlist)),
as.integer(ncbvec), as.integer(smooth.frame$smap),
trivc = as.integer(trivc),
@@ -180,7 +180,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
bindex = as.integer(smooth.frame$bindex),
lindex = as.integer(smooth.frame$lindex),
nknots = as.integer(smooth.frame$nknots),
- kindex = as.integer(smooth.frame$kindex), PACKAGE = "VGAM") # End of dotC
+ kindex = as.integer(smooth.frame$kindex)) # End of dotC
if (exists("flush.console"))
@@ -233,7 +233,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
for (ii in 1:length(nwhich)) {
b.coefs <- fit$bcoeff[(smooth.frame$bindex[ii]):
(smooth.frame$bindex[ii + 1] - 1)]
- b.coefs <- matrix(b.coefs, ncol = ncolBlist[nwhich[ii]])
+ b.coefs <- matrix(b.coefs, ncol = ncolHlist[nwhich[ii]])
Bspline[[ii]] <-
new("vsmooth.spline.fit",
"Bcoefficients" = b.coefs,
@@ -252,7 +252,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
(smooth.frame$lindex[ii+1]-1)]
levmat <- matrix(levvec,
nrow = smooth.frame$neffec[ii],
- ncol = ncolBlist[nwhich[ii]])
+ ncol = ncolHlist[nwhich[ii]])
Leverages[[ii]] <- levmat
}
diff --git a/R/simulate.vglm.R b/R/simulate.vglm.R
new file mode 100644
index 0000000..07e6a6a
--- /dev/null
+++ b/R/simulate.vglm.R
@@ -0,0 +1,62 @@
+# These functions are Copyright (C) 1998-2013 T. W. Yee. All rights reserved.
+
+# Trying to get simulate() to work for some VGAM family functions.
+# 20131228
+
+
+# Last modified:
+# 20131228: adapting simulate.vglm() from stats:::simulate.lm
+# It comes from R 3.0.2.
+
+
+
+simulate.vlm <- function (object, nsim = 1, seed = NULL, ...) {
+ if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE))
+ runif(1)
+ if (is.null(seed)) {
+ RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+ } else {
+ R.seed <- get(".Random.seed", envir = .GlobalEnv)
+ set.seed(seed)
+ RNGstate <- structure(seed, kind = as.list(RNGkind()))
+ on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+ }
+ ftd <- fitted(object)
+ nm <- names(ftd)
+ n <- length(ftd)
+ ntot <- n * nsim
+ Fam <- if (inherits(object, "vlm")) {
+# object@family$family
+ object@family
+ } else {
+# "gaussian"
+ stop("cannot get at the 'family' slot")
+ }
+ val <-
+ if (length(Fam@simslot) > 0) {
+ Fam@simslot(object, nsim)
+ } else {
+ # report the family name; gettextf() cannot format the S4 object itself
+ stop(gettextf("family '%s' not implemented", Fam@vfamily[1]),
+ domain = NA)
+ }
+ if (!is.list(val)) {
+ dim(val) <- c(n, nsim)
+ val <- as.data.frame(val)
+ } else {
+ class(val) <- "data.frame"
+ }
+ names(val) <- paste("sim", seq_len(nsim), sep = "_")
+ if (!is.null(nm))
+ row.names(val) <- nm
+ attr(val, "seed") <- RNGstate
+ val
+}
+
+
+
+
+
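[Editorial note: a usage sketch, assuming simulate() is registered for "vlm" objects in the NAMESPACE and the fitted family supplies a 'simslot', as zageometricff does above; 'zdata' is hypothetical.]

    fit  <- vglm(y ~ x2, family = zageometricff, data = zdata)
    sims <- simulate(fit, nsim = 10, seed = 123)
    dim(sims)           # one row per observation, one column per replicate
    attr(sims, "seed")  # the RNG state, as with stats::simulate()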
diff --git a/R/smart.R b/R/smart.R
index dd802f3..5622e64 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -21,6 +21,7 @@
+
smartpredenv <- new.env()
@@ -179,7 +180,7 @@ is.smart <- function(object) {
-bs <-
+ sm.bs <-
function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
Boundary.knots = range(x)) {
x <- x # Evaluate x
@@ -266,14 +267,14 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
basis
}
-attr(bs, "smart") <- TRUE
+attr( sm.bs, "smart") <- TRUE
-ns <-
+ sm.ns <-
function (x, df = NULL, knots = NULL, intercept = FALSE,
Boundary.knots = range(x)) {
x <- x # Evaluate x
@@ -355,7 +356,7 @@ ns <-
basis
}
-attr(ns, "smart") <- TRUE
+attr( sm.ns, "smart") <- TRUE
@@ -364,7 +365,7 @@ attr(ns, "smart") <- TRUE
-poly <-
+ sm.poly <-
function (x, ..., degree = 1, coefs = NULL, raw = FALSE) {
x <- x # Evaluate x
if (!raw && smart.mode.is("read")) {
@@ -457,14 +458,14 @@ poly <-
Z
}
-attr(poly, "smart") <- TRUE
+attr(sm.poly, "smart") <- TRUE
-scale.default <-
+ sm.scale.default <-
function (x, center = TRUE, scale = TRUE) {
x <- as.matrix(x)
@@ -505,10 +506,29 @@ scale.default <-
x
}
-attr(scale.default, "smart") <- TRUE
+attr(sm.scale.default, "smart") <- TRUE
+
+
+
+
+
+
+ sm.scale <- function (x, center = TRUE, scale = TRUE)
+ UseMethod("sm.scale")
+
+
+
+attr(sm.scale, "smart") <- TRUE
+
+
+
+
+
+
+
+
-attr(scale, "smart") <- TRUE
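[Editorial note: with the sm.-prefixed names the smart variants no longer shadow stats::poly(), splines::bs(), and friends, so formulas opt in explicitly. A sketch; 'udata' and the family choice are hypothetical.]

    fit <- vglm(y ~ sm.poly(x2, 2), family = uninormal, data = udata)
    predict(fit, newdata = udata[1:3, ])  # sm.poly() replays the stored basis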
diff --git a/R/step.vglm.q b/R/step.vglm.q
index 2d95ef2..1b18d06 100644
--- a/R/step.vglm.q
+++ b/R/step.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 74f7684..62e24fd 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index fc8ce01..a3c4c5e 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 542f459..ed24efd 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -27,14 +27,14 @@ summaryvlm <-
nrow.X.vlm <- object@misc$nrow.X.vlm
ncol.X.vlm <- object@misc$ncol.X.vlm # May be NULL for CQO objects
- coef <- object@coefficients
- cnames <- names(coef)
+ Coefs <- object at coefficients
+ cnames <- names(Coefs)
if (presid) {
Presid <- residualsvlm(object, type = "pearson") # NULL if pooled.weight
}
- if (any(is.na(coef))) {
+ if (any(is.na(Coefs))) {
warning(paste("Some NAs in the coefficients---no summary",
" provided; returning object\n"))
return(object)
@@ -72,22 +72,28 @@ summaryvlm <-
if (ncol.X.vlm < max(dim(R)))
stop("R is rank deficient")
- rinv <- diag(ncol.X.vlm)
- rinv <- backsolve(R, rinv)
- rowlen <- drop(((rinv^2) %*% rep(1, ncol.X.vlm))^0.5)
- covun <- rinv %*% t(rinv)
+
+
+
+
+ covun <- chol2inv(R)
+
dimnames(covun) <- list(cnames, cnames)
}
- coef <- matrix(rep(coef, 3), ncol = 3)
- dimnames(coef) <- list(cnames, Colnames)
+ coef3 <- matrix(rep(Coefs, 3), ncol = 3)
+ dimnames(coef3) <- list(cnames, Colnames)
+ SEs <- sqrt(diag(covun))
if (length(sigma) == 1 && is.Numeric(ncol.X.vlm)) {
- coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
- coef[, 3] <- coef[, 1] / coef[, 2]
+ coef3[, 2] <- SEs %o% sigma # Fails here when sigma is a vector
+ coef3[, 3] <- coef3[, 1] / coef3[, 2]
} else {
- coef[, 1] <- coef[, 2] <- coef[, 3] <- NA
+ coef3[, 1] <- coef3[, 2] <- coef3[, 3] <- NA
}
if (correlation) {
- correl <- covun * outer(1 / rowlen, 1 / rowlen)
+ correl <- covun * outer(1 / SEs, 1 / SEs)
+
+ diag(correl) <- 1.0
+
dimnames(correl) <- list(cnames, cnames)
} else {
correl <- matrix(0, 0, 0) # was NULL, but now a special matrix
@@ -99,10 +105,10 @@ summaryvlm <-
answer <-
new("summary.vlm",
object,
- coef3 = coef,
+ coef3 = coef3,
correlation = correl,
- df = c(ncol.X.vlm, rdf),
- sigma = sigma)
+ df = c(ncol.X.vlm, rdf),
+ sigma = sigma)
if (is.Numeric(ncol.X.vlm))
answer@cov.unscaled <- covun
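[Editorial note: the chol2inv() change above is algebraically neutral. For the upper-triangular QR factor R, chol2inv(R) = (R^T R)^{-1} = R^{-1} R^{-T}, which is exactly the old rinv %*% t(rinv); the new SEs from sqrt(diag(covun)) likewise equal the old rowlen. A quick check:]

    R    <- qr.R(qr(matrix(rnorm(20), 5, 4)))
    rinv <- backsolve(R, diag(4))
    all.equal(chol2inv(R), rinv %*% t(rinv))  # TRUE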
diff --git a/R/vgam.R b/R/vgam.R
index de66464..a4b1487 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vgam.control.q b/R/vgam.control.q
index bed56a7..522cf54 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -104,6 +104,8 @@ vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
ans <- rep(as.numeric(NA), length = ncol(smomat))
Uderiv <- vbacksub(U, t(deriv), M = M, n = n) # \bU_i^{-1} \biu_i
+
+
ptr <- 0
for (ii in 1:length(smooth.labels)) {
cmat <- constraints[[ smooth.labels[ii] ]]
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 3faafa0..3d557c1 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -42,73 +42,8 @@ vgam.fit <-
- new.s.call <- expression({
- if (c.list$one.more) {
- fv <- c.list$fit
- new.coeffs <- c.list$coeff
- if (length(family@middle))
- eval(family@middle)
- eta <- fv + offset
- mu <- family@linkinv(eta, extra)
-
- if (length(family@middle2))
- eval(family@middle2)
-
- old.crit <- new.crit
-
- new.crit <- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu, y = y, w = w,
- res = FALSE, eta = eta, extra))
- if (trace) {
- cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
-
- UUUU <- switch(criterion,
- coefficients =
- format(new.crit,
- dig = round(1 - log10(epsilon))),
- format(new.crit,
- dig = max(4,
- round(-0 - log10(epsilon) +
- log10(sqrt(eff.n))))))
-
- switch(criterion,
- coefficients = {if (length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
- one.more <- eval(control$convergence)
-
- flush.console()
-
- if (!is.finite(one.more) || !is.logical(one.more))
- one.more <- FALSE
- if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(family@deriv)
- wz <- eval(family@weight)
- if (control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
-
- U <- vchol(wz, M = M, n = n, silent = !trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
-
- c.list$z <- z
- c.list$wz <- wz
- c.list$U <- U
- }
-
- c.list$one.more <- one.more
- c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
- old.coeffs <- new.coeffs
- }
- c.list
- })
@@ -147,10 +82,10 @@ vgam.fit <-
if (length(family@constraints))
eval(family@constraints)
- Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
+ Hlist <- process.constraints(constraints, x, M, specialCM = specialCM)
- ncolBlist <- unlist(lapply(Blist, ncol))
- dimB <- sum(ncolBlist)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ dimB <- sum(ncolHlist)
if (nonparametric) {
@@ -164,29 +99,29 @@ vgam.fit <-
bf.call <- parse(text = paste(
"s.vam(x, z, wz, tfit$smomat, which, tfit$smooth.frame,",
"bf.maxit, bf.epsilon, trace, se = se.fit, X.vlm.save, ",
- "Blist, ncolBlist, M = M, qbig = qbig, Umat = U, ",
+ "Hlist, ncolHlist, M = M, qbig = qbig, Umat = U, ",
"all.knots = control$all.knots, nk = control$nk)",
sep = ""))[[1]]
- qbig <- sum(ncolBlist[smooth.labels]) # Number of component funs
+ qbig <- sum(ncolHlist[smooth.labels]) # Number of component funs
smomat <- matrix(0, n, qbig)
dy <- if (is.matrix(y)) dimnames(y)[[1]] else names(y)
d2 <- if (is.null(predictors.names))
paste("(Additive predictor ",1:M,")", sep = "") else
predictors.names
dimnames(smomat) <- list(dy, vlabel(smooth.labels,
- ncolBlist[smooth.labels], M))
+ ncolHlist[smooth.labels], M))
tfit <- list(smomat = smomat, smooth.frame = smooth.frame)
} else {
bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
- Blist = NULL, U = U,
+ Hlist = NULL, U = U,
matrix.out = FALSE, is.vlmX = TRUE,
qr = qr.arg, xij = NULL))
bf <- "vlm.wfit"
}
- X.vlm.save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
+ X.vlm.save <- lm2vlm.model.matrix(x, Hlist, xij = control$xij)
if (length(coefstart)) {
@@ -231,17 +166,86 @@ vgam.fit <-
if (nrow.X.vlm < ncol.X.vlm)
stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
+
while (c.list$one.more) {
tfit <- eval(bf.call) # fit$smooth.frame is new
c.list$coeff <- tfit$coefficients
-
tfit$predictors <- tfit$fitted.values + offset
c.list$fit <- tfit$fitted.values
- c.list <- eval(new.s.call)
- NULL
- }
+
+ if (!c.list$one.more) {
+ break
+ }
+
+
+
+
+ fv <- c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if (length(family@middle))
+ eval(family@middle)
+
+ eta <- fv + offset
+ mu <- family@linkinv(eta, extra)
+
+ if (length(family@middle2))
+ eval(family@middle2)
+
+ old.crit <- new.crit
+
+ new.crit <- switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+ if (trace) {
+ cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
+
+ UUUU <- switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(1 - log10(epsilon))),
+ format(new.crit,
+ dig = max(4,
+ round(-0 - log10(epsilon) +
+ log10(sqrt(eff.n))))))
+
+ switch(criterion,
+ coefficients = {if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+ one.more <- eval(control$convergence)
+
+ flush.console()
+
+ if (!is.finite(one.more) || !is.logical(one.more))
+ one.more <- FALSE
+ if (one.more) {
+ iter <- iter + 1
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if (control$checkwz)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
+
+ c.list$z <- z
+ c.list$wz <- wz
+ c.list$U <- U
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
+ old.coeffs <- new.coeffs
+
+ } # End of while()
if (maxit > 1 && iter >= maxit)
warning("convergence not obtained in ", maxit, " iterations")
@@ -293,7 +297,7 @@ vgam.fit <-
tfit$fitted.values <- NULL # Have to kill it off 3/12/01
fit <- structure(c(tfit,
list(assign = asgn,
- constraints = Blist,
+ constraints = Hlist,
control = control,
fitted.values = mu,
formula = as.vector(attr(Terms, "formula")),
@@ -325,7 +329,7 @@ vgam.fit <-
dimnames(fit$predictors) <- list(yn, predictors.names)
}
- NewBlist <- process.constraints(constraints, x, M,
+ NewHlist <- process.constraints(constraints, x, M,
specialCM = specialCM, by.col = FALSE)
misc <- list(
@@ -337,7 +341,7 @@ vgam.fit <-
predictors.names = predictors.names,
M = M,
n = n,
- new.assign = new.assign(x, NewBlist),
+ new.assign = new.assign(x, NewHlist),
nonparametric = nonparametric,
nrow.X.vlm = nrow.X.vlm,
orig.assign = attr(x, "assign"),
@@ -352,7 +356,7 @@ vgam.fit <-
if (se.fit && length(fit$s.xargument)) {
- misc$varassign <- varassign(Blist, names(fit$s.xargument))
+ misc$varassign <- varassign(Hlist, names(fit$s.xargument))
}
@@ -404,7 +408,7 @@ vgam.fit <-
smomat = fit$smomat,
deriv = deriv.mu, U = U,
smooth.labels, attr(x, "assign"),
- M = M, n = n, constraints = Blist)
+ M = M, n = n, constraints = Hlist)
}
@@ -434,9 +438,9 @@ vgam.fit <-
-new.assign <- function(X, Blist) {
+new.assign <- function(X, Hlist) {
- M <- nrow(Blist[[1]])
+ M <- nrow(Hlist[[1]])
dn <- labels(X)
xn <- dn[[2]]
@@ -444,22 +448,22 @@ new.assign <- function(X, Blist) {
nasgn <- names(asgn)
lasgn <- unlist(lapply(asgn, length))
- ncolBlist <- unlist(lapply(Blist, ncol))
- names(ncolBlist) <- NULL # This is necessary for below to work
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ names(ncolHlist) <- NULL # This is necessary for below to work
- temp2 <- vlabel(nasgn, ncolBlist, M)
+ temp2 <- vlabel(nasgn, ncolHlist, M)
L <- length(temp2)
newasgn <- vector("list", L)
kk <- 0
low <- 1
for (ii in 1:length(asgn)) {
- len <- low:(low + ncolBlist[ii] * lasgn[ii] -1)
- temp <- matrix(len, ncolBlist[ii], lasgn[ii])
- for (mm in 1:ncolBlist[ii])
+ len <- low:(low + ncolHlist[ii] * lasgn[ii] -1)
+ temp <- matrix(len, ncolHlist[ii], lasgn[ii])
+ for (mm in 1:ncolHlist[ii])
newasgn[[kk + mm]] <- temp[mm, ]
- low <- low + ncolBlist[ii] * lasgn[ii]
- kk <- kk + ncolBlist[ii]
+ low <- low + ncolHlist[ii] * lasgn[ii]
+ kk <- kk + ncolHlist[ii]
}
names(newasgn) <- temp2
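The largest change to vgam.fit() above is structural: the new.s.call expression object, previously built once and re-eval()uated inside the loop, is now inlined as ordinary code in the while() body. A toy sketch, not VGAM code, of the two motifs being exchanged:

    body.old <- expression({ total <- total + i })
    total <- 0
    for (i in 1:3) eval(body.old)      # old motif: eval() a stored expression
    total                              # 6
    total <- 0
    for (i in 1:3) total <- total + i  # new motif: same effect, plain code
    total                              # 6

Inlining makes the control flow visible to the reader and to debugging tools, with no change in behaviour.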
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 5ebe1f0..7d04320 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -72,7 +72,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
knot.list <- .C("vknootl2",
as.double(xbar),
as.integer(neffec), knot = double(neffec+6),
- k = as.integer(nk+4), chosen = as.integer(chosen), PACKAGE = "VGAM")
+ k = as.integer(nk+4), chosen = as.integer(chosen))
if (noround) {
knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
knot.list$k <- length(knot)
diff --git a/R/vglm.R b/R/vglm.R
index e4f85ab..f6dc7b9 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 4564dcd..202f154 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index fefd4b3..12ca1b7 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,17 +7,18 @@
-vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
- X.vlm.arg = NULL,
- Xm2 = NULL, Ym2 = NULL,
- etastart = NULL, mustart = NULL, coefstart = NULL,
- offset = 0, family,
- control = vglm.control(),
- criterion = "coefficients",
- qr.arg = FALSE,
- constraints = NULL,
- extra = NULL,
- Terms = Terms, function.name = "vglm", ...) {
+vglm.fit <-
+ function(x, y, w = rep(1, length(x[, 1])),
+ X.vlm.arg = NULL,
+ Xm2 = NULL, Ym2 = NULL,
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family,
+ control = vglm.control(),
+ criterion = "coefficients",
+ qr.arg = FALSE,
+ constraints = NULL,
+ extra = NULL,
+ Terms = Terms, function.name = "vglm", ...) {
eff.n <- nrow(x) # + sum(abs(w[1:nrow(x)]))
@@ -41,164 +42,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
n <- dim(x)[1]
- new.s.call <- expression({
- if (c.list$one.more) {
- fv <- c.list$fit
- new.coeffs <- c.list$coeff
- if (length(slot(family, "middle")))
- eval(slot(family, "middle"))
- eta <- fv + offset
- mu <- slot(family, "linkinv")(eta, extra)
- if (length(slot(family, "middle2")))
- eval(slot(family, "middle2"))
-
- old.crit <- new.crit
- new.crit <-
- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu, y = y, w = w,
- res = FALSE, eta = eta, extra))
-
-
- if (trace && orig.stepsize == 1) {
- cat("VGLM linear loop ", iter, ": ", criterion, "= ")
- UUUU <- switch(criterion,
- coefficients =
- format(new.crit,
- dig = round(1 - log10(epsilon))),
- format(new.crit,
- dig = max(4,
- round(-0 - log10(epsilon) +
- log10(sqrt(eff.n))))))
-
- switch(criterion,
- coefficients = {if (length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
-
- take.half.step <- (control$half.stepsizing &&
- length(old.coeffs)) &&
- ((orig.stepsize != 1) ||
- (criterion != "coefficients" &&
- (if (minimize.criterion) new.crit > old.crit else
- new.crit < old.crit)))
- if (!is.logical(take.half.step))
- take.half.step <- TRUE
- if (take.half.step) {
- stepsize <- 2 * min(orig.stepsize, 2*stepsize)
- new.coeffs.save <- new.coeffs
- if (trace)
- cat("Taking a modified step")
- repeat {
- if (trace) {
- cat(".")
- flush.console()
- }
- stepsize <- stepsize / 2
- if (too.small <- stepsize < 0.001)
- break
- new.coeffs <- (1-stepsize) * old.coeffs +
- stepsize * new.coeffs.save
-
- if (length(slot(family, "middle")))
- eval(slot(family, "middle"))
-
- fv <- X.vlm.save %*% new.coeffs
- if (M > 1)
- fv <- matrix(fv, n, M, byrow = TRUE)
-
- eta <- fv + offset
- mu <- slot(family, "linkinv")(eta, extra)
-
- if (length(slot(family, "middle2")))
- eval(slot(family, "middle2"))
-
-
- new.crit <-
- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu, y = y, w = w,
- res = FALSE, eta = eta, extra))
-
- if ((criterion == "coefficients") ||
- ( minimize.criterion && new.crit < old.crit) ||
- (!minimize.criterion && new.crit > old.crit))
- break
- } # of repeat
-
- if (trace)
- cat("\n")
- if (too.small) {
- warning("iterations terminated because ",
- "half-step sizes are very small")
- one.more <- FALSE
- } else {
- if (trace) {
- cat("VGLM linear loop ",
- iter, ": ", criterion, "= ")
-
- UUUU <- switch(criterion,
- coefficients =
- format(new.crit,
- dig = round(1 - log10(epsilon))),
- format(new.crit,
- dig = max(4,
- round(-0 - log10(epsilon) +
- log10(sqrt(eff.n))))))
-
- switch(criterion,
- coefficients = {
- if (length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
- one.more <- eval(control$convergence)
- }
- } else {
- one.more <- eval(control$convergence)
- }
- flush.console()
-
- if (!is.logical(one.more))
- one.more <- FALSE
- if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(slot(family, "deriv"))
- wz <- eval(slot(family, "weight"))
- if (control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
-
- U <- vchol(wz, M = M, n = n, silent = !trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
-
- c.list$z <- z
- c.list$U <- U
- if (copy.X.vlm)
- c.list$X.vlm <- X.vlm.save
- }
-
- c.list$one.more <- one.more
- c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
- old.coeffs <- new.coeffs
- }
- c.list
- })
-
-
-
-
-
- copy.X.vlm <- FALSE # May be overwritten in @initialize
+ copy.X.vlm <- FALSE # May be overwritten in @initialize
stepsize <- orig.stepsize
old.coeffs <- coefstart
@@ -241,12 +90,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
eval(slot(family, "constraints"))
- Blist <- process.constraints(constraints, x, M,
+ Hlist <- process.constraints(constraints, x, M,
specialCM = specialCM)
- ncolBlist <- unlist(lapply(Blist, ncol))
- dimB <- sum(ncolBlist)
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ dimB <- sum(ncolHlist)
@@ -255,7 +104,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
X.vlm.save <- if (length(X.vlm.arg)) {
X.vlm.arg
} else {
- lm2vlm.model.matrix(x, Blist, xij = control$xij,
+ lm2vlm.model.matrix(x, Hlist, xij = control$xij,
Xm2 = Xm2)
}
@@ -263,11 +112,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
if (length(coefstart)) {
eta <- if (ncol(X.vlm.save) > 1) {
- X.vlm.save %*% coefstart + offset
+ matrix(X.vlm.save %*% coefstart, n, M, byrow = TRUE) + offset
} else {
- X.vlm.save * coefstart + offset
+ matrix(X.vlm.save * coefstart, n, M, byrow = TRUE) + offset
}
- eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
+ if (M == 1)
+ eta <- c(eta)
mu <- slot(family, "linkinv")(eta, extra)
}
@@ -309,29 +159,176 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
ncol.X.vlm <- dX.vlm[[2]]
if (nrow.X.vlm < ncol.X.vlm)
- stop(ncol.X.vlm, "parameters but only ", nrow.X.vlm, " observations")
+ stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
- bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
- Blist = NULL, U = U,
- matrix.out = FALSE,
- is.vlmX = TRUE,
- qr = qr.arg, xij = NULL))
while (c.list$one.more) {
- tfit <- eval(bf.call) # fit$smooth.frame is new
+ tfit <- vlm.wfit(xmat = X.vlm.save, z,
+ Hlist = NULL, U = U,
+ matrix.out = FALSE,
+ is.vlmX = TRUE,
+ qr = qr.arg, xij = NULL) # fit$smooth.frame is new
- c.list$coeff <- tfit$coefficients
+ c.list$coeff <- tfit$coefficients
- tfit$predictors <- tfit$fitted.values
-
- c.list$fit <- tfit$fitted.values
- c.list <- eval(new.s.call)
- NULL
- }
+ tfit$predictors <- tfit$fitted.values
+ c.list$fit <- tfit$fitted.values
+
+
+ if (!c.list$one.more) {
+ break
+ }
+
+
+
+ fv <- c.list$fit
+ new.coeffs <- c.list$coeff
+
+ if (length(slot(family, "middle")))
+ eval(slot(family, "middle"))
+
+ eta <- fv + offset
+ mu <- slot(family, "linkinv")(eta, extra)
+
+ if (length(slot(family, "middle2")))
+ eval(slot(family, "middle2"))
+
+ old.crit <- new.crit
+ new.crit <-
+ switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+
+
+ if (trace && orig.stepsize == 1) {
+ cat("VGLM linear loop ", iter, ": ", criterion, "= ")
+ UUUU <- switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(1 - log10(epsilon))),
+ format(new.crit,
+ dig = max(4,
+ round(-0 - log10(epsilon) +
+ log10(sqrt(eff.n))))))
+ switch(criterion,
+ coefficients = {if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+
+ take.half.step <- (control$half.stepsizing &&
+ length(old.coeffs)) &&
+ ((orig.stepsize != 1) ||
+ (criterion != "coefficients" &&
+ (if (minimize.criterion) new.crit > old.crit else
+ new.crit < old.crit)))
+ if (!is.logical(take.half.step))
+ take.half.step <- TRUE
+ if (take.half.step) {
+ stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+ new.coeffs.save <- new.coeffs
+ if (trace)
+ cat("Taking a modified step")
+ repeat {
+ if (trace) {
+ cat(".")
+ flush.console()
+ }
+ stepsize <- stepsize / 2
+ if (too.small <- stepsize < 0.001)
+ break
+ new.coeffs <- (1-stepsize) * old.coeffs +
+ stepsize * new.coeffs.save
+
+ if (length(slot(family, "middle")))
+ eval(slot(family, "middle"))
+
+ fv <- X.vlm.save %*% new.coeffs
+ if (M > 1)
+ fv <- matrix(fv, n, M, byrow = TRUE)
+
+ eta <- fv + offset
+ mu <- slot(family, "linkinv")(eta, extra)
+
+ if (length(slot(family, "middle2")))
+ eval(slot(family, "middle2"))
+
+ new.crit <-
+ switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+
+ if ((criterion == "coefficients") ||
+ ( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))
+ break
+ } # of repeat
+
+ if (trace)
+ cat("\n")
+ if (too.small) {
+ warning("iterations terminated because ",
+ "half-step sizes are very small")
+ one.more <- FALSE
+ } else {
+ if (trace) {
+ cat("VGLM linear loop ",
+ iter, ": ", criterion, "= ")
+
+ UUUU <- switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(1 - log10(epsilon))),
+ format(new.crit,
+ dig = max(4,
+ round(-0 - log10(epsilon) +
+ log10(sqrt(eff.n))))))
+
+ switch(criterion,
+ coefficients = {
+ if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+ one.more <- eval(control$convergence)
+ }
+ } else {
+ one.more <- eval(control$convergence)
+ }
+ flush.console()
+
+ if (!is.logical(one.more))
+ one.more <- FALSE
+ if (one.more) {
+ iter <- iter + 1
+ deriv.mu <- eval(slot(family, "deriv"))
+ wz <- eval(slot(family, "weight"))
+ if (control$checkwz)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
+
+ c.list$z <- z
+ c.list$U <- U
+ if (copy.X.vlm)
+ c.list$X.vlm <- X.vlm.save
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
+ old.coeffs <- new.coeffs
+ } # End of while()
if (maxit > 1 && iter >= maxit && !control$noWarning)
warning("convergence not obtained in ", maxit, " iterations")
@@ -402,7 +399,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
df.residual <- nrow.X.vlm - rank
fit <- list(assign = asgn,
coefficients = coefs,
- constraints = Blist,
+ constraints = Hlist,
df.residual = df.residual,
df.total = n * M,
effects = effects,
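vglm.fit() receives the same inlining treatment above, including the half-stepsizing search that repeatedly shrinks the step toward the old coefficients until the criterion improves or the step becomes too small. A compact sketch of that search under assumed inputs (old and new coefficient vectors, and a hypothetical crit() being maximized):

    half.step <- function(old, new, crit, min.step = 0.001) {
      stepsize <- 1
      repeat {
        stepsize <- stepsize / 2
        if (stepsize < min.step) return(old)           # step too small: give up
        cand <- (1 - stepsize) * old + stepsize * new  # shrink toward 'old'
        if (crit(cand) > crit(old)) return(cand)       # first improvement wins
      }
    }
    half.step(old = 0, new = 10, crit = function(b) -(b - 2)^2)  # returns 2.5

The real loop additionally honours control$half.stepsizing, warns when the step size underflows, and handles minimized as well as maximized criteria.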
diff --git a/R/vlm.R b/R/vlm.R
index 5adb172..f905390 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -91,10 +91,10 @@ vlm <- function(formula,
}
control <- control
- Blist <- process.constraints(constraints, x, M)
+ Hlist <- process.constraints(constraints, x, M)
intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- fit <- vlm.wfit(xmat = x, zmat = y, Blist = Blist, wz = wz, U = NULL,
+ fit <- vlm.wfit(xmat = x, zmat = y, Hlist = Hlist, wz = wz, U = NULL,
matrix.out = FALSE, is.vlmX = FALSE,
res.ss = TRUE, qr = qr.arg,
x.ret = TRUE, offset = offset)
@@ -106,7 +106,7 @@ vlm <- function(formula,
- fit$constraints <- Blist
+ fit$constraints <- Hlist
dnrow.X.vlm <- labels(fit$X.vlm)
xnrow.X.vlm <- dnrow.X.vlm[[2]]
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index e852438..37935e3 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -11,7 +11,7 @@
vlm.wfit <-
- function(xmat, zmat, Blist, wz = NULL, U = NULL,
+ function(xmat, zmat, Hlist, wz = NULL, U = NULL,
matrix.out = FALSE, is.vlmX = FALSE, res.ss = TRUE, qr = FALSE,
x.ret = FALSE,
offset = NULL,
@@ -25,7 +25,7 @@ vlm.wfit <-
lp.names = NULL, Eta.range = NULL, Xm2 = NULL, ...) {
- missing.Blist <- missing(Blist)
+ missing.Hlist <- missing(Hlist)
zmat <- as.matrix(zmat)
n <- nrow(zmat)
M <- ncol(zmat)
@@ -48,11 +48,11 @@ vlm.wfit <-
X.vlm.save <- if (is.vlmX) {
xmat
} else {
- if (missing.Blist || !length(Blist)) {
- Blist <- replace.constraints(vector("list", ncol(xmat)),
+ if (missing.Hlist || !length(Hlist)) {
+ Hlist <- replace.constraints(vector("list", ncol(xmat)),
diag(M), 1:ncol(xmat)) # NULL
}
- lm2vlm.model.matrix(x = xmat, Blist = Blist, M = M,
+ lm2vlm.model.matrix(x = xmat, Hlist = Hlist, M = M,
assign.attributes = FALSE,
xij = xij,
Xm2 = Xm2)
@@ -108,7 +108,7 @@ vlm.wfit <-
ans$misc <- list(M = M, n = n)
ans$call <- match.call()
- ans$constraints <- Blist
+ ans$constraints <- Hlist
ans$contrasts <- contrast.save
if (x.ret) {
ans$X.vlm <- X.vlm.save
@@ -129,14 +129,14 @@ vlm.wfit <-
dx2 <- if (is.vlmX) NULL else dimnames(xmat)[[2]]
B <- matrix(as.numeric(NA),
nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
- if (is.null(Blist)) {
- Blist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
+ if (is.null(Hlist)) {
+ Hlist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
}
- ncolBlist <- unlist(lapply(Blist, ncol))
- temp <- c(0, cumsum(ncolBlist))
+ ncolHlist <- unlist(lapply(Hlist, ncol))
+ temp <- c(0, cumsum(ncolHlist))
for (ii in 1:ncolx) {
index <- (temp[ii]+1):temp[ii+1]
- cm <- Blist[[ii]]
+ cm <- Hlist[[ii]]
B[, ii] <- cm %*% ans$coef[index]
}
ans$mat.coefficients <- t(B)
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index e16de2a..feab966 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -141,9 +141,9 @@ vsmooth.spline <-
contr.sp[(names(control.spar))] <- control.spar
- if(!all(sapply(contr.sp[1:4], is.numeric)) ||
- contr.sp$tol < 0 || contr.sp$eps <= 0 || contr.sp$maxit <= 0)
- stop("invalid 'control.spar'")
+ if (!all(sapply(contr.sp[1:4], is.numeric)) ||
+ contr.sp$tol < 0 || contr.sp$eps <= 0 || contr.sp$maxit <= 0)
+ stop("invalid 'control.spar'")
my.call <- match.call()
@@ -235,9 +235,9 @@ vsmooth.spline <-
wzbar = double(neff * dim2wz),
uwzbar = double(1), wzybar = double(neff * M), okint = as.integer(0),
as.integer(M), dim2wz = as.integer(dim2wz), dim1U = as.integer(dim1U),
- blist = as.double(diag(M)), ncolb = as.integer(M),
+ Hlist1 = as.double(diag(M)), ncolb = as.integer(M),
trivc = as.integer(1), wuwzbar = as.integer(0),
- dim1Uwzbar = as.integer(dim1U), dim2wzbar = as.integer(dim2wz), PACKAGE = "VGAM")
+ dim1Uwzbar = as.integer(dim1U), dim2wzbar = as.integer(dim2wz))
if (collaps$okint != 1) {
stop("some non-positive-definite weight matrices ",
@@ -333,7 +333,7 @@ vsmooth.spline <-
knot.list <- .C("vknootl2", as.double(xbar),
as.integer(neff), knot = double(neff+6),
k = as.integer(nknots+4),
- chosen = as.integer(chosen), PACKAGE = "VGAM")
+ chosen = as.integer(chosen))
if (noround) {
knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
knot.list$k <- length(knot)
@@ -368,9 +368,9 @@ vsmooth.spline <-
wzbar = double(neff * dim2wzbar),
uwzbar = double(1), wzybar = double(neff * ncb), okint = as.integer(0),
as.integer(M), as.integer(dim2wz), as.integer(dim1U),
- blist = as.double(conmat), ncolb = as.integer(ncb),
+ Hlist1 = as.double(conmat), ncolb = as.integer(ncb),
as.integer(trivc), wuwzbar = as.integer(0),
- as.integer(dim1Uwzbar), as.integer(dim2wzbar), PACKAGE = "VGAM")
+ as.integer(dim1Uwzbar), as.integer(dim2wzbar))
if (collaps$okint != 1) {
stop("some non-positive-definite weight matrices ",
@@ -437,7 +437,7 @@ vsmooth.spline <-
double(1), as.integer(0),
icontrsp = as.integer(contr.sp$maxit),
- contrsp = as.double(unlist(contr.sp[1:4])), PACKAGE = "VGAM")
+ contrsp = as.double(unlist(contr.sp[1:4])))
@@ -638,7 +638,7 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
junk <- .C("Yee_vbvs", as.integer(ngood),
as.double(object@knots), as.double(object@Bcoefficients),
as.double(xs[good]), smomat = double(ngood * ncb),
- as.integer(nknots), as.integer(deriv), as.integer(ncb), PACKAGE = "VGAM")
+ as.integer(nknots), as.integer(deriv), as.integer(ncb))
y[good,] <- junk$smomat
if (TRUE && deriv > 1) {
@@ -680,7 +680,7 @@ valid.vknotl2 <- function(knot, tol = 1/1024) {
junk <- .C("Yee_pknootl2", knot = as.double(knot),
as.integer(length(knot)),
- keep = integer(length(knot)), as.double(tol), PACKAGE = "VGAM")
+ keep = integer(length(knot)), as.double(tol))
keep <- as.logical(junk$keep)
knot <- junk$knot[keep]
if (length(knot) <= 11) {
diff --git a/build/vignette.rds b/build/vignette.rds
deleted file mode 100644
index a2acbd1..0000000
Binary files a/build/vignette.rds and /dev/null differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index c8cafc5..4202808 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index 3c2ea93..40b9ea7 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
new file mode 100644
index 0000000..5fe8e08
Binary files /dev/null and b/data/cfibrosis.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
new file mode 100644
index 0000000..3911833
Binary files /dev/null and b/data/lakeO.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index d219194..25ed41a 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/wine.rda b/data/wine.rda
new file mode 100644
index 0000000..843901e
Binary files /dev/null and b/data/wine.rda differ
diff --git a/demo/binom2.or.R b/demo/binom2.or.R
index d68e1cd..7008a4f 100755
--- a/demo/binom2.or.R
+++ b/demo/binom2.or.R
@@ -1,40 +1,37 @@
# Demo for binom2.or
-if(dev.cur() <= 1) get(getOption("device"))()
-opar <- par(ask = interactive() &&
- (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+data(hunua, package = "VGAM")
+Hunua <- hunua
+Hunua <- transform(Hunua, y00 = (1-agaaus) * (1-kniexc),
+ y01 = (1-agaaus) * kniexc,
+ y10 = agaaus * (1-kniexc),
+ y11 = agaaus * kniexc)
-data(hunua)
-attach(hunua)
-y00 = (1-agaaus) * (1-kniexc)
-y01 = (1-agaaus) * kniexc
-y10 = agaaus * (1-kniexc)
-y11 = agaaus * kniexc
-detach(hunua)
-fit = vgam(cbind(y00,y01,y10,y11) ~ s(altitude, df=c(4,4,2.5)),
- binom2.or(zero=NULL), data=hunua)
-par(mfrow=c(1,1))
-plot(fit, se=TRUE, scol="darkgreen", lcol="blue")
+
+fit <- vgam(cbind(y00, y01, y10, y11) ~ s(altitude, df = c(4, 4, 2.5)),
+ binom2.or(zero = NULL), data = Hunua)
+par(mfrow = c(2, 3))
+plot(fit, se = TRUE, scol = "darkgreen", lcol = "blue")
summary(fit)
# Plot the marginal functions together
-mycols = c("blue","red")
-plot(fit, which.cf=1:2, lcol=mycols, scol=mycols,
- overlay=TRUE, se=TRUE, llwd=2, slwd=2)
-legend(x=100, y=-4, leg=c("Agathis australis", "Knightia excelsa"),
- col=mycols, lty=1)
+mycols <- c("blue", "orange")
+plot(fit, which.cf = 1:2, lcol = mycols, scol = mycols,
+ overlay = TRUE, se = TRUE, llwd = 2, slwd = 2)
+legend(x = 100, y = -4, leg = c("Agathis australis", "Knightia excelsa"),
+ col = mycols, lty = 1)
# Plot the odds ratio
-o = order(fit@x[,2])
-plot(fit@x[o,2], exp(predict(fit)[o,"log(OR)"]),
- log="y", xlab="Altitude (m)", ylab="Odds ratio (log scale)",
- col="blue", type="b")
-abline(h=1, lty=2) # Denotes independence between species
+ooo <- order(fit@x[, 2])
+plot(fit@x[ooo, 2], exp(predict(fit)[ooo, "log(oratio)"]),
+ log = "y", xlab = "Altitude (m)", ylab = "Odds ratio (log scale)",
+ col = "blue", type = "b", las = 1)
+abline(h = 1, lty = 2) # Denotes independence between species
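The rewritten demo above derives the four joint outcomes inside transform() instead of via attach()/detach(). A quick check, with toy 0/1 vectors standing in for agaaus and kniexc, that the indicators partition each site into exactly one cell of the 2 x 2 table:

    a <- c(0, 0, 1, 1)                      # toy presence/absence, species 1
    k <- c(0, 1, 0, 1)                      # toy presence/absence, species 2
    Y <- cbind(y00 = (1-a)*(1-k), y01 = (1-a)*k,
               y10 = a*(1-k),     y11 = a*k)
    rowSums(Y)                              # all 1: one joint outcome per site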
diff --git a/demo/cqo.R b/demo/cqo.R
index c719c58..d1d9444 100755
--- a/demo/cqo.R
+++ b/demo/cqo.R
@@ -1,71 +1,72 @@
-# Demo for canonical Gaussian ordination
+# Demo for constrained quadratic ordination (CQO; aka
+# canonical Gaussian ordination)
-if(dev.cur() <= 1) get(getOption("device"))()
-opar <- par(ask = interactive() &&
- (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+data(hspider, package = "VGAM")
+hspider[, 1:6] <- scale(hspider[, 1:6]) # standardize environmental vars
-data(hspider)
-hspider[,1:6] = scale(hspider[,1:6]) # standardize environmental vars
-
-## Rank-1 model (unequal tolerances, deviance=1176.0)
+## Rank-1 model (unequal tolerances, deviance = 1176.0)
set.seed(123)
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+p1 <-
+ cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- family = quasipoissonff, data = hspider,
- Bestof=10, Crow1positive=FALSE, EqualTolerances=FALSE,
- ITolerances=FALSE)
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ quasipoissonff, data = hspider,
+ Bestof = 10, Crow1positive = FALSE, eq.tolerances = FALSE,
+ I.tolerances = FALSE)
+
+par(mfrow = c(3, 3))
-lvplot(p1, lcol=1:12, llwd=2, llty=1:12, y=TRUE, pch=1:12, pcol=1:12,
- las=1, main="Hunting spider data")
+lvplot(p1, lcol = 1:12, llwd = 2, llty = 1:12, y = TRUE, pch = 1:12,
+ pcol = 1:12, las = 1, main = "Hunting spider data")
-print(ccoef(p1), digits=3)
-print(Coef(p1), digits=3)
+print(cancoef(p1), digits = 3)
+print(Coef(p1), digits = 3)
# trajectory plot
-trplot(p1, which=1:3, log="xy", type="b", lty=1,
- col=c("blue","red","green"), lwd=2, label=TRUE) -> ii
-legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
- lwd=2, lty=1, col=c("blue","red","green"))
-abline(a=0, b=1, lty="dashed")
+trplot(p1, which = 1:3, log = "xy", type = "b", lty = 1,
+ col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii
+legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "),
+ lwd = 2, lty = 1, col = c("blue", "orange", "green"))
+abline(a = 0, b = 1, lty = "dashed")
-## Rank-2 model (equal tolerances, deviance=856.5)
+## Rank-2 model (equal tolerances, deviance = 856.5)
set.seed(111)
-r2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
- Pardmont, Pardnigr, Pardpull, Trocterr) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- family = quasipoissonff, data = hspider, Rank = 2,
- Bestof=10, ITolerances = TRUE,
- EqualTolerances = TRUE, Crow1positive = c(FALSE, FALSE))
-print(ccoef(r2), digits=3)
-print(Coef(r2), digits=3)
-
-clr = (1:(10+1))[-7] # Omit yellow colour
-adj = c(-0.1, -0.1, -0.1, 1.1, 1.1, 1.1, -0.1, -0.1, -0.1, 1.1)
+r2 <-
+ cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ quasipoissonff, data = hspider, Rank = 2,
+ Bestof = 10, I.tolerances = TRUE,
+ eq.tolerances = TRUE, Crow1positive = c(FALSE, FALSE))
+print(ccoef(r2), digits = 3)
+print(Coef(r2), digits = 3)
+
+clr <- (1:(10+1))[-7] # Omit yellow colour
+adj <- c(-0.1, -0.1, -0.1, 1.1, 1.1, 1.1, -0.1, -0.1, -0.1, 1.1)
# With C arrows
-lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=FALSE, C=TRUE,
- Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
- las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = FALSE, C = TRUE,
+ Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj,
+ las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE)
# With circular contours
-lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=TRUE, C=FALSE,
- Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
- las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = TRUE, C = FALSE,
+ Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj,
+ las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE)
# With neither C arrows or circular contours
-lvplot(r2, label=TRUE, xlim=c(-2.8, 5.0), ellipse=FALSE, C=FALSE,
- Cadj=c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj=adj,
- las=1, chull=TRUE, pch="+", pcol=clr, sites=TRUE)
+lvplot(r2, label = TRUE, xlim = c(-2.8, 5.0), ellipse = FALSE, C = FALSE,
+ Cadj = c(1.1, -0.1, 1.2, 1.1, 1.1, -0.1), adj = adj,
+ las = 1, chull = TRUE, pch = "+", pcol = clr, sites = TRUE)
# Perspective plot
-persp(r2, xlim=c(-5,5), ylim=c(-3,6), theta = 50, phi = 20)
+persp(r2, xlim = c(-5, 5), ylim = c(-3, 6), theta = 50, phi = 20)
@@ -73,27 +74,29 @@ persp(r2, xlim=c(-5,5), ylim=c(-3,6), theta = 50, phi = 20)
## Not recommended actually because the number of sites is far too low.
## Deviance = 154.6, equal tolerances.
-attach(hspider)
-ybin = 0 + (cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) > 0) # Matrix of 0's and 1's
-detach(hspider)
+ybin <- with(hspider,
+ 0 + (cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) > 0)) # Matrix of 0s and 1s
+colnames(ybin) <- paste0(colnames(ybin), ".01")
+hspider <- data.frame(hspider, ybin)
+
set.seed(1312)
-b1 = cqo(ybin[,-c(1,5)] ~ WaterCon + BareSand + FallTwig + CoveMoss +
- CoveHerb + ReflLux, family = quasibinomialff(mv=TRUE),
- Bestof=4, ITolerances=TRUE,
- data = hspider, EqualTolerances=TRUE, Crow1positive=FALSE)
-lvplot(b1, type="predictors", llwd=2, las=1, ylab="logit mu",
- ylim=c(-20,11), lcol=1:10)
-c1 = Coef(b1)
-cts = c("Trocterr", "Pardmont", "Alopfabr", "Arctlute")
-text(c1@Optimum[1,cts], logit(c1@Maximum[cts])+1.0, cts)
-
-round(t(Coef(b1, ITolerances=FALSE)@C), dig=3)
+b1 <- cqo(ybin[, -c(1, 5)] ~ WaterCon + BareSand + FallTwig + CoveMoss +
+ CoveHerb + ReflLux, quasibinomialff(mv = TRUE),
+ Bestof = 4, I.tolerances = TRUE,
+ data = hspider, eq.tolerances = TRUE, Crow1positive = FALSE)
+lvplot(b1, type = "predictors", llwd = 2, las = 1, ylab = "logit mu",
+ ylim = c(-20, 11), lcol = 1:10)
+c1 <- Coef(b1)
+cts <- c("Trocterr", "Pardmont", "Alopfabr", "Arctlute")
+text(c1@Optimum[1, cts], logit(c1@Maximum[cts])+1.0, cts)
+
+round(t(Coef(b1, I.tolerances = FALSE)@C), dig = 3)
# On the probability scale
-lvplot(b1, type="fitted", llwd=2, las=1, llty=1,
- ylab="Probability of presence",
- ylim=c(0,1), lcol=1:10)
+lvplot(b1, type = "fitted", llwd = 2, las = 1, llty = 1,
+ ylab = "Probability of presence",
+ ylim = c(0, 1), lcol = 1:10)
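In the hunk above, 0 + (cbind(...) > 0) turns species abundances into a numeric presence/absence matrix: the comparison yields a logical matrix and adding zero coerces it to 0s and 1s. A one-line illustration with a toy matrix:

    m <- matrix(c(0, 3, 0, 7), 2, 2)  # toy abundance matrix
    0 + (m > 0)                       # numeric matrix of 0s and 1s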
diff --git a/demo/distributions.R b/demo/distributions.R
index 1dbfeb4..ae6f2c4 100755
--- a/demo/distributions.R
+++ b/demo/distributions.R
@@ -5,26 +5,36 @@
## Negative binomial distribution
## Data from Bliss and Fisher (1953).
-y = 0:7
-w = c(70, 38, 17, 10, 9, 3, 2, 1)
-fit = vglm(y ~ 1, negbinomial, weights=w)
+appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
+fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
+ weights = w, crit = "coef", half.step = FALSE)
summary(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
+deviance(fit) # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above
## Beta distribution
set.seed(123)
-nn = 1000
-y = rbeta(nn, shape1=1, shape2=3)
-fit = vglm(y ~ 1, betaff(link="identity"), trace = TRUE, crit="c")
-fit = vglm(y ~ 1, betaff, trace = TRUE, crit="c")
-coef(fit, matrix=TRUE)
-Coef(fit) # Useful for intercept-only models
-
-Y = 5 + 8 * y # From 5 to 13, not 0 to 1
-fit = vglm(Y ~ 1, betaff(A=5, B=13), trace = TRUE)
-Coef(fit)
-fitted(fit)[1:4,]
+bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0), shape2 = exp(1)))
+fit1 <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1) # Useful for intercept-only models
+
+# General A and B, and with a covariate
+bdata <- transform(bdata, x2 = runif(nn))
+bdata <- transform(bdata, mu = logit(0.5 - x2, inverse = TRUE),
+ prec = exp(3.0 + x2)) # prec == phi
+bdata <- transform(bdata, shape2 = prec * (1 - mu),
+ shape1 = mu * prec)
+bdata <- transform(bdata,
+ y = rbeta(nn, shape1 = shape1, shape2 = shape2))
+bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
+fit2 <- vglm(Y ~ x2, data = bdata, trace = TRUE,
+ betaff(A = 5, B = 13, lmu = elogit(min = 5, max = 13)))
+coef(fit2, matrix = TRUE)
+
+
+
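The updated beta-distribution demo parameterizes the response by a mean mu and a precision prec (the phi of the comment), with shape1 = mu * prec and shape2 = (1 - mu) * prec. A small check that this recovers the intended mean:

    mu <- 0.3; prec <- 20
    shape1 <- mu * prec
    shape2 <- (1 - mu) * prec
    shape1 / (shape1 + shape2)  # 0.3, the mean of rbeta(n, shape1, shape2)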
diff --git a/demo/lmsqreg.R b/demo/lmsqreg.R
index 815ed3a..8ca0a6b 100755
--- a/demo/lmsqreg.R
+++ b/demo/lmsqreg.R
@@ -1,32 +1,29 @@
# Demo for lmsqreg
-# At the moment this is copied from lms.bcn.Rd
+# At the moment this is copied from lms.bcn.Rd
-if(dev.cur() <= 1) get(getOption("device"))()
-opar <- par(ask = interactive() &&
- (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
-
-data(bminz)
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz, tr=TRUE)
-predict(fit)[1:3,]
-fitted(fit)[1:3,]
-bminz[1:3,]
+data(bmi.nz, package = "VGAM")
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1),
+ data = bmi.nz, trace = TRUE)
+head(predict(fit), 3)
+head(fitted(fit), 3)
+head(bmi.nz, 3)
# Person 1 is near the lower quartile of BMI amongst people his age
-cdf(fit)[1:3]
+head(cdf(fit), 3)
# Quantile plot
-par(bty="l", mar=c(5,4,4,3)+0.1, xpd=TRUE)
-qtplot(fit, percentiles=c(5,50,90,99), main="Quantiles",
- xlim=c(15,90), las=1, ylab="BMI", lwd=2, lcol=4)
+par(bty = "l", mar = c(5, 4, 4, 3) + 0.1, xpd = TRUE, mfrow = c(1, 2))
+qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles",
+ xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
# Density plot
-ygrid = seq(15, 43, len=100) # BMI ranges
-par(mfrow=c(1,1), lwd=2)
-a = deplot(fit, x0=20, y=ygrid,
- main="Density functions at Age = 20, 42 and 55", xlab="BMI")
-a
-a = deplot(fit, x0=42, y=ygrid, add=TRUE, lty=2, col=2)
-a = deplot(fit, x0=55, y=ygrid, add=TRUE, lty=4, col=4, Attach=TRUE)
-a@post$deplot # Contains density function values
+ygrid <- seq(15, 43, len = 100) # BMI ranges
+par(lwd = 2)
+aa <- deplot(fit, x0 = 20, y = ygrid,
+ main = "Density functions at Age = 20, 42 and 55", xlab = "BMI")
+aa
+aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, lty = 2, col = "orange")
+aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, lty = 4, col = 4, Attach = TRUE)
+aa@post$deplot # Contains density function values
diff --git a/demo/vgam.R b/demo/vgam.R
index 6058bc8..3cbc321 100755
--- a/demo/vgam.R
+++ b/demo/vgam.R
@@ -1,20 +1,16 @@
# Demo for vgam
-if(dev.cur() <= 1) get(getOption("device"))()
-opar <- par(ask = interactive() &&
- (.Device %in% c("X11", "GTK", "gnome", "windows","quartz")))
+data(hunua, package = "VGAM")
+fit.h <- vgam(agaaus ~ s(altitude), binomialff, data = hunua)
+plot(fit.h, se = TRUE, lcol = "blue", scol = "orange", llwd = 2,
+ slwd = 2, las = 1)
-data(hunua)
-fit.h = vgam(agaaus ~ s(altitude), binomialff, hunua)
-plot(fit.h, se=TRUE, lcol="blue", scol="red", llwd=2, slwd=2, las=1)
-attach(hunua)
-n = nrow(hunua)
-o = order(altitude)
-plot(altitude[o], fitted(fit.h)[o], type="l", ylim=0:1,
- lwd=2, col="blue", las=1)
-points(altitude, agaaus + (runif(n)-0.5)/30, col="red")
-detach(hunua)
+nn <- nrow(hunua)
+ooo <- with(hunua, order(altitude))
+with(hunua, plot(altitude[ooo], fitted(fit.h)[ooo], type = "l",
+ ylim = 0:1, lwd = 2, col = "blue", las = 1))
+points(agaaus + (runif(nn)-0.5)/30 ~ altitude, hunua, col = "orange")
diff --git a/demo/zipoisson.R b/demo/zipoisson.R
index 7b1f46c..216363f 100755
--- a/demo/zipoisson.R
+++ b/demo/zipoisson.R
@@ -1,20 +1,29 @@
-# Demo for Zero Inflated Poisson
+# Demo for Zero-Inflated Poisson
set.seed(111)
-n <- 1000
-phi <- 0.35 # Proportion that are zero by definition
-lambda <- 4 # Poisson parameter
-y <- ifelse(runif(n) < phi, 0, rpois(n, lambda))
-stem(y)
+zdata <- data.frame(x2 = runif(nn <- 1000))
+zdata <- transform(zdata, pstr01 = logit(-0.5 + 1*x2, inverse = TRUE),
+ pstr02 = logit( 0.5 - 1*x2, inverse = TRUE),
+ Ps01 = logit(-0.5 , inverse = TRUE),
+ Ps02 = logit( 0.5 , inverse = TRUE),
+ lambda1 = loge(-0.5 + 2*x2, inverse = TRUE),
+ lambda2 = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
+ y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))
-fit <- vglm(y ~ 1, family=zipoisson, trace=TRUE, crit="c" )
-true.mean <- (1-phi)*lambda
-true.mean
-fitted(fit)[1:5,]
-fit@misc$prob0 # The estimate of P(Y=0)
+with(zdata, table(y1)) # Eyeball the data
+with(zdata, table(y2))
+with(zdata, stem(y2))
+fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
+fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, crit = "coef")
+coef(fit1, matrix = TRUE) # These should agree with the above values
+coef(fit2, matrix = TRUE) # These should agree with the above values
-coef(fit)
-coef(fit, matrix=TRUE)
-Coef(fit)
+
+head(fit1@misc$pobs0) # The estimate of P(Y=0)
+
+coef(fit1)
+coef(fit1, matrix=TRUE)
+Coef(fit1)
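The reworked demo simulates with rzipois(), whose mean is (1 - pstr0) * lambda, so the zipoisson() fits should reproduce the generating coefficients. A quick sanity check, assuming VGAM is attached:

    set.seed(111)
    y <- rzipois(10000, lambda = 4, pstr0 = 0.35)
    mean(y)  # close to (1 - 0.35) * 4 = 2.6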
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
deleted file mode 100644
index 43a0c11..0000000
--- a/inst/doc/categoricalVGAM.R
+++ /dev/null
@@ -1,278 +0,0 @@
-### R code from vignette source 'categoricalVGAM.Rnw'
-
-###################################################
-### code chunk number 1: categoricalVGAM.Rnw:84-90
-###################################################
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-
-
-###################################################
-### code chunk number 2: pneumocat
-###################################################
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
- cumulative(reverse = TRUE, parallel = TRUE), pneumo)
-
-
-###################################################
-### code chunk number 3: categoricalVGAM.Rnw:900-904
-###################################################
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284, 730, NA, 813, 276,
- 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-
-
-###################################################
-### code chunk number 4: categoricalVGAM.Rnw:1005-1009
-###################################################
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
-coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
-
-
-###################################################
-### code chunk number 5: categoricalVGAM.Rnw:1287-1289
-###################################################
-head(marital.nz, 4)
-summary(marital.nz)
-
-
-###################################################
-### code chunk number 6: categoricalVGAM.Rnw:1292-1294
-###################################################
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
- data = marital.nz)
-
-
-###################################################
-### code chunk number 7: categoricalVGAM.Rnw:1298-1300
-###################################################
-head(fit.ms@y, 4)
-colSums(fit.ms@y)
-
-
-###################################################
-### code chunk number 8: categoricalVGAM.Rnw:1309-1321
-###################################################
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlayed
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-
-
-###################################################
-### code chunk number 9: categoricalVGAM.Rnw:1364-1377
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
- par(mar=c(4.2,4.0,1.2,2.2)+0.1)
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-
-
-###################################################
-### code chunk number 10: categoricalVGAM.Rnw:1397-1398
-###################################################
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-
-
-###################################################
-### code chunk number 11: categoricalVGAM.Rnw:1407-1411
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
- par(mfrow=c(1,3))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-
-
-###################################################
-### code chunk number 12: categoricalVGAM.Rnw:1434-1446
-###################################################
-foo <- function(x, elbow=50)
- poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
- "poly(age, 2)" = rbind(1, 0, 0),
- "foo(age)" = rbind(0, 1, 0),
- "age" = rbind(0, 0, 1))
-fit2.ms <-
- vglm(mstatus ~ poly(age, 2) + foo(age) + age,
- family = multinomial(refLevel = 2),
- constraints = clist,
- data = marital.nz)
-
-
-###################################################
-### code chunk number 13: categoricalVGAM.Rnw:1449-1450
-###################################################
-coef(fit2.ms, matrix = TRUE)
-
-
-###################################################
-### code chunk number 14: categoricalVGAM.Rnw:1454-1461
-###################################################
-par(mfrow=c(2,2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol=mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-
-
-###################################################
-### code chunk number 15: categoricalVGAM.Rnw:1472-1481
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-
-
-###################################################
-### code chunk number 16: categoricalVGAM.Rnw:1499-1500
-###################################################
-deviance(fit.ms) - deviance(fit2.ms)
-
-
-###################################################
-### code chunk number 17: categoricalVGAM.Rnw:1506-1507
-###################################################
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-
-
-###################################################
-### code chunk number 18: categoricalVGAM.Rnw:1510-1511
-###################################################
-1-pchisq(deviance(fit.ms) - deviance(fit2.ms), df=dfdiff)
-
-
-###################################################
-### code chunk number 19: categoricalVGAM.Rnw:1524-1535
-###################################################
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age", # main="Marital status amongst NZ Male Europeans",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
- col=c(mycol[1], "black", mycol[-1]),
- lty=1:4,
-legend=colnames(fit.ms@y), lwd=2)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-
-
-###################################################
-### code chunk number 20: categoricalVGAM.Rnw:1550-1563
-###################################################
-getOption("SweaveHooks")[["fig"]]()
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62,
- col=c(mycol[1], "black", mycol[-1]),
- lty=1:4,
-legend=colnames(fit.ms@y), lwd=2.1)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-
-
-###################################################
-### code chunk number 21: categoricalVGAM.Rnw:1597-1601
-###################################################
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-
-
-###################################################
-### code chunk number 22: categoricalVGAM.Rnw:1605-1606
-###################################################
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain)
-
-
-###################################################
-### code chunk number 23: categoricalVGAM.Rnw:1609-1610
-###################################################
-Coef(bp.rrmlm1)
-
-
-###################################################
-### code chunk number 24: categoricalVGAM.Rnw:1638-1639
-###################################################
-set.seed(123)
-
-
-###################################################
-### code chunk number 25: categoricalVGAM.Rnw:1642-1644
-###################################################
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
- Corner = FALSE, Uncor = TRUE)
-
-
-###################################################
-### code chunk number 26: categoricalVGAM.Rnw:1652-1656
-###################################################
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-
-
-###################################################
-### code chunk number 27: categoricalVGAM.Rnw:1688-1696
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-
-
-###################################################
-### code chunk number 28: categoricalVGAM.Rnw:1810-1811
-###################################################
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-
-
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
deleted file mode 100644
index c4f98e0..0000000
--- a/inst/doc/categoricalVGAM.Rnw
+++ /dev/null
@@ -1,2323 +0,0 @@
-\documentclass[article,shortnames,nojss]{jss}
-\usepackage{thumbpdf}
-%% need no \usepackage{Sweave.sty}
-
-\SweaveOpts{engine=R,eps=FALSE}
-%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
-%\VignetteDepends{VGAM}
-%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
-%\VignettePackage{VGAM}
-
-%% new commands
-\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
-\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
-\newcommand{\pr}{\mbox{$P$}}
-\newcommand{\logit}{\mbox{\rm logit}}
-\newcommand{\bzero}{{\bf 0}}
-\newcommand{\bone}{{\bf 1}}
-\newcommand{\bid}{\mbox{\boldmath $d$}}
-\newcommand{\bie}{\mbox{\boldmath $e$}}
-\newcommand{\bif}{\mbox{\boldmath $f$}}
-\newcommand{\bix}{\mbox{\boldmath $x$}}
-\newcommand{\biy}{\mbox{\boldmath $y$}}
-\newcommand{\biz}{\mbox{\boldmath $z$}}
-\newcommand{\biY}{\mbox{\boldmath $Y$}}
-\newcommand{\bA}{\mbox{\rm \bf A}}
-\newcommand{\bB}{\mbox{\rm \bf B}}
-\newcommand{\bC}{\mbox{\rm \bf C}}
-\newcommand{\bH}{\mbox{\rm \bf H}}
-\newcommand{\bI}{\mbox{\rm \bf I}}
-\newcommand{\bX}{\mbox{\rm \bf X}}
-\newcommand{\bW}{\mbox{\rm \bf W}}
-\newcommand{\bY}{\mbox{\rm \bf Y}}
-\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
-\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
-\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
-\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
-\newcommand{\diag}{ \mbox{\rm diag} }
-\newcommand{\Var}{ \mbox{\rm Var} }
-\newcommand{\R}{{\textsf{R}}}
-\newcommand{\VGAM}{\pkg{VGAM}}
-
-
-\author{Thomas W.~Yee\\University of Auckland}
-\Plainauthor{Thomas W. Yee}
-
-\title{The \pkg{VGAM} Package for Categorical Data Analysis}
-\Plaintitle{The VGAM Package for Categorical Data Analysis}
-
-\Abstract{
- Classical categorical regression models such as the multinomial logit and
- proportional odds models are shown to be readily handled by the vector
- generalized linear and additive model (VGLM/VGAM) framework. Additionally,
- there are natural extensions, such as reduced-rank VGLMs for
- dimension reduction, and allowing covariates that have values
- specific to each linear/additive predictor,
- e.g., for consumer choice modeling. This article describes some of the
- framework behind the \pkg{VGAM} \R{}~package, its usage and implementation
- details.
-}
-\Keywords{categorical data analysis, Fisher scoring,
- iteratively reweighted least squares,
- multinomial distribution, nominal and ordinal polytomous responses,
- smoothing, vector generalized linear and additive models,
- \VGAM{} \R{} package}
-\Plainkeywords{categorical data analysis, Fisher scoring,
- iteratively reweighted least squares, multinomial distribution,
- nominal and ordinal polytomous responses, smoothing,
- vector generalized linear and additive models, VGAM R package}
-
-\Address{
- Thomas W. Yee \\
- Department of Statistics \\
- University of Auckland, Private Bag 92019 \\
- Auckland Mail Centre \\
- Auckland 1142, New Zealand \\
- E-mail: \email{t.yee@auckland.ac.nz}\\
- URL: \url{http://www.stat.auckland.ac.nz/~yee/}
-}
-
-
-\begin{document}
-
-
-<<echo=FALSE, results=hide>>=
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-@
-
-
-% ----------------------------------------------------------------------
-\section{Introduction}
-\label{sec:jsscat.intoduction}
-
-
-This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
-based on~\cite{Yee:2010}.
-Any subsequent features (especially non-backward compatible ones)
-will appear here.
-
-The subject of CDA is concerned with
-analyses where the response is categorical regardless of whether
-the explanatory variables are continuous or categorical. It is a
-very frequent form of data. Over the years several CDA regression
-models for polytomous responses have become popular, e.g., those
-in Table~\ref{tab:cat.quantities}. Not surprisingly, the models
-are interrelated: their foundation is the multinomial distribution
-and consequently they share similar and overlapping properties which
-modellers should know and exploit. Unfortunately, software has been
-slow to reflect their commonality and this makes analyses unnecessarily
-difficult for the practitioner on several fronts, e.g., using different
-functions/procedures to fit different models which does not aid the
-understanding of their connections.
-
-
-This historical misfortune can be seen by considering \R{}~functions
-for~CDA. From the Comprehensive \proglang{R} Archive Network
-(CRAN, \url{http://CRAN.R-project.org/}) there is~\texttt{polr()}
-\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
-model and~\texttt{multinom()}
-\citep[in~\pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
-logit model. However, both of these can be considered `one-off'
-modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
-has greater functionality: it can fit the proportional odds model
-(and the forward continuation ratio model upon preprocessing). Neither
-\texttt{polr()} nor \texttt{lrm()} appears able to fit the nonproportional
-odds model. There are non-CRAN packages too, such as the modeling
-function~\texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
-the proportional odds, continuation ratio and adjacent categories models;
-however it calls \texttt{nlm()} and the user must supply starting values.
-In general these \R{} \citep{R} modeling functions are not modular,
-often require preprocessing, and are not always self-starting.
-The implementations are piecemeal rather than unified. Consequently,
-a practitioner wishing to fit the models
-of Table~\ref{tab:cat.quantities} must master several
-modeling functions from several packages, each with its own syntax.
-This is a hindrance to efficient CDA.
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}{|c|c|l|}
-\hline
-Quantity & Notation &
-%Range of~$j$ &
-\VGAM{} family function \\
-\hline
-%
-$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{acat()} \\
-%
-$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
-%$2,\ldots,M+1$ &
-\texttt{acat(reverse = TRUE)} \\
-%
-$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
-%$1,\ldots,M$ &
-\texttt{cratio()} \\
-%
-$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
-%$2,\ldots,M+1$ &
-\texttt{cratio(reverse = TRUE)} \\
-%
-$\pr(Y\leq j)$ &$\gamma_{j}$ &
-%$1,\ldots,M$ &
-\texttt{cumulative()} \\
-%
-$\pr(Y\geq j)$ &$\gamma_{j}^R$&
-%$2,\ldots,M+1$ &
-\texttt{cumulative(reverse = TRUE)} \\
-%
-$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
-%$1,\ldots,M$ &
-\texttt{multinomial()} \\
-%
-$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{sratio()} \\
-%
-$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
-%$2,\ldots,M+1$ &
-\texttt{sratio(reverse = TRUE)} \\
-%
-\hline
-\end{tabular}
-\caption{
-Quantities defined in \VGAM{} for a
-categorical response~$Y$ taking values $1,\ldots,M+1$.
-Covariates \bix{} have been omitted for clarity.
-The LHS quantities are~$\eta_{j}$
-or~$\eta_{j-1}$ for~$j=1,\ldots,M$ (not reversed)
-and~$j=2,\ldots,M+1$ (if reversed), respectively.
-All models are estimated by minimizing the deviance.
-All except for \texttt{multinomial()} are suited to ordinal~$Y$.
-\label{tab:cat.quantities}
-}
-\end{table}
-
-
-
-
-\proglang{SAS} \citep{SAS} does not fare much better than~\R. Indeed,
-it could be considered as having an \textit{excess} of options which
-bewilders the non-expert user; there is little coherent overriding
-structure. Its \code{proc logistic} handles the multinomial logit
-and proportional odds models, as well as exact logistic regression
-\citep[see][which is for Version~8 of \proglang{SAS}]{stok:davi:koch:2000}.
-The fact that the proportional odds model may be fitted by \code{proc
-logistic}, \code{proc genmod} and \code{proc probit} arguably creates
-confusion rather than connections, e.g.,
-\code{genmod} is primarily for GLMs and the proportional odds model is not
-a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
-phreg} fits the multinomial logit model, and \code{proc catmod} with
-its WLS implementation adds further potential for confusion.
-
-
-This article attempts to show how these deficiencies can be addressed
-by considering the vector generalized linear and additive model
-(VGLM/VGAM) framework, as implemented by the author's~\pkg{VGAM}
-package for~\R{}. The main purpose of this paper is to demonstrate
-how the framework is very well suited to many `classical' regression
-models for categorical responses, and to describe the implementation and
-usage of~\pkg{VGAM} for such. To this end an outline of this article
-is as follows. Section~\ref{sec:jsscat.VGLMVGAMoverview} summarizes
-the basic VGLM/VGAM framework. Section~\ref{sec:jsscat.vgamff}
-centers on functions for CDA in~\VGAM. Given an adequate framework,
-some natural extensions of Section~\ref{sec:jsscat.VGLMVGAMoverview} are
-described in Section~\ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
-can benefit from Section~\ref{sec:jsscat.userTopics} which shows how
-the software reflects their common theory. Some examples are given in
-Section~\ref{sec:jsscat.eg}. Section~\ref{sec:jsscat.implementDetails}
-contains selected topics in statistical computing that are
-more relevant to programmers interested in the underlying code.
-Section~\ref{sec:jsscat.extnUtil} discusses several utilities and
-extensions needed for advanced CDA modeling, and the article concludes
-with a discussion. This document was run using \pkg{VGAM}~0.7-10
-\citep{yee:VGAM:2010} under \R~2.10.0.
-
-
-Some general references for categorical data providing
-background to this article include
-\cite{agre:2002},
-\cite{fahr:tutz:2001},
-\cite{leon:2000},
-\cite{lloy:1999},
-\cite{long:1997},
-\cite{mccu:neld:1989} and
-\cite{simo:2003}.
-An overview of models for ordinal responses is~\cite{liu:agre:2005},
-and a manual for fitting common models found in~\cite{agre:2002}
-to polytomous responses with various software is~\cite{thom:2009}.
-A package for visualizing categorical data in~\R{} is~\pkg{vcd}
-\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{VGLM/VGAM overview}
-\label{sec:jsscat.VGLMVGAMoverview}
-
-
-This section summarizes the VGLM/VGAM framework with a particular emphasis
-on categorical models, since the classes encapsulate many multivariate
-response models in, e.g., survival analysis, extreme value analysis,
-quantile and expectile regression, time series, bioassay data, nonlinear
-least-squares models, and scores of standard and nonstandard univariate
-and continuous distributions. The framework is partially summarized by
-Table~\ref{tab:rrvglam.jss.subset}. More general details about VGLMs
-and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
-respectively. An informal and practical article connecting the general
-framework with the software is~\cite{Rnews:Yee:2008}.
-
-
-
-\subsection{VGLMs}
-\label{sec:wffc.appendixa.vglms}
-
-Suppose the observed response \biy{} is a $q$-dimensional vector.
-VGLMs are defined as models for which the conditional distribution
-of $\biY$ given explanatory $\bix$ is of the form
-\begin{eqnarray}
-f(\biy | \bix ; \bB, \phi) ~=~ h(\biy, \eta_1,\ldots, \eta_M, \phi)
-\label{gammod}
-\end{eqnarray}
-for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
-\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
-unknown regression coefficients,
-and the~$j$th linear predictor is
-\begin{equation}
-\eta_j ~=~ \eta_j(\bix) ~=~ \bbeta_j^{\top} \bix ~=~
-\sum_{k=1}^p \beta_{(j)k} \, x_k , ~~~~ j=1,\ldots,M.
-\label{gammod2}
-\end{equation}
-Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
-Note that~(\ref{gammod2}) means that \textit{all} the parameters may be
-potentially modelled as functions of~\bix. It can be seen that VGLMs are
-like GLMs but allow for multiple linear predictors, and they encompass
-models outside the small confines of the exponential family.
-In~(\ref{gammod}) the quantity~$\phi$ is an optional scaling parameter
-which is included for backward compatibility with common adjustments
-to overdispersion, e.g., with respect to GLMs.
-
-
-In general there is no relationship between~$q$ and~$M$: it
-depends specifically on the model or distribution to be fitted.
-However, for the `classical' categorical regression models of
-Table~\ref{tab:cat.quantities} we have~$M=q-1$ since~$q$ is the number
-of levels the multi-category response~$Y$ has.
-
-
-
-
-
-The $\eta_j$ of VGLMs may be applied directly to parameters of a
-distribution rather than just to a mean for GLMs. A simple example is
-a univariate distribution with a location parameter~$\xi$ and a scale
-parameter~$\sigma > 0$, where we may take~$\eta_1 = \xi$ and~$\eta_2 =
-\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
-link function~$g_{j}$ and parameter~$\theta_{j}$.
-For example, the adjacent categories models in
-Table~\ref{tab:cat.quantities} are ratios of two probabilities, therefore
-a log link of~$\zeta_{j}^{R}$ or~$\zeta_{j}$ is the default.
-In \VGAM{}, there are currently over a dozen links to choose from, of
-which any can be assigned to any parameter, ensuring maximum flexibility.
-Table~\ref{tab:jsscat.links} lists some of them.
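-
-As a minimal sketch of this flexibility (not run; the response matrix
-\texttt{ymat}, the covariate \texttt{x2} and the data frame
-\texttt{mydata} are hypothetical), the same family function may be
-called with different links assigned:
-\begin{Code}
-fit1 <- vglm(ymat ~ x2, cumulative(link = "logit"),  data = mydata)
-fit2 <- vglm(ymat ~ x2, cumulative(link = "probit"), data = mydata)
-\end{Code}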
-
-
-
-\begin{table}[tt]
-\centering
-%\ ~~~~ \par
-\begin{tabular}{|l|l|l|l|}
-\hline
-\ \ ~~~~~~~~~~~~ $\boldeta$ &
-Model & Modeling & Reference \\
- & & function & \\
-%-------------------------------------------------------------
-\hline
-\hline
-%-------------------------------------------------------------
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
-VGLM & \texttt{vglm()}
-&
-\cite{yee:hast:2003} \\[1.6ex]
-%Yee \& Hastie~(2003) \\[1.6ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} +
- \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
-%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
-VGAM & \texttt{vgam()}
-&
-\cite{yee:wild:1996} \\[2.2ex]
-%Yee \& Wild~(1996) \\[2.2ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
-RR-VGLM & \texttt{rrvglm()}
-&
-\cite{yee:hast:2003} \\[1.8ex]
-%Yee \& Hastie~(2003) \\[1.8ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-See \cite{yee:hast:2003} &
-Goodman's~RC & \texttt{grc()}
-&
-%\cite{yee:hast:2003} \\[1.8ex]
-\cite{good:1981} \\[1.8ex]
-%-------------------------------------------------------------
-\hline
-\end{tabular}
-\caption{
-Some of
-the package \VGAM{} and
-its framework.
-The vector of latent variables $\bnu = \bC^{\top} \bix_2$
-where
-$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
-\label{tab:rrvglam.jss.subset}
-}
-%\medskip
-\end{table}
-
-
-
-
-
-
-VGLMs are estimated using iteratively reweighted least squares~(IRLS)
-which is particularly suitable for categorical models
-\citep{gree:1984}.
-All models in this article have a log-likelihood
-\begin{equation}
-\ell ~=~ \sum_{i=1}^n \, w_i \, \ell_i
-\label{eq:log-likelihood.VGAM}
-\end{equation}
-where the~$w_i$ are known positive prior weights.
-Let~$\bix_i$ denote the explanatory vector for the~$i$th observation,
-for $i=1,\dots,n$.
-Then one can write
-\begin{eqnarray}
-\boldeta_i &=& \boldeta(\bix_i) ~=~
-\left(
-\begin{array}{c}
-\eta_1(\bix_i) \\
-\vdots \\
-\eta_M(\bix_i)
-\end{array} \right) ~=~
-\bB^{\top} \bix_i ~=~
-\left(
-\begin{array}{c}
-\bbeta_1^{\top} \bix_i \\
-\vdots \\
-\bbeta_M^{\top} \bix_i
-\end{array} \right)
-\nonumber
-\\
-&=&
-\left(
-\begin{array}{cccc}
-\beta_{(1)1} & \cdots & \beta_{(1)p} \\
-\vdots \\
-\beta_{(M)1} & \cdots & \beta_{(M)p} \\
-\end{array} \right)
-\bix_i ~=~
-\left(
-\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
-\right)
-\bix_i .
-\label{eq:lin.pred}
-\end{eqnarray}
-In IRLS,
-an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
-is regressed upon a large (VLM) model matrix, with
-$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
-The working weights $\bW_i$ here are
-$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
-(which, under regularity conditions, is equal to
-$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
-\partial \boldeta_i^{\top})]$),
-giving rise to the Fisher~scoring algorithm.
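-
-As a concrete special case (a standard result, stated here only for
-orientation): for a binary response with a logit link ($M=1$),
-$\partial \ell_i / \partial \eta_i = y_i - \mu_i$ and
-$\bW_i = w_i \, \mu_i (1 - \mu_i)$, so the adjusted dependent variable
-reduces to the familiar
-\[
-z_i ~=~ \eta_i + \frac{y_i - \mu_i}{\mu_i \, (1 - \mu_i)} .
-\]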
-
-
-Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
-(LM) model matrix
-obtained from the \texttt{formula} argument of \texttt{vglm()}.
-Given $\biz_i$, $\bW_i$ and~$\bX{}$ at the current IRLS iteration,
-a weighted multivariate regression is performed.
-To do this, a \textit{vector linear model} (VLM) model matrix
-$\bX_{\sVLM}$ is formed from~$\bX{}$ and~$\bH_k$
-(see Section~\ref{sec:wffc.appendixa.vgams}).
-This has $nM$~rows, and if there are no constraints then $Mp$~columns.
-Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
-upon $\bX_{\sVLM}$
-with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
-This system of linear equations is converted to one large
-WLS fit by premultiplication of the output of
-a Cholesky decomposition of the~$\bW_i$.
-
-
-Fisher~scoring usually has good numerical stability
-because the~$\bW_i$ are positive-definite over a larger
-region of parameter space than Newton-Raphson.
-For the categorical models in this article the expected
-information matrices are simpler than the observed
-information matrices, and are easily derived,
-therefore all the families in Table~\ref{tab:cat.quantities}
-implement Fisher~scoring.
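-
-A minimal sketch of watching these iterations in practice (here
-\texttt{trace = TRUE} merely prints progress at each IRLS iteration;
-the \texttt{pneumo} data ships with \VGAM{}):
-\begin{Code}
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vglm(cbind(normal, mild, severe) ~ let,
-            cumulative(parallel = TRUE), pneumo, trace = TRUE)
-\end{Code}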
-
-
-
-\subsection{VGAMs and constraint matrices}
-\label{sec:wffc.appendixa.vgams}
-
-
-VGAMs provide additive-model extensions to VGLMs, that is,
-(\ref{gammod2})~is generalized to
-\begin{equation}
-\eta_j(\bix) ~=~ \beta_{(j)1} +
-\sum_{k=2}^p \; f_{(j)k}(x_k), ~~~~ j = 1,\ldots, M,
-\label{addmod}
-\end{equation}
-a sum of smooth functions of the individual covariates, just as
-with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
-(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
-and are estimated simultaneously using \textit{vector smoothers}.
-VGAMs are thus a visual data-driven method that is well suited to
-exploring data, and they retain the simplicity of interpretation that
-GAMs possess.
-
-
-
-An important concept, especially for CDA, is the idea of
-`constraints-on-the-functions'.
-In practice we often wish to constrain the effect of a covariate to
-be the same for some of the~$\eta_j$ and to have no effect for others.
-We shall see below that this constraints idea is important
-for several categorical models because of a popular parallelism assumption.
-As a specific example, for VGAMs we may wish to take
-\begin{eqnarray*}
-\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
-\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
-\end{eqnarray*}
-so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
-For VGAMs, we can represent these models using
-\begin{eqnarray}
-\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
-\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
-\label{eqn:constraints.VGAM}
-\end{eqnarray}
-where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
-\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
-possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
-vector of unknown intercepts. With no constraints at all, $\bH_1 =
-\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
-Like the $\bif_k$, the~$\bif_k^*$ are centered for uniqueness.
-For VGLMs, the~$\bif_k$ are linear so that
-\begin{eqnarray}
-{\bB}^{\top} &=&
-\left(
-\bH_1 \bbeta_{(1)}^*
- \;
-\Bigg|
- \;
-\bH_2 \bbeta_{(2)}^*
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\bH_p \bbeta_{(p)}^*
-\right)
-\label{eqn:lin.coefs4}
-\end{eqnarray}
-for some vectors
-$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
-
-
-The
-$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
-Kronecker product operations.
-For example, with trivial constraints,
-$\bX_{\sVLM} = \bX \otimes \bI_M$.
-More generally,
-\begin{eqnarray}
-\bX_{\sVLM} &=&
-\left(
-\left( \bX \, \bie_{1} \right) \otimes \bH_1
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{2} \right) \otimes \bH_2
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{p} \right) \otimes \bH_p
-\right)
-\label{eqn:X_vlm_Hk}
-\end{eqnarray}
-($\bie_{k}$ is a vector of zeros except for a one in the $k$th~position)
-so that
-$\bX_{\sVLM}$ is $(nM) \times p^*$ where
-$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
-of columns of all the constraint matrices.
-Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
-\texttt{model.matrix(vglmObject, type = "vlm")}
-and
-\texttt{model.matrix(vglmObject, type = "lm")}
-respectively.
-Equation~\ref{eqn:lin.coefs4} focusses on the rows of~\bB{} whereas
-(\ref{eq:lin.pred})~focusses on the columns.
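-
-A small sketch, assuming \texttt{fit} is some previously fitted
-\texttt{vglm()} object:
-\begin{Code}
-dim(model.matrix(fit, type = "lm"))    # n x p
-dim(model.matrix(fit, type = "vlm"))   # (nM) x p*
-\end{Code}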
-
-
-VGAMs are estimated by applying a modified vector backfitting algorithm
-\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
-
-
-
-\subsection{Vector splines and penalized likelihood}
-\label{sec:ex.vspline}
-
-If~(\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
-natural extension of the cubic smoothing spline to vector responses)
-then it can be shown that the resulting solution maximizes a penalized
-likelihood; some details are sketched in~\cite{yee:step:2007}. In fact,
-knot selection for vector splines follows the same idea as O-splines
-\citep[see][]{wand:orme:2008} in order to lower the computational cost.
-
-
-The usage of \texttt{vgam()} with smoothing is very similar
-to~\texttt{gam()} \citep{gam:pack:2009}, e.g.,
-to fit a nonparametric proportional odds model
-\citep[cf.~p.179 of][]{mccu:neld:1989}
-to the pneumoconiosis data one could try
-<<label = pneumocat, eval=T>>=
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
- cumulative(reverse = TRUE, parallel = TRUE), pneumo)
-@
-Here, setting \texttt{df = 1} means a linear fit so that
-\texttt{df = 2} affords a little nonlinearity.
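-
-The fitted component function and its standard error bands can then be
-inspected with, e.g. (a sketch),
-\begin{Code}
-plot(fit, se = TRUE)
-\end{Code}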
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section[VGAM family functions]{\pkg{VGAM} family functions}
-\label{sec:jsscat.vgamff}
-
-
-
-This section summarizes and comments on the~\VGAM{} family functions
-of Table~\ref{tab:cat.quantities} for a categorical response variable
-taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
-entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
-instead and assign the \texttt{family} argument a \VGAM{}~family function.
-The use of a \VGAM{}~family function to fit a specific model is far
-simpler than having a different modeling function for each model.
-Options specific to that model appear as arguments of that \VGAM{}~family
-function.
-
-
-
-
-
-While writing \texttt{cratio()} it was found that various authors defined
-the quantity ``continuation ratio'' differently; therefore it became
-necessary to define a ``stopping ratio''. Table~\ref{tab:cat.quantities}
-defines these quantities for \VGAM{}.
-
-
-
-
-The multinomial logit model is usually described by choosing the first or
-last level of the factor to be baseline. \VGAM{}~chooses the last level
-(Table~\ref{tab:cat.quantities}) by default; however, that can be changed
-to any other level by use of the \texttt{refLevel} argument.
-
-
-
-
-If the proportional odds assumption is inadequate then one strategy is
-to try a different link function (see Section~\ref{sec:jsscat.links}
-for a selection). Another alternative is to add extra terms such as
-interaction terms into the linear predictor
-\citep[available in the \proglang{S}~language;][]{cham:hast:1993}.
-Another is to fit the so-called \textit{partial}
-proportional odds model \citep{pete:harr:1990}
-which \VGAM{} can fit via constraint matrices.
-
-
-
-In the terminology of~\cite{agre:2002},
-\texttt{cumulative()} fits the class of \textit{cumulative link models},
-e.g.,
-\texttt{cumulative(link = probit)} is a cumulative probit model.
-For \texttt{cumulative()}
-it was difficult to decide whether
-\texttt{parallel = TRUE}
-or
-\texttt{parallel = FALSE}
-should be the default.
-In fact, the latter is (for now?).
-Users need to set
-\texttt{cumulative(parallel = TRUE)} explicitly to
-fit a proportional odds model---hopefully this will alert
-them to the fact that they are making
-the proportional odds assumption and
-check its validity (\cite{pete:1990}; e.g., through a deviance or
-likelihood ratio test). However the default means numerical problems
-can occur with far greater likelihood.
-Thus there is tension between the two options.
-As a compromise there is now a \VGAM{} family function
-called \texttt{propodds(reverse = TRUE)} which is equivalent to
-\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
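-
-Assuming \texttt{propodds()}'s default \texttt{reverse = TRUE}, the
-following two calls should therefore produce the same fit (a sketch;
-\texttt{ymat}, \texttt{x2} and \texttt{mydata} are hypothetical):
-\begin{Code}
-fit1 <- vglm(ymat ~ x2, propodds, data = mydata)
-fit2 <- vglm(ymat ~ x2, cumulative(parallel = TRUE, reverse = TRUE,
-                                   link = "logit"), data = mydata)
-\end{Code}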
-
-
-
-By the way, note that arguments such as
-\texttt{parallel}
-can handle a slightly more complex syntax.
-A call such as
-\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
-is only applied to~$X_2$ and~$X_5$.
-This might be equivalent to something like
-\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
-explanatory variables.
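-
-For instance (a sketch with hypothetical variables), the call below
-applies the parallelism assumption only to \texttt{x2} and \texttt{x5},
-and not to the intercept:
-\begin{Code}
-fit <- vglm(ymat ~ x2 + x3 + x4 + x5,
-            cumulative(parallel = TRUE ~ x2 + x5 - 1), data = mydata)
-\end{Code}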
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Other models}
-\label{sec:jsscat.othermodels}
-
-
-Given the VGLM/VGAM framework of Section~\ref{sec:jsscat.VGLMVGAMoverview}
-it is found that natural extensions are readily proposed in several
-directions. This section describes some such extensions.
-
-
-
-
-\subsection{Reduced-rank VGLMs}
-\label{sec:jsscat.RRVGLMs}
-
-
-Consider a multinomial logit model where $p$ and $M$ are both large.
-A (not-too-convincing) example might be the data frame \texttt{vowel.test}
-in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
-The vowel recognition data set involves $q=11$ symbols produced from
-8~speakers with 6~replications of each. The training data comprises
-$10$~input features (not including the intercept) based on digitized
-utterances. A multinomial logit model fitted to these data would
-have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
-coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
-of $n$~to the number of parameters is small, and it would be good to
-introduce some parsimony into the model.
-
-
-
-A simple and elegant solution is to represent $\widehat{\bB}$ by
-its reduced-rank approximation. To do this, partition $\bix$ into
-$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
-\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
-to~$\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
-$=\min(M,p)$, and since there are $M \times p$ regression coefficients
-to estimate this is `too' large for some models and/or data sets.
-If we approximate~$\bB_2$ by a reduced-rank regression \begin{equation}
-\label{eq:rrr.BAC} \bB_2 ~=~ \bC{} \, \bA^{\top} \end{equation} and if
-the rank~$R$ is kept low then this can cut down the number of regression
-coefficients dramatically. If~$R=2$ then the results may be biplotted
-(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
-and $M \times R$ respectively, and usually they are `thin'.
-
-
-More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
-is simply a VGLM where~$\bB_2$ is expressed as a product of two thin
-estimated matrices (Table~\ref{tab:rrvglam.jss.subset}). Indeed,
-\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
-matrices that are unknown and estimated. Computationally, this is
-done using an alternating method: in~(\ref{eq:rrr.BAC}) estimate~\bA{}
-given the current estimate of~\bC{}, and then estimate~\bC{} given the
-current estimate of~\bA{}. This alternating algorithm is repeated until
-convergence within each IRLS iteration.
-
-
-Incidentally, special cases of RR-VGLMs have appeared in the
-literature. For example, an RR-multinomial logit model is known as the
-\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
-RC~model (see Section~\ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
-multivariate Poisson model. Note that the parallelism assumption of the
-proportional odds model \citep{mccu:neld:1989} can be thought of as a
-type of reduced-rank regression where the constraint matrices are thin
-($\bone_M$, actually) and known.
-
-
-
-The modeling function \texttt{rrvglm()} should work with any \VGAM{}
-family function compatible with \texttt{vglm()}. Of course, its
-applicability should be restricted to models where a reduced-rank
-regression of~$\bB_2$ makes sense.
-
-
-
-
-
-
-
-
-
-\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
-\label{sec:jsscat.rrr.goodman}
-
-
-
-
-
-Let~$\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
-Section~4.2 of~\cite{yee:hast:2003} shows that Goodman's~RC$(R)$ association
-model \citep{good:1981} fits within the VGLM framework by setting up
-the appropriate indicator variables, structural zeros and constraint
-matrices. Goodman's model fits a reduced-rank type model to~\bY{}
-by firstly assuming that~$Y_{ij}$ has a Poisson distribution, and that
-\begin{eqnarray}
-\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} +
-\sum_{k=1}^R a_{ik} \, c_{jk} ,
-\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
-\label{eqn:goodmanrc}
-\end{eqnarray}
-where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
-rank~$R$ satisfies $R < \min(n,M)$.
-
-
-The modeling function \texttt{grc()} should work on any two-way
-table~\bY{} of counts generated by~(\ref{eqn:goodmanrc}) provided
-the number of 0's is not too large. Its usage is quite simple, e.g.,
-\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
-By default a \texttt{Rank = 1} model is fitted.
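-
-A minimal sketch, assuming \texttt{Ymatrix} is such a two-way table of
-counts:
-\begin{Code}
-grc1 <- grc(Ymatrix)             # Rank = 1, the default
-grc2 <- grc(Ymatrix, Rank = 2)
-biplot(grc2)                     # rank-2 fits may be biplotted
-\end{Code}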
-
-
-
-
-\subsection{Bradley-Terry models}
-\label{sec:jsscat.brat}
-
-Consider
-an experiment consisting of $n_{ij}$ judges who compare
-pairs of items $T_i$, $i=1,\ldots,M+1$.
-They express their preferences between $T_i$ and $T_j$.
-Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
-comparisons, and assume independence for ratings of the same pair
-by different judges and for ratings of different pairs by the same judge.
-Let $\pi_i$ be the \textit{worth} of item~$T_i$,
-$$
-\pr(T_i > T_j) ~=~ p_{i/ij} ~=~ \frac{\pi_i}{\pi_i + \pi_j},
-\ ~~~~~i \neq {j},
-$$
-where ``$T_i>T_j$'' means~$i$ is preferred over~$j$.
-Suppose that $\pi_i > 0$.
-Let~$Y_{ij}$ be the number of times that $T_i$ is preferred
-over~$T_j$ in the~$n_{ij}$ comparisons of the pairs.
-Then~$Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
-This is a Bradley-Terry model (without ties),
-and the \VGAM{} family function is~\texttt{brat()}.
-
-
-Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
-involves maximizing
-$$
-\prod_{i<j}^{M+1}
-\left(
-\begin{array}{c}
-n_{ij} \\
-y_{ij}
-\end{array} \right)
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)^{y_{ij}}
-\left(
-\frac{\pi_j}{\pi_i + \pi_j}
-\right)^{n_{ij}-y_{ij}} .
-$$
-By default, $\pi_{M+1} \equiv 1$ is used for identifiability;
-however, this can be changed very easily.
-Note that one can define
-linear predictors $\eta_{ij}$ of the form
-\begin{equation}
-\label{eq:bradter.logit}
-\logit
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right) ~=~ \log
-\left(
-\frac{\pi_i}{\pi_j}
-\right) ~=~ \lambda_i - \lambda_j .
-\end{equation}
-The \VGAM{} framework can handle the Bradley-Terry model only for
-intercept-only models; it has
-\begin{equation}
-\label{eq:bradter}
-\lambda_j ~=~ \eta_j ~=~ \log\, \pi_j = \beta_{(1)j},
-\ \ \ \ j=1,\ldots,M.
-\end{equation}
-
-
-As well as having many applications in the field of preferences,
-the Bradley-Terry model has many uses in modeling `contests' between
-teams~$i$ and~$j$, where only one of the teams can win in each
-contest (ties are not allowed under the classical model).
-The packaging function \texttt{Brat()} can be used to
-convert a square matrix into one that has more columns, to
-serve as input to \texttt{vglm()}.
-For example,
-consider journal citation data, where a citation of article~B
-by article~A is a win for article~B and a loss for article~A.
-On a specific data set,
-<<>>=
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284, 730, NA, 813, 276,
- 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-@
-then \texttt{Brat(squaremat)} returns a~$1 \times 12$ matrix.
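-
-A hedged sketch of the subsequent fit, which is intercept-only as
-required by~(\ref{eq:bradter}):
-\begin{Code}
-fit <- vglm(Brat(squaremat) ~ 1, brat)
-Coef(fit)    # estimated worths pi_i; the last is fixed at 1 by default
-\end{Code}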
-
-
-
-
-
-
-
-\subsubsection{Bradley-Terry model with ties}
-\label{sec:cat.bratt}
-
-
-The \VGAM{} family function \texttt{bratt()}
-implements
-a Bradley-Terry model with ties (no preference), e.g.,
-where both $T_i$ and $T_j$ are equally good or bad.
-Here we assume
-\begin{eqnarray*}
- \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
-\ ~~~~~
- \pr(T_i = T_j) ~=~ \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
-\end{eqnarray*}
-with $\pi_0 > 0$ as an extra parameter.
-It has
-$$
-\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
-$$
-by default, where there are $M$~competitors and $\pi_M \equiv 1$.
-Like \texttt{brat()}, one can choose a different reference group
-and reference value.
-
-
-Other \R{}~packages for the Bradley-Terry model
-include \pkg{BradleyTerry2}
-by H.~Turner and D.~Firth
-\citep[with and without ties;][]{firth:2005,firth:2008}
-and \pkg{prefmod} \citep{Hatzinger:2009}.
-
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}[small]{|l|c|}
-\hline
-\pkg{VGAM} family function & Independent parameters \\
-\hline
-\texttt{ABO()} & $p, q$ \\
-\texttt{MNSs()} & $m_S, m_s, n_S$ \\
-\texttt{AB.Ab.aB.ab()} & $p$ \\
-\texttt{AB.Ab.aB.ab2()} & $p$ \\
-\texttt{AA.Aa.aa()} & $p_A$ \\
-\texttt{G1G2G3()} & $p_1, p_2, f$ \\
-\hline
-\end{tabular}
-\caption{Some genetic models currently implemented
-and their unique parameters.
-\label{tab:gen.all}
-}
-\end{table}
-
-
-
-
-
-\subsection{Genetic models}
-\label{sec:jsscat.genetic}
-
-
-There are quite a number of population genetic models based on the
-multinomial distribution,
-e.g., \cite{weir:1996}, \cite{lang:2002}.
-Table~\ref{tab:gen.all} lists some \pkg{VGAM}~family functions for such.
-
-
-
-
-For example the ABO blood group system
-has two independent parameters~$p$ and~$q$, say.
-Here,
-the blood groups A, B and O~form six possible combinations (genotypes)
-consisting of AA, AO, BB, BO, AB, OO
-(see Table~\ref{tab:ABO}). A and~B are dominant over
-blood type~O. Let $p$, $q$ and $r$ be the probabilities
-for A, B and~O respectively (so that
-$p+q+r=1$) for a given population.
-The log-likelihood function is
-\[
-\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
-\log(2pq) + 2 n_O\, \log(1-p-q),
-\]
-where $r = 1 - p -q$, $p \in (\,0,1\,)$,
-$q \in (\,0,1\,)$, $p+q<1$.
-We let $\boldeta = (g(p), g(r))^{\top}$ where $g$ is the link function.
-Any~$g$ from Table~\ref{tab:jsscat.links} appropriate for
-a parameter $\theta \in (0,1)$ will do.
-
-
-A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
-coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
-@
-The function \texttt{Coef()}, which applies only to intercept-only models,
-inverts $g_{j}(\theta_{j})=\eta_{j}$:
-it applies the inverse link function $g_{j}^{-1}$ to~$\widehat{\eta}_{j}$
-to give~$\widehat{\theta}_{j}$.
-
-
-
-
-
-
-
-\begin{table}[tt]
-% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
-\begin{center}
-\begin{tabular}{|l|cc|cc|c|c|}
-\hline
-Genotype & AA & AO & BB & BO & AB & OO \\
-Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
-Blood group& A & A & B & B & AB & O \\
-\hline
-\end{tabular}
-\end{center}
-\caption{Probability table for the ABO blood group system.
-Note that $p$~and $q$~are the parameters and $r=1-p-q$.
-\label{tab:ABO}
-}
-\end{table}
-
-
-
-
-
-\subsection{Three main distributions}
-\label{sec:jsscat.3maindist}
-
-\cite{agre:2002} discusses three main distributions for categorical
-variables: binomial, multinomial, and Poisson
-\citep{thom:2009}.
-All these are well-represented in the \VGAM{} package,
-accompanied by variant forms.
-For example,
-the \VGAM{} family function \texttt{mbinomial()}
-implements a matched binomial
-(suitable for matched case-control studies);
-other variant forms include
-Poisson ordination (useful in ecology for multi-species-environmental data),
-negative binomial families,
-positive, zero-altered and zero-inflated variants,
-and the bivariate odds ratio model
-\citep[\texttt{binom2.or()}; see Section~6.5.6 of][]{mccu:neld:1989}.
-The latter has an \texttt{exchangeable} argument to allow for an
-exchangeable error structure:
-\begin{eqnarray}
-\bH_1 ~=~
-\left( \begin{array}{cc}
-1 & 0 \\
-1 & 0 \\
-0 & 1 \\
-\end{array} \right), ~~~~~
-\bH_k ~=~
-\left( \begin{array}{c}
-1 \\
-1 \\
-0 \\
-\end{array} \right), ~~k=2,\ldots,p,
-\label{eqn:blom.exchangeable}
-\end{eqnarray}
-since, for data $(Y_1,Y_2,\bix)$,
-$\logit \, P\!\left( Y_{j} = 1 \Big| \bix \right) =
-\eta_{j}$ for ${j}=1,2$, and
-$\log \, \psi = \eta_{3}$
-where $\psi$~is the odds ratio,
-and so $\eta_{1}=\eta_{2}$.
-Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning $\psi$~is
-modelled as intercept-only
-(in general, \texttt{zero} may be assigned an integer vector
-such that the value~$j$ means $\eta_{j} = \beta_{(j)1}$,
-i.e., the $j$th~linear/additive predictor is intercept-only).
-See the online help for all of these models.
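-
-A minimal sketch of this, assuming a two-column matrix of binary
-responses \texttt{ybin} and a covariate \texttt{x2} in a hypothetical
-data frame \texttt{mydata} (cf.~Equation~\ref{eqn:blom.exchangeable}):
-\begin{Code}
-fit <- vglm(ybin ~ x2, binom2.or(exchangeable = TRUE, zero = 3),
-            data = mydata)
-constraints(fit)   # the constraint matrices H_k displayed above
-\end{Code}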
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some user-oriented topics}
-\label{sec:jsscat.userTopics}
-
-
-Making the most of \VGAM{} requires an understanding of the general
-VGLM/VGAM framework described in Section~\ref{sec:jsscat.VGLMVGAMoverview}.
-In this section we connect elements of that framework with the software.
-Before doing so it is noted that
-a fitted \VGAM{} categorical model has access to the usual
-generic functions, e.g.,
-\texttt{coef()} for
-$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
-(see Equation~\ref{eqn:lin.coefs4}),
-\texttt{constraints()} for $\bH_k$,
-\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
-\texttt{fitted()} for $\widehat{\bmu}_i$,
-\texttt{logLik()} for $\ell$,
-\texttt{predict()} for $\widehat{\boldeta}_i$,
-\texttt{print()},
-\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
-\texttt{summary()},
-\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
-etc.
-The methods function for the extractor function
-\texttt{coef()} has an argument \texttt{matrix}
-which, when set \texttt{TRUE}, returns~$\widehat{\bB}$
-(see Equation~\ref{gammod}) as a $p \times M$ matrix,
-and this is particularly useful for confirming that a fit
-has made a parallelism assumption.
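-
-For instance, on some fitted categorical model \texttt{fit}, one might
-call (a sketch):
-\begin{Code}
-coef(fit, matrix = TRUE)   # Bhat as a p x M matrix
-constraints(fit)           # the H_k
-head(predict(fit))         # the etahat_i
-head(fitted(fit))          # the muhat_i
-\end{Code}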
-
-
-
-
-
-
-
-\subsection{Common arguments}
-\label{sec:jsscat.commonArgs}
-
-
-The structure of the unified framework given in
-Section~\ref{sec:jsscat.VGLMVGAMoverview}
-appears clearly through
-the pool of common arguments
-shared by the
-\VGAM{} family functions in Table~\ref{tab:cat.quantities}.
-In particular,
-\texttt{reverse} and
-\texttt{parallel}
-are prominent with CDA.
-These are merely convenient shortcuts for the argument \texttt{constraints},
-which accepts a named list of constraint matrices~$\bH_k$.
-For example, setting
-\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
-in~(\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
-each separately for $k=2,\ldots,p$.
-That is, $\bH_k = \bone_M$.
-The argument~\texttt{reverse} determines the `direction' of
-the parameter or quantity.
-
-Another argument not so much used with CDA is~\texttt{zero};
-this accepts a vector specifying which~$\eta_j$ are to be modelled as
-intercepts-only; assigning \texttt{NULL} means none.
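-
-For example (a sketch with hypothetical data), \texttt{binom2.or(zero =
-NULL)} would allow the odds ratio~$\psi$ to be modelled as a function of
-the covariates rather than as an intercept-only:
-\begin{Code}
-fit <- vglm(ybin ~ x2, binom2.or(zero = NULL), data = mydata)
-\end{Code}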
-
-
-
-
-
-
-
-
-\subsection{Link functions}
-\label{sec:jsscat.links}
-
-Almost all \VGAM{} family functions
-(one notable exception is \texttt{multinomial()})
-allow, in theory, for any link function to be assigned to each~$\eta_j$.
-This provides maximum capability.
-If so then there is an extra argument to pass in any known parameter
-associated with the link function.
-For example, \texttt{link = "logoff", earg = list(offset = 1)}
-signifies a log link with a unit offset:
-$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter~$\theta_{j}\ (> -1)$.
-The name \texttt{earg} stands for ``extra argument''.
-Table~\ref{tab:jsscat.links} lists some links relevant to categorical data.
-While the default gives a reasonable first choice,
-users are encouraged to try different links.
-For example, fitting a binary regression model
-(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers} with
-respect to the response wheeze gives a
-nonsignificant regression coefficient for $\beta_{(1)3}$~with probit analysis
-but not with a logit link when
-$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
-Developers and serious users are encouraged to write and use
-new link functions compatible with~\VGAM.
-
-
-
-
-
-
-\begin{table*}[tt]
-\centering
-\medskip
-\begin{tabular}{|l|c|c|}
-\hline
-Link function & $g(\theta)$ & Range of $\theta$ \\
-\hline
-\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
-\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
-\texttt{fisherz()} &
-$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
-\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
-\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
-\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
-\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
-\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
-\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\hline
-\end{tabular}
-\caption{
-Some \VGAM{} link functions pertinent to this article.
-\label{tab:jsscat.links}
-}
-\end{table*}
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Examples}
-\label{sec:jsscat.eg}
-
-This section illustrates CDA modeling on three
-data sets in order to give a flavour of what is available in the package.
-
-
-
-
-%20130919
-%Note:
-%\subsection{2008 World Fly Fishing Championships}
-%\label{sec:jsscat.eg.WFFC}
-%are deleted since there are problems with accessing the \texttt{wffc.nc}
-%data etc. since they are now in \pkg{VGAMdata}.
-
-
-
-
-
-
-
-\subsection{Marital status data}
-\label{sec:jsscat.eg.mstatus}
-
-We fit a nonparametric multinomial logit model to data collected from
-a self-administered questionnaire administered in a large New Zealand
-workforce observational study conducted during 1992--3.
-The data were augmented by a second study consisting of retirees.
-For homogeneity, this analysis is restricted
-to a subset of 6053 European males with no missing values.
-The ages ranged between~16 and 88~years.
-The data can be considered a reasonable representation of the white
-male New Zealand population in the early 1990s, and
-are detailed in~\cite{macm:etal:1995} and~\cite{yee:wild:1996}.
-We are interested in exploring how $Y=$ marital status varies as a function
-of $x_2=$ age. The nominal response~$Y$ has four levels;
-in sorted order, they are divorced or separated, married or partnered,
-single and widowed.
-We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
-and will choose the married/partnered (second level) as the reference group
-because the other levels emanate directly from it.
-
-Suppose the data are in a data frame called \texttt{marital.nz}
-and looks like
-<<>>=
-head(marital.nz, 4)
-summary(marital.nz)
-@
-We fit the VGAM
-<<>>=
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
- data = marital.nz)
-@
-
-Once again let's firstly check the input.
-<<>>=
-head(fit.ms@y, 4)
-colSums(fit.ms@y)
-@
-This seems ok.
-
-
-
-
-Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
-may be plotted with
-<<fig=F>>=
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-@
-to produce Figure~\ref{fig:jsscat.eg.mstatus}.
-The \texttt{scale} argument is used here to ensure that the $y$-axes have
-a common scale---this makes comparisons between the component functions
-less susceptible to misinterpretation.
-The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
-$\eta_1$,
-$\eta_2$,
-$\eta_3$,
-where
-\begin{eqnarray}
-\label{eq:jsscat.eg.nzms.cf}
-\eta_{s} ~=~
-\log(\pr(Y={t}) / \pr(Y={2})) ~=~
-\beta_{(s)1} + f_{(s)2}(x_2),
-\end{eqnarray}
-$(s,t) = (1,1), (2,3), (3,4)$,
-and~$x_2$ is~\texttt{age}.
-The last plot shows the smooths overlaid to aid comparison.
-
-
-It may be seen that the $\pm 2$ standard error bands
-about the \texttt{Widowed} group are particularly wide at
-young ages because of a paucity of data, and
-likewise at old ages amongst the \texttt{Single}s.
-The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
-The log relative risk of
-being single relative to being married/partnered drops sharply from
-ages~16 to~40.
-The fitted function for the~\texttt{Widowed} group increases
-with~\texttt{age} and looks reasonably linear.
-The $\widehat{f}_{(1)2}(x_2)$
-suggests a possible maximum around 50~years old---this
-could indicate the greatest marital conflict occurs during
-the mid-life crisis years!
-
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
- par(mar=c(4.2,4.0,1.2,2.2)+0.1)
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-@
-\caption{
-Fitted (and centered) component functions
-$\widehat{f}_{(s)2}(x_2)$
-from the NZ marital status data
-(see Equation~\ref{eq:jsscat.eg.nzms.cf}).
-The bottom RHS plot shows the smooths overlaid.
-\label{fig:jsscat.eg.mstatus}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-The methods function for~\texttt{plot()} can also plot the
-derivatives of the smooths.
-The call
-<<fig=F>>=
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-@
-results in Figure~\ref{fig:jsscat.eg.mstatus.cf.deriv}.
-Once again the $y$-axis scales are commensurate.
-
-\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,3))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-@
-\caption{
-Estimated first derivatives of the component functions,
-$\widehat{f'}_{(s)2}(x_2)$,
-from the NZ marital status data
-(see Equation~\ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.cf.deriv}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-The derivative for the \texttt{Divorced/Separated} group appears
-linear so that a quadratic component function could be tried.
-Not surprisingly the \texttt{Single} group shows the greatest change;
-also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till~50
-and then flat---this suggests one could fit a piecewise quadratic
-function to model that component function up to 50~years.
-The~\texttt{Widowed} group appears largely flat.
-We thus fit the parametric model
-<<>>=
-foo <- function(x, elbow=50)
- poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
- "poly(age, 2)" = rbind(1, 0, 0),
- "foo(age)" = rbind(0, 1, 0),
- "age" = rbind(0, 0, 1))
-fit2.ms <-
- vglm(mstatus ~ poly(age, 2) + foo(age) + age,
- family = multinomial(refLevel = 2),
- constraints = clist,
- data = marital.nz)
-@
-Then
-<<>>=
-coef(fit2.ms, matrix = TRUE)
-@
-confirms that one term was used for each component function.
-The plots from
-<<fig=F>>=
-par(mfrow=c(2,2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol=mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-are given in Figure~\ref{fig:jsscat.eg.mstatus.vglm}
-and appear like
-Figure~\ref{fig:jsscat.eg.mstatus}.
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-\caption{
-Parametric version of~\texttt{fit.ms}: \texttt{fit2.ms}.
-The component functions are now quadratic, piecewise quadratic/zero,
-or linear.
-\label{fig:jsscat.eg.mstatus.vglm}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-It is possible to perform very crude inference based on heuristic theory
-of a deviance test:
-<<>>=
-deviance(fit.ms) - deviance(fit2.ms)
-@
-is small, so it seems the parametric model is quite reasonable
-against the original nonparametric model.
-Specifically,
-the difference in the number of `parameters' is approximately
-<<>>=
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-@
-which gives an approximate $p$~value of
-<<>>=
-1-pchisq(deviance(fit.ms) - deviance(fit2.ms), df=dfdiff)
-@
-Thus \texttt{fit2.ms} appears quite reasonable.
-
-
-
-
-
-
-
-
-The estimated probabilities of the original fit can be plotted
-against~\texttt{age} using
-<<fig=F>>=
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age", # main="Marital status amongst NZ Male Europeans",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
- col=c(mycol[1], "black", mycol[-1]),
- lty=1:4,
- legend=colnames(fit.ms@y), lwd=2)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-@
-which gives Figure~\ref{fig:jsscat.eg.mstatus.fitted}.
-This shows that 80--90\%~of NZ white males
-aged between their early~30s and mid-70s
-were married/partnered.
-The proportion widowed
-started to rise steeply from 70~years onwards but remained below~0.5
-since males die younger than females on average.
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62,
- col=c(mycol[1], "black", mycol[-1]),
- lty=1:4,
- legend=colnames(fit.ms@y), lwd=2.1)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-@
-\caption{
-Fitted probabilities for each class for the
-NZ male European
-marital status data
-(from Equation~\ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.fitted}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-\subsection{Stereotype model}
-\label{sec:jsscat.eg.grc.stereotype}
-
-We reproduce some of the analyses of \cite{ande:1984} regarding the
-progress of 101~patients with back pain
-using the data frame \texttt{backPain} from \pkg{gnm}
-\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
-The three prognostic variables are
-length of previous attack ($x_1=1,2$),
-pain change ($x_2=1,2,3$)
-and lordosis ($x_3=1,2$).
-Like Anderson, we treat these as numerical, and standardize and negate them.
-%
-The output
-<<>>=
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-@
-displays the six ordered categories.
-Now a rank-1 stereotype model can be fitted with
-<<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain)
-@
-Then
-<<>>=
-Coef(bp.rrmlm1)
-@
-are the fitted \bA, \bC{} and~$\bB_1$ (see Equation~\ref{eq:rrr.BAC}
-and Table~\ref{tab:rrvglam.jss.subset}), which agrees with his Table~6.
-Here, what is known as ``corner constraints'' is used
-($(1,1)$ element of \bA{} $\equiv 1$),
-and only the intercepts are not subject to any reduced-rank regression
-by default.
-The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
-is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
-The standard errors of each parameter can be obtained by
-\textsl{\texttt{summary(bp.rrmlm1)}}.
-The negative elements of~$\widehat{\bC}$ imply the
-latent variable~$\widehat{\nu}$ decreases in value with increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}}.
-The elements of~$\widehat{\bA}$ tend to decrease so it suggests
-patients get worse as $\nu$~increases,
-i.e., get better as \textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} increase.
-
-
-
-
-
-
-<<echo=FALSE>>=
-set.seed(123)
-@
-A rank-2 model fitted \textit{with a different normalization}
-<<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
- Corner = FALSE, Uncor = TRUE)
-@
-produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
-In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
-so that the latent variables are also scaled to have unit variance.
-The fit was biplotted
-(rows of $\widehat{\bC}$ plotted as arrows;
- rows of $\widehat{\bA}$ plotted as labels) using
-<<figure=F>>=
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-@
-to give Figure~\ref{fig:jsscat.eg.rrmlm2.backPain}.
-It is interpreted via inner products due to~(\ref{eq:rrr.BAC}).
-The different normalization means that the interpretation of~$\nu_1$
-and~$\nu_2$ has changed, e.g., increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
-patients improve more.
-Many of the latent variable points $\widehat{\bnu}_i$ are coincident
-due to the discrete nature of the~$\bix_i$. The rows of~$\widehat{\bA}$
-are centered on the blue labels (rather cluttered unfortunately) and
-do not seem to vary much as a function of~$\nu_2$.
-In fact this is confirmed by~\cite{ande:1984} who showed a rank-1
-model is to be preferred.
-
-
-
-This example demonstrates the ability to obtain a low dimensional view
-of higher dimensional data. The package's website has additional
-documentation including more detailed Goodman's~RC and stereotype
-examples.
-
-
-
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-@
-\caption{
-Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
-fitted to the back pain data.
-A convex hull surrounds the latent variable scores
-$\widehat{\bnu}_i$
-(whose observation numbers are obscured because of their discrete nature).
-The position of the $j$th~row of~$\widehat{\bA}$
-is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
-\label{fig:jsscat.eg.rrmlm2.backPain}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some implementation details}
-\label{sec:jsscat.implementDetails}
-
-This section describes some implementation details of~\VGAM{}
-which will be more of interest to the developer than to the casual user.
-
-
-
-\subsection{Common code}
-\label{sec:jsscat.implementDetails.code}
-
-It is good programming practice to write reusable code where possible.
-All the \VGAM{}~family functions in Table~\ref{tab:cat.quantities}
-process the response in the same way because the same segment of code
-is executed. This offers a degree of uniformity in terms of how input is
-handled, and also for software maintenance
-(\cite{altm:jack:2010} enumerates good programming techniques and references).
-As well, the default initial values are computed in the same manner
-based on sample proportions of each level of~$Y$.
-
-
-
-
-
-\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
-\label{sec:jsscat.implementDetails.mbformat}
-
-The working weight matrices $\bW_i$ may become large for categorical
-regression models. In general, we have to evaluate the~$\bW_i$
-for~$i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
-dimension~\texttt{c(M, M, n)}. However, since the~$\bW_i$ are symmetric
-positive-definite it suffices to only store the upper or lower half of
-the matrix.
-
-
-
-The variable~\texttt{wz} in \texttt{vglm.fit()}
-stores the working weight matrices $\bW_i$ in
-a special format called the \textit{matrix-band} format. This
-format comprises an $n \times M^*$ matrix where
-$$
-M^* ~=~ \sum_{i=1}^{\footnotesize \textit{hbw}} \;
-\left(M-i+1\right) ~=~
-\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
-$$
-is the number of columns. Here, \textit{hbw} refers to the
-\textit{half-bandwidth} of the matrix, which is an integer
-between~1 and~$M$ inclusive. A diagonal matrix has
-unit half-bandwidth, a tridiagonal matrix has half-bandwidth~2, etc.
-
-
-Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
-enumerating the unique elements of~$\bW_i$ as follows:
-\begin{eqnarray}
-\bW_i ~=~
-\left( \begin{array}{rrrr}
-1 & 5 & 8 & 10 \\
- & 2 & 6 & 9 \\
- & & 3 & 7 \\
- & & & 4
-\end{array} \right).
-\label{eqn:hbw.eg}
-\end{eqnarray}
-That is, the order is firstly the diagonal, then the band above that,
-followed by the second band above the diagonal etc.
-Why is such a format adopted?
-For this example, if $\bW_i$ is diagonal then only the first 4 columns
-of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
-first~7 columns of \texttt{wz} are needed.
-If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
-all $\frac12 M(M+1)$ columns; only~$M^*$ columns suffice, and the
-rest of the elements of~$\bW_i$ are implicitly zero.
-As well as reducing the size of \texttt{wz} itself in most cases, the
-matrix-band format often makes the computation of \texttt{wz} very
-simple and efficient. Furthermore, a Cholesky decomposition of a
-banded matrix will be banded. A final reason is that sometimes we
-want to input~$\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
-n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
-whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
-
-
-
-To facilitate the use of the matrix-band format,
-a few auxiliary functions have been written.
-In particular, there is \texttt{iam()} which gives the indices
-for the array-to-matrix mapping.
-In the $4\times 4$ example above,
-<<>>=
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-@
-returns the indices for the respective array coordinates for
-successive columns of matrix-band format
-(see Equation~\ref{eqn:hbw.eg}).
-If \texttt{diag = FALSE} then the first~4 elements in each vector
-are omitted. Note that the first two arguments of
-\texttt{iam()} are not used here and have been assigned
-\texttt{NA}s for simplicity.
-For its use on the multinomial logit model, where
-$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and
-$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
-this can be programmed succinctly like
-\begin{Code}
-wz <- mu[, 1:M] * (1 - mu[, 1:M])
-if (M > 1) {
- index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
- wz <- cbind(wz, -mu[, index$row] * mu[, index$col])
-}
-wz <- w * wz
-\end{Code}
-(the actual code is slightly more complicated).
-In general, \VGAM{}~family functions can be remarkably compact,
-e.g.,
-\texttt{acat()},
-\texttt{cratio()}
-and
-\texttt{multinomial()} are all less than 120~lines of code each.
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Extensions and utilities}
-\label{sec:jsscat.extnUtil}
-
-This section describes some useful utilities/extensions of the above.
-
-
-
-\subsection{Marginal effects}
-\label{sec:jsscat.extnUtil.margeff}
-
-
-Models such as the multinomial logit and cumulative link models
-model the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
-In some applications, knowing the derivative of~$p_{j}$
-with respect to some of the~$x_k$ is useful;
-in fact, often just knowing the sign is important.
-The function \texttt{margeff()} computes the derivatives and
-returns them as a $p \times (M+1) \times n$ array.
-For the multinomial logit model it is easy to show
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_i)}{\partial \,
-\bix_{i}}
-&=&
-p_{j}(\bix_i)
-\left\{
- \bbeta_{j} -
-\sum_{s=1}^{M+1}
-p_{s}(\bix_i)
-\,
- \bbeta_{s}
-\right\},
-\label{eqn:multinomial.marginalEffects}
-\end{eqnarray}
-while for
-\texttt{cumulative(reverse = FALSE)}
-we have
-$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
-where $h=g^{-1}$ is the inverse of the link function
-(cf.~Table~\ref{tab:cat.quantities})
-so that
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_{})}{\partial \,
-\bix}
-&=&
-h'(\eta_{j}) \, \bbeta_{j} -
-h'(\eta_{j-1}) \, \bbeta_{j-1} .
-\label{eqn:cumulative.marginalEffects}
-\end{eqnarray}
-
-
-
-
-The function \texttt{margeff()} returns an array with these
-derivatives and should handle any value of
-\texttt{reverse} and \texttt{parallel}.
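-For example, a minimal sketch with the \texttt{pneumo} data and a
-multinomial logit model:
-\begin{Code}
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
-mar <- margeff(fit)
-dim(mar)    # p x (M+1) x n
-mar[, , 1]  # Derivatives of the fitted probabilities at observation 1
-\end{Code}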
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\subsection[The xij argument]{The \texttt{xij} argument}
-\label{sec:jsscat.extnUtil.xij}
-
-There are many models, including those for categorical data,
-where the value of an explanatory variable~$x_k$ differs depending
-on which linear/additive predictor~$\eta_{j}$ it appears in.
-Here is a well-known example from {consumer choice} modeling.
-Suppose an econometrician is interested in people's
-choice of transport for travelling to work
-and that there are four choices:
-$Y=1$ for ``bus'',
-$Y=2$ for ``train'',
-$Y=3$ for ``car'' and
-$Y=4$ for ``walking''.
-Assume that people only choose one means to go to work.
-Suppose there are three covariates:
-$X_2=$ cost,
-$X_3=$ journey time, and
-$X_4=$ distance.
-Of the covariates only~$X_4$ (and the intercept~$X_1$)
-is the same for all transport choices;
-the cost and journey time differ according to the means chosen.
-Suppose a random sample of~$n$ people is collected
-from some population, and that each person has
-access to all these transport modes.
-For such data, a natural regression model would be a
-multinomial logit model with~$M=3$:
-for $j=1,\ldots,M$, we have
-$\eta_{j} =$
-\begin{eqnarray}
-\log \frac{\pr(Y=j)}{\pr(Y=M+1)}
-&=&
-\beta_{(j)1}^{*} +
-\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
-\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
-\beta_{(1)4}^{*} \, x_{i4},
-\label{eqn:xij.eg.gotowork}
-\end{eqnarray}
-where, for the~$i$th person,
-$x_{i2j}$ is the cost for the~$j$th transport means, and
-$x_{i3j}$ is the journey time of the~$j$th transport means.
-The distance to get to work is $x_{i4}$; it has the same value
-regardless of the transport means.
-
-
-Equation~\ref{eqn:xij.eg.gotowork}
-implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
-Note
-also that if the last response category is used as the baseline or
-reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
-can be subtracted from $x_{ikj}$ for~$j=1,\ldots,M$---this
-is the natural way $x_{ik,M+1}$ enters into the model.
-
-
-
-
-Recall from~(\ref{gammod2}) that we had
-\begin{equation}
-\eta_j(\bix_i) ~=~ \bbeta_j^{\top} \bix_i ~=~
-\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
-\label{eqn:xij0}
-\end{equation}
-Importantly, this can be generalized to
-\begin{equation}
-\eta_j(\bix_{ij}) ~=~ \bbeta_j^{\top} \bix_{ij} ~=~
-\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
-\label{eqn:xij}
-\end{equation}
-or writing this another way (as a mixture or hybrid),
-\begin{equation}
-\eta_j(\bix_{i}^{*},\bix_{ij}^{*}) ~=~
-\bbeta_{j}^{*T} \bix_{i}^{*} + \bbeta_{j}^{**T} \bix_{ij}^{*} .
-\label{eqn:xij2}
-\end{equation}
-Often $\bbeta_{j}^{**} = \bbeta_{}^{**}$, say.
-In~(\ref{eqn:xij2}) the variables in~$\bix_{i}^{*}$ are common to
-all~$\eta_{j}$, and the variables in~$\bix_{ij}^{*}$ have
-different values for differing~$\eta_{j}$.
-This allows for covariate values that are specific to each~$\eta_j$,
-a facility which is very important in many applications.
-
-
-The use of the \texttt{xij} argument with the \VGAM{} family function
-\texttt{multinomial()} has very important applications in economics.
-In that field the term ``multinomial logit model'' includes a variety of
-models such as the ``generalized logit model'' where (\ref{eqn:xij0})
-holds, the ``conditional logit model'' where~(\ref{eqn:xij}) holds,
-and the ``mixed logit model,'' which is a combination of the two,
-where~(\ref{eqn:xij2}) holds.
-The generalized logit model focusses on the individual as the unit of
-analysis, and uses individual characteristics as explanatory variables,
-e.g., age of the person in the transport example.
-The conditional logit model uses explanatory variables that take
-different values for each alternative, and the impact of a unit
-change in~$x_k$ is assumed to be constant across alternatives,
-e.g., journey time in the choice of transport mode.
-Unfortunately, there is confusion in the literature over the terminology
-of these models. Some authors call \texttt{multinomial()}
-with~(\ref{eqn:xij0}) the ``generalized logit model''.
-Others call the mixed
-logit model the ``multinomial logit model'' and view the generalized
-logit and conditional logit models as special cases.
-In~\VGAM{} terminology there is no need to give different names to
-all these slightly differing special cases. They are all still called
-multinomial logit models, although it may be added that there are
-some covariate-specific linear/additive predictors.
-The important thing is that the framework accommodates~$\bix_{ij}$,
-so one tries to avoid making life unnecessarily complicated.
-And~\texttt{xij} can apply in theory to any VGLM and not just to the
-multinomial logit model.
-\cite{imai:king:lau:2008} present another perspective on the
-$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
-\citep{Zelig:2009}.
-
-
-
-
-
-\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
-\label{sec:xij.sub}
-
-\VGAM{} handles variables whose values depend on $\eta_{j}$,
-(\ref{eqn:xij2}), using the \texttt{xij} argument.
-It is assigned an \proglang{S}~formula or a list of \proglang{S}~formulas.
-Each formula, which must have~$M$ \textit{different} terms,
-forms a matrix that premultiplies a constraint matrix.
-In detail, (\ref{eqn:xij0})~can be written in vector form as
-\begin{equation}
-\boldeta(\bix_i) ~=~ \bB^{\top} \bix_i ~=~
-\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
-\label{eqn:xij0.vector}
-\end{equation}
-where
-$\bbeta_{k}^{*} =
-\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
-is to be estimated.
-This may be written
-\begin{eqnarray}
-\boldeta(\bix_{i})
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
-\bH_k \, \bbeta_{k}^{*}.
-\label{eqn:xij.d.vector}
-\end{eqnarray}
-To handle~(\ref{eqn:xij})--(\ref{eqn:xij2})
-we can generalize~(\ref{eqn:xij.d.vector}) to
-\begin{eqnarray}
-\boldeta_i
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
-\bH_k \, \bbeta_{k}^{*}
-\ \ \ \ \left(=
-\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
-\bH_k \, \bbeta_{k}^{*} ,
-\mathrm{\ say} \right).
-\label{eqn:xij.vector}
-\end{eqnarray}
-Each component of the list \texttt{xij} is a formula having~$M$ terms
-(ignoring the intercept) which
-specifies the successive diagonal elements of the matrix~$\bX_{(ik)}^{*}$.
-Thus each row of the constraint matrix may be multiplied by a different
-vector of values.
-The constraint matrices themselves are not affected by the
-\texttt{xij} argument.
-
-
-
-
-
-How can one fit such models in \VGAM{}?
-Let us fit~(\ref{eqn:xij.eg.gotowork}).
-Suppose the journey cost and time variables have had the
-cost and time of walking subtracted from them.
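-This preprocessing might be done with \texttt{transform()};
-a sketch, assuming raw variables such as \texttt{Cost.walk} and
-\texttt{Time.walk} exist in \texttt{gotowork}:
-\begin{Code}
-gotowork <- transform(gotowork,
-                      Cost.bus = Cost.bus - Cost.walk,
-                      Cost.trn = Cost.trn - Cost.walk,
-                      Cost.car = Cost.car - Cost.walk,
-                      Time.bus = Time.bus - Time.walk,
-                      Time.trn = Time.trn - Time.walk,
-                      Time.car = Time.car - Time.walk)
-\end{Code}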
-Then,
-using ``\texttt{.trn}'' to denote train,
-\begin{Code}
-fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
- fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
- xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
- Time ~ Time.bus + Time.trn + Time.car),
- form2 = ~ Cost.bus + Cost.trn + Cost.car +
- Time.bus + Time.trn + Time.car +
- Cost + Time + Distance,
- data = gotowork)
-\end{Code}
-should do the job.
-Here, the argument \texttt{form2} is assigned a second \proglang{S}~formula which
-is used in some special circumstances or by certain types
-of~\VGAM{} family functions.
-The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
-because the lack of parallelism only applies to the intercept.
-However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
-\texttt{Time} is the same as \texttt{Time.bus},
-this model should not be plotted with \texttt{plotvgam()};
-see the author's homepage for further documentation.
-
-
-By the way,
-suppose
-$\beta_{(1)4}^{*}$
-in~(\ref{eqn:xij.eg.gotowork})
-is replaced by~$\beta_{(j)4}^{*}$.
-Then the above code but with
-\begin{Code}
- fam = multinomial(parallel = FALSE ~ 1 + Distance),
-\end{Code}
-should fit this model.
-Equivalently, one can use
-\begin{Code}
- fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
-\end{Code}
-
-
-
-
-
-
-\subsubsection{A more complicated example}
-\label{sec:xij.complicated}
-
-The above example is straightforward because the
-variables were entered linearly. However, things
-become trickier if data-dependent functions are used in
-any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
-In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
-can be used to estimate a general smooth function~$f(x_{ij})$, which is
-very useful for exploratory data analysis.
-
-
-
-Suppose we wish to fit the variable \texttt{Cost} with a smoother.
-This is possible using regression splines together with a trick.
-Firstly note that
-\begin{Code}
-fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
- multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
- xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
- Time ~ Time.bus + Time.trn + Time.car),
- form2 = ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
- Time.bus + Time.trn + Time.car +
- ns(Cost) + Cost + Time + Distance,
- data = gotowork)
-\end{Code}
-will \textit{not} work because the basis functions for
-\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
-are not identical since the knots differ.
-Consequently, they represent different functions despite
-having common regression coefficients.
-
-
-Fortunately, it is possible to force the~\texttt{ns()} terms
-to have identical basis functions by using a trick:
-combine the vectors temporarily.
-To do this, one can let
-\begin{Code}
-NS <- function(x, ..., df = 3)
- ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
-\end{Code}
-This computes a natural cubic B-spline evaluated at~\texttt{x} but it uses the
-other arguments as well to form an overall vector from which to obtain
-the (common) knots.
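-As a small check that the trick works, permuting the combined vector
-leaves the knots unchanged (a sketch; \texttt{ns()} is from the
-\pkg{splines} package):
-\begin{Code}
-x1 <- runif(20); x2 <- runif(20); x3 <- runif(20)
-all.equal(attr(ns(c(x1, x2, x3), df = 3), "knots"),
-          attr(ns(c(x2, x3, x1), df = 3), "knots"))  # TRUE: common knots
-\end{Code}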
-Then the usage of \texttt{NS()} can be something like
-\begin{Code}
-fit4 <- vglm(cbind(bus, train, car, walk) ~ NS(Cost.bus, Cost.trn, Cost.car)
- + Time + Distance,
- multinomial(parallel = TRUE ~ NS(Cost.bus, Cost.trn, Cost.car)
- + Time + Distance - 1),
- xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
- NS(Cost.bus, Cost.trn, Cost.car) +
- NS(Cost.trn, Cost.car, Cost.bus) +
- NS(Cost.car, Cost.bus, Cost.trn),
- Time ~ Time.bus + Time.trn + Time.car),
- form2 = ~ NS(Cost.bus, Cost.trn, Cost.car) +
- NS(Cost.trn, Cost.car, Cost.bus) +
- NS(Cost.car, Cost.bus, Cost.trn) +
- Time.bus + Time.trn + Time.car +
- Cost.bus + Cost.trn + Cost.car +
- Time + Distance,
- data = gotowork)
-\end{Code}
-So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
-is the smooth term for
-\texttt{Cost.bus}, etc.
-Furthermore, \texttt{plotvgam()} may be applied to
-\texttt{fit4}, in which case the fitted regression spline is plotted
-against its first inner argument, viz.~\texttt{Cost.bus}.
-
-
-One reason why it also predicts correctly
-is ``smart prediction''
-\citep{Rnews:Yee:2008}.
-
-
-
-\subsubsection{Implementation details}
-\label{sec:jss.xij.implementationDetails}
-
-The~\texttt{xij} argument operates \textit{after} the
-ordinary $\bX_{\sVLM}$ matrix is created. Then selected columns
-of~$\bX_{\sVLM}$ are modified using the constraint matrices and the
-\texttt{xij} and~\texttt{form2} arguments, that is, using
-\texttt{form2}'s model matrix $\bX_{\sformtwo}$ and the~$\bH_k$.
-This whole operation is possible because $\bX_{\sVLM}$ remains
-structurally the same.
-The crucial equation is~(\ref{eqn:xij.vector}).
-
-
-Other \texttt{xij} examples are given in the online help of
-\texttt{fill()} and \texttt{vglm.control()},
-as well as at the package's webpage.
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Discussion}
-\label{sec:jsscat.discussion}
-
-
-This article has sought to convey how VGLMs/VGAMs are well suited for
-fitting regression models for categorical data. Their primary strength
-is a simple and unified framework which, when reflected in software,
-makes practical CDA more understandable and efficient. Furthermore,
-there are natural extensions such as a reduced-rank variant and
-covariate-specific~$\eta_{j}$. The \VGAM{}~package thus offers
-a wide selection of models and utilities.
-
-
-There is much future work to do.
-Some useful additions to the package include:
-\begin{enumerate}
-
-\item
-Bias-reduction \citep{firt:1993} is a method for removing the~$O(n^{-1})$
-bias from a maximum likelihood estimate. For a substantial class of
-models including GLMs it can be formulated in terms of a minor adjustment
-of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
-One by-product, for logistic regression, is that while the maximum
-likelihood estimate (MLE) can be infinite, the adjustment leads to
-estimates that are always finite. At present the \R{}~package \pkg{brglm}
-\citep{Kosmidis:2008} implements bias-reduction for a number of models.
-Bias-reduction might be implemented by adding an argument
-\texttt{bred = FALSE}, say, to some existing \VGAM{} family functions.
-
-
-\item
-Nested logit models were developed to overcome a fundamental shortcoming
-related to the multinomial logit model, viz.~the independence of
-irrelevant alternatives~(IIA) assumption. Roughly, the multinomial logit
-model assumes the ratio of the choice probabilities of two alternatives
-is not dependent on the presence or absence of other alternatives in
-the model. This presents problems that are often illustrated by the
-famed red bus-blue bus problem.
-
-
-
-
-\item
-The generalized estimating equations (GEE) methodology is largely
-amenable to IRLS and this should be added to the package in the future
-\citep{wild:yee:1996}.
-
-
-\item
-For logistic regression \proglang{SAS}'s \code{proc logistic} gives
-a warning if the data are {completely separated} or {quasi-completely
-separated}. The effect of such data is that some regression
-coefficients tend to~$\pm\infty$. With such data, all (to my knowledge)
-\R{}~implementations give warnings that are vague, if any at all, and
-this is rather unacceptable \citep{alli:2004}; a small sketch
-illustrating the problem is given after this list.
-The \pkg{safeBinaryRegression} package
-\citep{Konis:2009} overloads \code{glm()} so that a check for the
-existence of the MLE is made before fitting a binary response GLM.
-
-
-\end{enumerate}
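-As promised in the last item above, here is a small sketch of
-complete separation, for which \code{glm()} gives only a vague warning:
-\begin{Code}
-sdata <- data.frame(x2 = 1:10, y = rep(0:1, each = 5))  # Separated at x2 = 5.5
-fit.sep <- glm(y ~ x2, family = binomial, data = sdata)  # Warning only
-coef(fit.sep)  # Very large coefficients; the MLE does not exist
-\end{Code}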
-
-
-In closing, the \pkg{VGAM} package is continually being developed;
-therefore some future changes in the implementation details and usage
-may occur. These may include non-backward-compatible changes (see the
-\code{NEWS} file). Further documentation and updates are available at
-the author's homepage, whose URL is given in the \code{DESCRIPTION} file.
-
-
-
-% ----------------------------------------------------------------------
-\section*{Acknowledgments}
-
-The author thanks Micah Altman, David Firth and Bill Venables for helpful
-conversations, and Ioannis Kosmidis for a reprint.
-Thanks also to The Institute for Quantitative Social Science at Harvard
-University for their hospitality while this document was written during a
-sabbatical visit.
-
-
-
-
-
-\bibliography{categoricalVGAMbib}
-
-\end{document}
-
-
-
-
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
deleted file mode 100644
index 3883457..0000000
Binary files a/inst/doc/categoricalVGAM.pdf and /dev/null differ
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index 0aa625e..0801dce 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -65,7 +65,7 @@ Lange, K. (2002)
\examples{
ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925)
-fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identity", init.p = 0.9), trace = TRUE)
+fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identitylink", init.p = 0.9), trace = TRUE)
fit <- vglm(ymat ~ 1, AB.Ab.aB.ab, trace = TRUE)
rbind(ymat, sum(ymat)*fitted(fit))
Coef(fit) # Estimated p
diff --git a/man/ABO.Rd b/man/ABO.Rd
index 3a25744..c102aec 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -76,7 +76,7 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
}
\examples{
ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name
-fit <- vglm(ymat ~ 1, ABO(link = identity), trace = TRUE, cri = "coef")
+fit <- vglm(ymat ~ 1, ABO(link = identitylink), trace = TRUE, cri = "coef")
coef(fit, matrix = TRUE)
Coef(fit) # Estimated pA and pB
rbind(ymat, sum(ymat) * fitted(fit))
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index 849c5a6..9889bab 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -143,13 +143,13 @@ Regression and time series model selection in small samples,
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
coef(fit1, matrix = TRUE)
AIC(fit1)
AICc(fit1) # Quick way
AIC(fit1, corrected = TRUE) # Slow way
(fit2 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+ cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
coef(fit2, matrix = TRUE)
AIC(fit2)
AICc(fit2)
diff --git a/man/BICvlm.Rd b/man/BICvlm.Rd
index 4a05a43..10e8879 100644
--- a/man/BICvlm.Rd
+++ b/man/BICvlm.Rd
@@ -102,11 +102,11 @@ BICvlm(object, \dots, k = log(nobs(object)))
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
coef(fit1, matrix = TRUE)
BIC(fit1)
(fit2 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+ cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
coef(fit2, matrix = TRUE)
BIC(fit2)
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
index 01e959b..c3037fd 100644
--- a/man/Coef.qrrvglm-class.Rd
+++ b/man/Coef.qrrvglm-class.Rd
@@ -102,10 +102,12 @@ canonical Gaussian ordination.
\seealso{
\code{\link{Coef.qrrvglm}},
\code{\link{cqo}},
-% \code{qrrvglm-class},
\code{print.Coef.qrrvglm}.
+% \code{qrrvglm-class},
+
+
}
\examples{
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
index d4d8230..f954f15 100644
--- a/man/Coef.qrrvglm.Rd
+++ b/man/Coef.qrrvglm.Rd
@@ -37,7 +37,7 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
the sites scores (latent variables) are made uncorrelated.
See below for further details.
-% If \code{EqualTolerances=FALSE}, then transformations occur so that
+% If \code{eq.tolerances=FALSE}, then transformations occur so that
% the reference species has a tolerance matrix equal to the rank-\eqn{R}
% identity matrix.
@@ -46,7 +46,7 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
}
\details{
- If \code{ITolerances=TRUE} or \code{EqualTolerances=TRUE} (and its
+ If \code{I.tolerances=TRUE} or \code{eq.tolerances=TRUE} (and its
estimated tolerance matrix is positive-definite) then all species'
tolerances are unity by transformation or by definition, and the spread
of the site scores can be compared to them. Vice versa, if one wishes
@@ -72,11 +72,13 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
\value{
The \bold{A}, \bold{B1}, \bold{C}, \bold{T}, \bold{D} matrices/arrays
are returned, along with other slots.
-% For UQO, \bold{C} is undefined.
The returned object has class \code{"Coef.qrrvglm"}
(see \code{\link{Coef.qrrvglm-class}}).
+% For UQO, \bold{C} is undefined.
+
+
}
\references{
diff --git a/man/Coef.rrvglm-class.Rd b/man/Coef.rrvglm-class.Rd
index 5987656..9ce80a5 100644
--- a/man/Coef.rrvglm-class.Rd
+++ b/man/Coef.rrvglm-class.Rd
@@ -66,7 +66,7 @@ Reduced-rank vector generalized linear models.
\examples{
# Rank-1 stereotype model of Anderson (1984)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, data = pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
# print(Coef(fit), digits = 3)
diff --git a/man/Coef.rrvglm.Rd b/man/Coef.rrvglm.Rd
index 6cd793e..2fe0164 100644
--- a/man/Coef.rrvglm.Rd
+++ b/man/Coef.rrvglm.Rd
@@ -55,7 +55,7 @@ This function is an alternative to \code{coef.rrvglm}.
\examples{
# Rank-1 stereotype model of Anderson (1984)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, data = pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 98aaeac..2000bcc 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -16,7 +16,7 @@
\usage{
TypicalVGAMfamilyFunction(lsigma = "loge",
isigma = NULL,
- link.list = list("(Default)" = "identity",
+ link.list = list("(Default)" = "identitylink",
x2 = "loge",
x3 = "logoff",
x4 = "mlogit",
@@ -67,7 +67,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
These two arguments allow many such links and extra arguments
to be inputted more easily.
One has something like
- \code{link.list = list("(Default)" = "identity", x2 = "loge", x3 = "logoff")}
+ \code{link.list = list("(Default)" = "identitylink", x2 = "loge", x3 = "logoff")}
and
\code{earg.list = list("(Default)" = list(), x2 = list(), x3 = "list(offset = -1)")}.
Then any unnamed terms will have the default link with its
@@ -438,7 +438,7 @@ cumulative(link = "probit", reverse = TRUE, parallel = TRUE)
wdata <- data.frame(x2 = runif(nn <- 1000))
wdata <- transform(wdata,
y = rweibull(nn, shape = 2 + exp(1 + x2), scale = exp(-0.5)))
-fit <- vglm(y ~ x2, weibull(lshape = logoff(offset = -2), zero = 2), wdata)
+fit <- vglm(y ~ x2, weibull(lshape = logoff(offset = -2), zero = 2), data = wdata)
coef(fit, mat = TRUE)
# Example 3; multivariate (multiple) response
@@ -447,30 +447,30 @@ ndata <- data.frame(x = runif(nn <- 500))
ndata <- transform(ndata,
y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)), # k is size
y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
-fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), ndata)
+fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), data = ndata)
coef(fit, matrix = TRUE)
}
# Example 4
\dontrun{
# fit1 and fit2 are equivalent
fit1 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
- cumulative(parallel = FALSE ~ 1 + x3 + x5), mydataframe)
+ cumulative(parallel = FALSE ~ 1 + x3 + x5), data = cdata)
fit2 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
- cumulative(parallel = TRUE ~ x2 + x4), mydataframe)
+ cumulative(parallel = TRUE ~ x2 + x4), data = cdata)
}
# Example 5
-gdata <- data.frame(x2 = rnorm(nn <- 200))
-gdata <- transform(gdata,
+udata <- data.frame(x2 = rnorm(nn <- 200))
+udata <- transform(udata,
y1 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
y2 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1)))
args(uninormal)
-fit1 <- vglm(y1 ~ x2, uninormal, gdata) # This is okay
-fit2 <- vglm(y2 ~ x2, uninormal(zero = 2), gdata) # This is okay
+fit1 <- vglm(y1 ~ x2, uninormal, data = udata) # This is okay
+fit2 <- vglm(y2 ~ x2, uninormal(zero = 2), data = udata) # This is okay
# This creates potential conflict
clist <- list("(Intercept)" = diag(2), "x2" = diag(2))
-fit3 <- vglm(y2 ~ x2, uninormal(zero = 2), gdata,
+fit3 <- vglm(y2 ~ x2, uninormal(zero = 2), data = udata,
constraints = clist) # Conflict!
coef(fit3, matrix = TRUE) # Shows that clist[["x2"]] was overwritten,
constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg
@@ -478,9 +478,9 @@ constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg
# Example 6 ('whitespace' argument)
pneumo <- transform(pneumo, let = log(exposure.time))
fit1 <- vglm(cbind(normal, mild, severe) ~ let,
- sratio(whitespace = FALSE, parallel = TRUE), pneumo)
+ sratio(whitespace = FALSE, parallel = TRUE), data = pneumo)
fit2 <- vglm(cbind(normal, mild, severe) ~ let,
- sratio(whitespace = TRUE, parallel = TRUE), pneumo)
+ sratio(whitespace = TRUE, parallel = TRUE), data = pneumo)
head(predict(fit1), 2) # No white spaces
head(predict(fit2), 2) # Uses white spaces
}
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
index a4ece90..bfa9eb7 100644
--- a/man/G1G2G3.Rd
+++ b/man/G1G2G3.Rd
@@ -75,7 +75,7 @@ ymat <- cbind(108, 196, 429, 143, 513, 559)
fit <- vglm(ymat ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
fit <- vglm(ymat ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
trace = TRUE, crit = "coef")
-fit <- vglm(ymat ~ 1, G1G2G3(link = "identity"), trace = TRUE)
+fit <- vglm(ymat ~ 1, G1G2G3(link = "identitylink"), trace = TRUE)
Coef(fit) # Estimated p1, p2 and f
rbind(ymat, sum(ymat)*fitted(fit))
sqrt(diag(vcov(fit)))
diff --git a/man/Huggins89.t1.Rd b/man/Huggins89.t1.Rd
index 8e5248b..6c41801 100644
--- a/man/Huggins89.t1.Rd
+++ b/man/Huggins89.t1.Rd
@@ -56,22 +56,22 @@ On the statistical analysis of capture experiments.
%% ~~ possibly secondary sources and usages ~~
}
\examples{
-Huggins89table1 <- transform(Huggins89table1, x3.tij = t1,
- T2 = t2, T3 = t3, T4 = t4, T5 = t5, T6 = t6,
- T7 = t7, T8 = t8, T9 = t9, T10 = t10)
+Huggins89table1 <- transform(Huggins89table1, x3.tij = t01,
+ T02 = t02, T03 = t03, T04 = t04, T05 = t05, T06 = t06,
+ T07 = t07, T08 = t08, T09 = t09, T10 = t10)
small.table1 <- subset(Huggins89table1,
- y1 + y2 + y3 + y4 + y5 + y6 + y7 + y8 + y9 + y10 > 0)
+ y01 + y02 + y03 + y04 + y05 + y06 + y07 + y08 + y09 + y10 > 0)
# fit.tbh is the bottom equation on p.133.
# It is a M_tbh model.
fit.tbh <-
- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2 + x3.tij,
- xij = list(x3.tij ~ t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9 + t10 +
- T2 + T3 + T4 + T5 + T6 + T7 + T8 + T9 + T10 - 1),
+ vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + x3.tij,
+ xij = list(x3.tij ~ t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 +
+ T02 + T03 + T04 + T05 + T06 + T07 + T08 + T09 + T10 - 1),
posbernoulli.tb(parallel.t = TRUE ~ x2 + x3.tij),
data = small.table1, trace = TRUE,
form2 = ~ x2 + x3.tij +
- t1 + t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9 + t10 +
- T2 + T3 + T4 + T5 + T6 + T7 + T8 + T9 + T10)
+ t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 + t09 + t10 +
+ T02 + T03 + T04 + T05 + T06 + T07 + T08 + T09 + T10)
# These results differ a bit from Huggins (1989), probably because
# two animals had to be removed here (they were never caught):
@@ -82,7 +82,7 @@ summary(fit.tbh, presid = FALSE)
fit.tbh@extra$N.hat # Estimate of the population size N; cf. 20.86
fit.tbh@extra$SE.N.hat # Its standard error; cf. 1.87 or 4.51
-fit.th <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+fit.th <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
posbernoulli.t, data = small.table1, trace = TRUE)
coef(fit.th)
constraints(fit.th)
@@ -91,7 +91,7 @@ summary(fit.th, presid = FALSE)
fit.th@extra$N.hat # Estimate of the population size N
fit.th@extra$SE.N.hat # Its standard error
-fit.bh <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+fit.bh <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
posbernoulli.b(I2 = FALSE), data = small.table1, trace = TRUE)
coef(fit.bh)
constraints(fit.bh)
@@ -100,7 +100,7 @@ summary(fit.bh, presid = FALSE)
fit.bh@extra$N.hat
fit.bh@extra$SE.N.hat
-fit.h <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+fit.h <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
posbernoulli.b, data = small.table1, trace = TRUE)
coef(fit.h, matrix = TRUE) # M_h model (version 1)
coef(fit.h)
@@ -108,7 +108,7 @@ summary(fit.h, presid = FALSE)
fit.h@extra$N.hat
fit.h@extra$SE.N.hat
-Fit.h <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+Fit.h <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2,
posbernoulli.t(parallel.t = TRUE ~ x2),
data = small.table1, trace = TRUE)
coef(Fit.h)
@@ -141,13 +141,13 @@ Fit.h@extra$SE.N.hat
% x2 = cbind(rep(1, len = 2*tau-1)),
% Zedd = cbind(rep(1, len = 2*tau-1)))
%fit.tbh <-
-% vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2 + Zedd,
-% xij = list(Zedd ~ z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 +
-% Z2 + Z3 + Z4 + Z5 + Z6 + Z7 + Z8 + Z9 + Z10 - 1),
+% vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + Zedd,
+% xij = list(Zedd ~ z01 + z02 + z03 + z04 + z05 + z06 + z07 + z08 + z09 + z10 +
+% Z02 + Z03 + Z04 + Z05 + Z06 + Z07 + Z08 + Z09 + Z10 - 1),
% posbernoulli.tb, data = small.t1, trace = TRUE,
% constraints = Hlist,
% form2 = ~ x2 + Zedd +
-% z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 +
-% Z2 + Z3 + Z4 + Z5 + Z6 + Z7 + Z8 + Z9 + Z10)
+% z01 + z02 + z03 + z04 + z05 + z06 + z07 + z08 + z09 + z10 +
+% Z02 + Z03 + Z04 + Z05 + Z06 + Z07 + Z08 + Z09 + Z10)
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index e974937..b8ea845 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -34,10 +34,12 @@ rinv.gaussian(n, mu, lambda)
\value{
\code{dinv.gaussian} gives the density,
\code{pinv.gaussian} gives the distribution function, and
-% \code{qinv.gaussian} gives the quantile function, and
\code{rinv.gaussian} generates random deviates.
+% \code{qinv.gaussian} gives the quantile function, and
+
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
diff --git a/man/Links.Rd b/man/Links.Rd
index 9f12a46..1a2cdd9 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -199,6 +199,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
\code{\link{rrvglm}}.
\code{\link{cqo}},
\code{\link{cao}}.
+
+
% \code{\link{uqo}}.
@@ -217,7 +219,7 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
vector of choices.
For example, rather than
\code{binomialff(link = c("logit", "probit", "cloglog",
- "cauchit", "identity"), ...)}
+ "cauchit", "identitylink"), ...)}
it is now
\code{binomialff(link = "logit", ...)}
No checking will be done to see if the user's choice is reasonable.
@@ -273,7 +275,7 @@ fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua) # not okay
# No matter what the link, the estimated var-cov matrix is the same
y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))
-fit1 <- vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
+fit1 <- vglm(y ~ 1, beta.ab(lshape1 = "identitylink", lshape2 = "identitylink"),
trace = TRUE, crit = "coef")
fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff(offset = 1.1),
lshape2 = logoff(offset = 1.1)),
diff --git a/man/MNSs.Rd b/man/MNSs.Rd
index c082d16..f07c9cd 100644
--- a/man/MNSs.Rd
+++ b/man/MNSs.Rd
@@ -38,6 +38,7 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Elandt-Johnson, R. C. (1971)
diff --git a/man/QvarUC.Rd b/man/QvarUC.Rd
index b1112a0..6b692e6 100644
--- a/man/QvarUC.Rd
+++ b/man/QvarUC.Rd
@@ -238,18 +238,18 @@ fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
# Example 2: a model with M > 1 linear predictors
-\dontrun{ require(VGAMdata)
+\dontrun{ require("VGAMdata")
xs.nz.f <- subset(xs.nz, sex == "F")
xs.nz.f <- subset(xs.nz.f, !is.na(babies) & !is.na(age) & !is.na(ethnic))
xs.nz.f$babies <- as.numeric(as.character(xs.nz.f$babies))
xs.nz.f <- subset(xs.nz.f, babies >= 0)
xs.nz.f <- subset(xs.nz.f, as.numeric(as.character(ethnic)) <= 2)
-clist <- list("bs(age, df = 4)" = rbind(1, 0),
- "bs(age, df = 3)" = rbind(0, 1),
+clist <- list("sm.bs(age, df = 4)" = rbind(1, 0),
+ "sm.bs(age, df = 3)" = rbind(0, 1),
"ethnic" = diag(2),
"(Intercept)" = diag(2))
-fit1 <- vglm(babies ~ bs(age, df = 4) + bs(age, df = 3) + ethnic,
+fit1 <- vglm(babies ~ sm.bs(age, df = 4) + sm.bs(age, df = 3) + ethnic,
zipoissonff(zero = NULL), xs.nz.f,
constraints = clist, trace = TRUE)
Fit1 <- rcim(Qvar(fit1, "ethnic", which.linpred = 1),
diff --git a/man/Select.Rd b/man/Select.Rd
new file mode 100644
index 0000000..45fccf7
--- /dev/null
+++ b/man/Select.Rd
@@ -0,0 +1,262 @@
+\name{Select}
+\alias{Select}
+\alias{subsetc}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+ Select variables for a formula response or the RHS of a formula
+
+
+%% ~~function to do ... ~~
+}
+\description{
+ Select variables from a data frame whose names
+ begin with a certain character string.
+
+
+%% ~~ A concise (1-5 lines) description of what the function does. ~~
+}
+\usage{
+Select(data = list(), prefix = "y",
+ lhs = NULL, rhs = NULL, rhs2 = NULL, rhs3 = NULL,
+ as.character = FALSE, as.formula.arg = FALSE, tilde = TRUE,
+ exclude = NULL, sort.arg = TRUE)
+subsetc(data = list(), prefix = "y",
+ lhs = NULL, rhs = NULL, rhs2 = NULL, rhs3 = NULL,
+ as.character = FALSE, as.formula.arg = FALSE, tilde = TRUE,
+ exclude = NULL, sort.arg = TRUE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{data}{
+ A data frame or a matrix.
+
+
+%% ~~Describe \code{data} here~~
+}
+\item{prefix}{
+ A vector of character strings, or a logical.
+ If a character then
+ the variables chosen from \code{data} begin with the
+ value of \code{prefix}.
+ If a logical then
+ only \code{TRUE} is accepted and all the variables
+ in \code{data} are chosen.
+
+
+
+%% ~~Describe \code{prefix} here~~
+}
+\item{lhs}{
+ A character string.
+ The response of a formula.
+
+
+%% ~~Describe \code{lhs} here~~
+}
+\item{rhs}{
+ A character string.
+ Included as part of the RHS of a formula.
+ Set \code{rhs = "0"} to suppress the intercept.
+
+
+%% ~~Describe \code{rhs} here~~
+}
+\item{rhs2, rhs3}{
+ Same as \code{rhs} but appended to its RHS,
+ i.e., \code{paste0(rhs, " + ", rhs2, " + ", rhs3)}.
+ If used, \code{rhs} should be used first,
+ and then possibly \code{rhs2}
+ and then possibly \code{rhs3}.
+
+
+
+
+%% ~~Describe \code{rhs} here~~
+}
+\item{as.character}{
+ Logical.
+ Return the answer as a character string?
+
+
+%% ~~Describe \code{as.character} here~~
+}
+\item{as.formula.arg}{
+ Logical.
+ Is the answer a formula?
+
+
+%% ~~Describe \code{as.formula.arg} here~~
+}
+\item{tilde}{
+ Logical.
+ If \code{as.character} and \code{as.formula.arg}
+ are both \code{TRUE}
+ then include the tilde in the formula?
+
+
+}
+\item{exclude}{
+ Vector of character strings.
+ Exclude these variables explicitly.
+
+
+%% ~~Describe \code{exclude} here~~
+}
+\item{sort.arg}{
+ Logical.
+ Sort the variables?
+
+
+%% ~~Describe \code{sort.arg} here~~
+}
+}
+\details{
+ This is meant as a utility function to avoid manually:
+ (i) making a \code{\link[base]{cbind}} call to construct
+ a big matrix response,
+ and
+ (ii) constructing a formula involving a lot of terms.
+ The savings can be made because the variables of interest
+ begin with some prefix, e.g., with the character \code{"y"}.
+
+
+
+}
+\value{
+ If \code{as.character = FALSE} and
+ \code{as.formula.arg = FALSE} then a matrix such
+ as \code{cbind(y1, y2, y3)}.
+ If \code{as.character = TRUE} and
+ \code{as.formula.arg = FALSE} then a character string such
+ as \code{"cbind(y1, y2, y3)"}.
+
+
+
+ If \code{as.character = FALSE} and
+ \code{as.formula.arg = TRUE} then a \code{\link[stats]{formula}} such
+ as \code{lhs ~ y1 + y2 + y3}.
+ If \code{as.character = TRUE} and
+ \code{as.formula.arg = TRUE} then a character string such
+ as \code{"lhs ~ y1 + y2 + y3"}.
+ See the examples below.
+ By default, if no variables beginning with the value of \code{prefix}
+ are found then a \code{NULL} is returned.
+ Setting \code{prefix = " "} is a way of selecting no variables.
+
+
+
+
+%% ~Describe the value returned
+%% If it is a LIST, use
+%% \item{comp1 }{Description of 'comp1'}
+%% \item{comp2 }{Description of 'comp2'}
+%% ...
+}
+%%\references{
+%% ~put references to the literature/web site here ~
+%%}
+\author{
+ T. W. Yee.
+
+
+%% ~~who you are~~
+}
+\note{
+ This function is a bit experimental at this stage and
+ may change in the near future.
+ Some of its utility may be better achieved using
+ \code{\link[base]{subset}} and its \code{select} argument,
+ e.g., \code{subset(pdata, TRUE, select = y01:y10)}.
+
+
+
+ For some models such as \code{\link{posbernoulli.t}} the
+ order of the variables in the \code{xij} argument is
+ crucial, therefore care must be taken with the
+ argument \code{sort.arg}.
+ In some instances, it may be good to rename variables
+ \code{y1} to \code{y01},
+ \code{y2} to \code{y02}, etc.
+ when there are variables such as
+ \code{y14}.
+
+
+
+ Currently \code{subsetc()} and \code{Select()} are identical.
+ One of these functions might be withdrawn in the future.
+
+
+
+%% ~~further notes~~
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{vglm}},
+ \code{\link[base]{cbind}},
+ \code{\link[base]{subset}},
+ \code{\link[stats]{formula}},
+ \code{\link{fill}}.
+
+
+
+
+%% ~~objects to See Also as \code{\link{help}}, ~~~
+}
+\examples{
+Pneumo <- pneumo
+colnames(Pneumo) <- c("y1", "y2", "y3", "x2") # The "y" variables are response
+Pneumo$x1 <- 1; Pneumo$x3 <- 3; Pneumo$x <- 0; Pneumo$x4 <- 4 # Add these
+
+Select(data = Pneumo) # Same as with(Pneumo, cbind(y1, y2, y3))
+Select(Pneumo, "x")
+Select(Pneumo, "x", sort = FALSE, as.char = TRUE)
+Select(Pneumo, "x", exclude = "x1")
+Select(Pneumo, "x", exclude = "x1", as.char = TRUE)
+Select(Pneumo, c("x", "y"))
+Select(Pneumo, "z") # Now returns a NULL
+Select(Pneumo, " ") # Now returns a NULL
+Select(Pneumo, prefix = TRUE, as.formula = TRUE)
+Select(Pneumo, "x", exclude = c("x3", "x1"), as.formula = TRUE,
+ lhs = "cbind(y1, y2, y3)", rhs = "0")
+Select(Pneumo, "x", exclude = "x1", as.formula = TRUE, as.char = TRUE,
+ lhs = "cbind(y1, y2, y3)", rhs = "0")
+
+# Now a 'real' example:
+Huggins89table1 <- transform(Huggins89table1, x3.tij = t01)
+tab1 <- subset(Huggins89table1,
+ rowSums(Select(Huggins89table1, "y")) > 0)
+# Same as
+# subset(Huggins89table1, y1 + y2 + y3 + y4 + y5 + y6 + y7 + y8 + y9 + y10 > 0)
+
+# Long way to do it:
+fit.th <-
+ vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10) ~ x2 + x3.tij,
+ xij = list(x3.tij ~ t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 +
+ t09 + t10 - 1),
+ posbernoulli.t(parallel.t = TRUE ~ x2 + x3.tij),
+ data = tab1, trace = TRUE,
+ form2 = ~ x2 + x3.tij + t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 +
+ t09 + t10)
+# Short way to do it:
+Fit.th <- vglm(Select(tab1, "y", sort = FALSE) ~ x2 + x3.tij,
+ xij = list(Select(tab1, "t", as.formula = TRUE,
+ sort = FALSE, lhs = "x3.tij", rhs = "0")),
+ posbernoulli.t(parallel.t = TRUE ~ x2 + x3.tij),
+ data = tab1, trace = TRUE,
+ form2 = Select(tab1, prefix = TRUE, as.formula = TRUE))
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{models}
+\keyword{regression}
+
+
+
+% 20140524; For Fit.th before prefix = TRUE was allowed:
+% form2 = Select(tab1, "t", as.formula = TRUE,
+% rhs = "x2 + x3.tij"))
+
+
+
diff --git a/man/SurvS4-class.Rd b/man/SurvS4-class.Rd
index 3af8361..2d833c8 100644
--- a/man/SurvS4-class.Rd
+++ b/man/SurvS4-class.Rd
@@ -49,8 +49,12 @@ Class \code{"\linkS4class{vector}"}, by class "matrix", distance 4, with explici
\seealso{
\code{\link{SurvS4}}.
+
+
% or \code{\linkS4class{CLASSNAME}} for links to other classes
+
+
}
\examples{
showClass("SurvS4")
diff --git a/man/SurvS4.Rd b/man/SurvS4.Rd
index d65b9cb..981603e 100644
--- a/man/SurvS4.Rd
+++ b/man/SurvS4.Rd
@@ -160,12 +160,14 @@ coding in this case.
\seealso{
\code{\link{SurvS4-class}},
\code{\link{cenpoisson}},
-% \code{\link[survival]{coxph}},
-% \code{\link[survival]{survfit}},
\code{\link[survival]{survreg}},
\code{\link{leukemia}}.
+% \code{\link[survival]{coxph}},
+% \code{\link[survival]{survfit}},
+
+
}
\examples{
with(leukemia, SurvS4(time, status))
diff --git a/man/Tol.Rd b/man/Tol.Rd
index 573af56..ee7b20c 100644
--- a/man/Tol.Rd
+++ b/man/Tol.Rd
@@ -27,7 +27,7 @@ Tol(object, ...)
Many models have no such notion or definition.
- Tolerances occur in quadratic ordination, i.e., CQO. % or UQO.
+ Tolerances occur in quadratic ordination, i.e., CQO and UQO.
They have ecological meaning because a high tolerance
for a species means the species can survive over a large
environmental range (stenoecous species), whereas a
@@ -81,7 +81,7 @@ Constrained additive ordination.
For rank-\emph{R>1} models it becomes more complicated because
the latent variables are also uncorrelated. An important
argument when fitting quadratic ordination models is whether
- \code{EqualTolerances} is \code{TRUE} or \code{FALSE}.
+ \code{eq.tolerances} is \code{TRUE} or \code{FALSE}.
See Yee (2004) for details.
@@ -91,14 +91,17 @@ Constrained additive ordination.
\seealso{
\code{Tol.qrrvglm}.
\code{\link{Max}},
- \code{\link{Opt}}.
+ \code{\link{Opt}},
+ \code{\link{cqo}},
+ \code{\link{rcim}} for UQO.
}
\examples{
+\dontrun{
set.seed(111) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars
p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
@@ -107,6 +110,7 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Tol(p1)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index bbb9f85..a91b4e3 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -192,7 +192,7 @@ contains some further information and examples.
\examples{
# Example 1; proportional odds model
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
depvar(fit1) # Better than using fit1 at y; dependent variable (response)
weights(fit1, type = "prior") # Number of observations
coef(fit1, matrix = TRUE) # p.179, in McCullagh and Nelder (1989)
@@ -206,7 +206,7 @@ zdata <- transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
lambda = loge( 0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0))
with(zdata, table(y))
-fit2 <- vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
+fit2 <- vglm(y ~ x2, zipoisson, data = zdata, trace = TRUE)
coef(fit2, matrix = TRUE) # These should agree with the above values
diff --git a/man/acat.Rd b/man/acat.Rd
index 76c07c1..20fc1cb 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -66,6 +66,7 @@ acat(link = "loge", parallel = FALSE, reverse = FALSE,
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Agresti, A. (2002)
@@ -127,7 +128,7 @@ contains further information and examples.
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, acat, pneumo))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
model.matrix(fit)
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 367b7b3..598d0bc 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -12,19 +12,19 @@
}
\usage{
-alaplace1(tau = NULL, llocation = "identity",
+alaplace1(tau = NULL, llocation = "identitylink",
ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
dfmu.init = 3, intparloc = FALSE, imethod = 1)
-alaplace2(tau = NULL, llocation = "identity", lscale = "loge",
+alaplace2(tau = NULL, llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL, kappa = sqrt(tau/(1 - tau)),
shrinkage.init = 0.95,
parallelLocation = FALSE, digt = 4, eq.scale = TRUE,
dfmu.init = 3, intparloc = FALSE,
imethod = 1, zero = -2)
-alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
+alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
ilocation = NULL, iscale = NULL, ikappa = 1,
imethod = 1, zero = 2:3)
}
@@ -284,7 +284,8 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\code{\link{laplace}},
\code{\link{lms.bcn}},
\code{\link{amlnormal}},
- \code{\link{koenker}}.
+ \code{\link{koenker}},
+ \code{\link{simulate.vlm}}.
}
@@ -299,7 +300,7 @@ mytau <- c(0.25, 0.75); mydof <- 4
fit <- vgam(y ~ s(x, df = mydof),
alaplace1(tau = mytau, llocation = "loge",
- parallelLoc = FALSE), adata, trace = TRUE)
+ parallelLoc = FALSE), data = adata, trace = TRUE)
fitp <- vgam(y ~ s(x, df = mydof), data = adata, trace = TRUE,
alaplace1(tau = mytau, llocation = "loge", parallelLoc = TRUE))
@@ -320,15 +321,15 @@ fit@extra # Contains useful information
# Example 2: regression quantile at a new tau value from an existing fit
# Nb. regression splines are used here since it is easier.
-fitp2 <- vglm(y ~ bs(x, df = mydof),
+fitp2 <- vglm(y ~ sm.bs(x, df = mydof),
family = alaplace1(tau = mytau, llocation = "loge",
parallelLoc = TRUE),
- adata, trace = TRUE)
+ data = adata, trace = TRUE)
newtau <- 0.5 # Want to refit the model with this tau value
fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[,1]),
family = alaplace1(tau = newtau, llocation = "loge"),
- adata)
+ data = adata)
with(adata, plot(x, jitter(y, factor = 0.5), col = "orange",
pch = "o", cex = 0.75, ylab = "y",
main = "Example 2; parallelLoc = TRUE"))
@@ -352,8 +353,8 @@ for (ii in 1:length(mytau)) {
adata <- transform(adata, usey = y-offsety)
iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0) # Well-chosen!
mydf <- ifelse(ii == 1, 5, 3) # Maybe less smoothing will help
- lloc <- ifelse(ii == 1, "identity", "loge") # 2nd value must be "loge"
- fit3 <- vglm(usey ~ ns(x, df = mydf), data = adata, trace = TRUE,
+ lloc <- ifelse(ii == 1, "identitylink", "loge") # 2nd value must be "loge"
+ fit3 <- vglm(usey ~ sm.ns(x, df = mydf), data = adata, trace = TRUE,
alaplace1(tau = usetau[ii], lloc = lloc, iloc = iloc))
answer[,ii] <- (if(ii == 1) 0 else answer[,ii-1]) + fitted(fit3)
adata <- transform(adata, offsety = answer[,ii])
diff --git a/man/amh.Rd b/man/amh.Rd
index f8fd4a3..92c53b3 100644
--- a/man/amh.Rd
+++ b/man/amh.Rd
@@ -96,7 +96,8 @@ New York: Springer.
\seealso{
\code{\link{ramh}},
\code{\link{fgm}},
- \code{\link{bigumbelI}}.
+ \code{\link{bigumbelI}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 04552a2..7e05a01 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -44,7 +44,6 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
\details{
The general methodology behind this \pkg{VGAM} family function
is given in Efron (1992) and full details can be obtained there.
-% Equation numbers below refer to that article.
This model is essentially a logistic regression model
(see \code{\link{binomialff}}) but the usual deviance is replaced by an
asymmetric squared error loss function; it is multiplied by
@@ -54,6 +53,11 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
the \code{weights} argument (so that it can contain frequencies).
Newton-Raphson estimation is used here.
+
+% Equation numbers below refer to that article.
+
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -62,12 +66,15 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
}
\references{
+
Efron, B. (1992)
Poisson overdispersion estimates based on the method of
asymmetric maximum likelihood.
\emph{Journal of the American Statistical Association},
\bold{87}, 98--107.
+
+
}
\author{ Thomas W. Yee }
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 51d2431..1cee24e 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -11,7 +11,7 @@
}
\usage{
-amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
+amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identitylink",
iexpectile = NULL, imethod = 1, digw = 4)
}
%- maybe also 'usage' for other objects documented here.
@@ -64,12 +64,14 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
\code{weights} argument (so that it can contain frequencies).
Newton-Raphson estimation is used here.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Efron, B. (1991)
@@ -77,6 +79,7 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
\emph{Statistica Sinica},
\bold{1}, 93--125.
+
}
\author{ Thomas W. Yee }
@@ -142,7 +145,7 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
# Example 1
ooo <- with(bmi.nz, order(age))
bmi.nz <- bmi.nz[ooo,] # Sort by age
-(fit <- vglm(BMI ~ bs(age), fam = amlnormal(w.aml = 0.1), bmi.nz))
+(fit <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w.aml = 0.1), bmi.nz))
fit@extra # Gives the w value and the percentile
coef(fit, matrix = TRUE)
@@ -155,7 +158,7 @@ with(bmi.nz, lines(age, c(fitted(fit)), col = "black"))
# Example 2
# Find the w values that give the 25, 50 and 75 percentiles
findw <- function(w, percentile = 50) {
- fit2 <- vglm(BMI ~ bs(age), fam = amlnormal(w = w), data = bmi.nz)
+ fit2 <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w = w), data = bmi.nz)
fit2 at extra$percentile - percentile
}
# Quantile plot
@@ -164,7 +167,7 @@ with(bmi.nz, plot(age, BMI, col = "blue", las = 1, main =
for (myp in c(25, 50, 75)) {
# Note: uniroot() can only find one root at a time
bestw <- uniroot(f = findw, interval = c(1/10^4, 10^4), percentile = myp)
- fit2 <- vglm(BMI ~ bs(age), fam = amlnormal(w = bestw$root), data = bmi.nz)
+ fit2 <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w = bestw$root), data = bmi.nz)
with(bmi.nz, lines(age, c(fitted(fit2)), col = "red"))
}
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 036a30c..1ce7476 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -66,6 +66,7 @@ amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Efron, B. (1991)
diff --git a/man/benfUC.Rd b/man/benfUC.Rd
index 3b9823e..7a0c6cb 100644
--- a/man/benfUC.Rd
+++ b/man/benfUC.Rd
@@ -90,6 +90,7 @@ fraud detection in accounting and the design of computers.
\code{qbenf} gives the quantile function, and
\code{rbenf} generates random deviates.
+
}
\references{
@@ -119,7 +120,7 @@ dbenf(x <- c(0:10, NA, NaN, -Inf, Inf))
pbenf(x)
\dontrun{
-xx = 1:9; # par(mfrow=c(2,1))
+xx <- 1:9
barplot(dbenf(xx), col = "lightblue", las = 1, xlab = "Leading digit",
ylab = "Probability", names.arg = as.character(xx),
main = paste("Benford's distribution", sep = ""))
diff --git a/man/benini.Rd b/man/benini.Rd
index 79d02df..1d50fc5 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -93,7 +93,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
y0 <- 1; nn <- 3000
bdata <- data.frame(y = rbenini(nn, y0 = y0, shape = exp(2)))
-fit <- vglm(y ~ 1, benini(y0 = y0), bdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, benini(y0 = y0), data = bdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
fit at extra$y0
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 5d70814..8272443 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -126,20 +126,21 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
\code{\link{betaprime}},
\code{\link{rbetageom}},
\code{\link{rbetanorm}},
- \code{\link{kumar}}.
+ \code{\link{kumar}},
+ \code{\link{simulate.vlm}}.
}
\examples{
bdata <- data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)))
-fit <- vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
+fit <- vglm(y ~ 1, beta.ab(lshape1 = "identitylink", lshape2 = "identitylink"),
data = bdata, trace = TRUE, crit = "coef")
-fit <- vglm(y ~ 1, beta.ab, bdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, beta.ab, data = bdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
-fit <- vglm(Y ~ 1, beta.ab(A = 5, B = 13), bdata, trace = TRUE)
+fit <- vglm(Y ~ 1, beta.ab(A = 5, B = 13), data = bdata, trace = TRUE)
Coef(fit)
c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
}
diff --git a/man/betaII.Rd b/man/betaII.Rd
index d0fe269..57ed59a 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -94,9 +94,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
bdata <- data.frame(y = rsinmad(2000, shape1.a = 1, exp(2), exp(1))) # Not genuine data!
-fit <- vglm(y ~ 1, betaII, bdata, trace = TRUE)
+fit <- vglm(y ~ 1, betaII, data = bdata, trace = TRUE)
fit <- vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7),
- bdata, trace = TRUE)
+ data = bdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index 62c3f64..c00b4e3 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -71,10 +71,12 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
\value{
\code{dbetabinom} and \code{dbetabinom.ab} give the density,
\code{pbetabinom} and \code{pbetabinom.ab} give the distribution function, and
-% \code{qbetabinom} and \code{qbetabinom.ab} gives the quantile function, and
\code{rbetabinom} and \code{rbetabinom.ab} generate random deviates.
+% \code{qbetabinom} and \code{qbetabinom.ab} gives the quantile function, and
+
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index bb5074d..c68c33c 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -187,7 +187,8 @@ betabinomial(lmu = "logit", lrho = "logit",
\code{\link{binomialff}},
\code{\link{betaff}},
\code{\link{dirmultinomial}},
- \code{\link{lirat}}.
+ \code{\link{lirat}},
+ \code{\link{simulate.vlm}}.
}
@@ -196,7 +197,7 @@ betabinomial(lmu = "logit", lrho = "logit",
bdata <- data.frame(N = 10, mu = 0.5, rho = 0.8)
bdata <- transform(bdata,
y = rbetabinom(n = 100, size = N, prob = mu, rho = rho))
-fit <- vglm(cbind(y, N-y) ~ 1, betabinomial, bdata, trace = TRUE)
+fit <- vglm(cbind(y, N-y) ~ 1, betabinomial, data = bdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(cbind(depvar(fit), weights(fit, type = "prior")))
diff --git a/man/betabinomial.ab.Rd b/man/betabinomial.ab.Rd
index 1bce1e8..f7de53c 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomial.ab.Rd
@@ -181,7 +181,8 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
\code{\link{binomialff}},
\code{\link{betaff}},
\code{\link{dirmultinomial}},
- \code{\link{lirat}}.
+ \code{\link{lirat}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 58ce807..64a4bec 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -127,13 +127,14 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
\code{\link{rbetageom}},
\code{\link{rbetanorm}},
\code{\link{kumar}},
- \code{\link{elogit}}.
+ \code{\link{elogit}},
+ \code{\link{simulate.vlm}}.
}
\examples{
bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0), shape2 = exp(1)))
-fit1 <- vglm(y ~ 1, betaff, bdata, trace = TRUE)
+fit1 <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1) # Useful for intercept-only models
diff --git a/man/betageomUC.Rd b/man/betageomUC.Rd
index 3672833..10a9aff 100644
--- a/man/betageomUC.Rd
+++ b/man/betageomUC.Rd
@@ -44,10 +44,12 @@ rbetageom(n, shape1, shape2)
\value{
\code{dbetageom} gives the density,
\code{pbetageom} gives the distribution function, and
-% \code{qbetageom} gives the quantile function, and
\code{rbetageom} generates random deviates.
+% \code{qbetageom} gives the quantile function, and
+
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index 6e49f63..05894b8 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -120,8 +120,8 @@ betageometric(lprob = "logit", lshape = "loge",
}
\examples{
bdata <- data.frame(y = 0:11, wts = c(227,123,72,42,21,31,11,14,6,4,7,28))
-fitb <- vglm(y ~ 1, betageometric, bdata, weight = wts, trace = TRUE)
-fitg <- vglm(y ~ 1, geometric, bdata, weight = wts, trace = TRUE)
+fitb <- vglm(y ~ 1, betageometric, data = bdata, weight = wts, trace = TRUE)
+fitg <- vglm(y ~ 1, geometric, data = bdata, weight = wts, trace = TRUE)
coef(fitb, matrix = TRUE)
Coef(fitb)
sqrt(diag(vcov(fitb, untransform = TRUE)))
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
index c95874f..070b936 100644
--- a/man/betaprime.Rd
+++ b/man/betaprime.Rd
@@ -6,6 +6,7 @@
Estimation of the two shape parameters of the beta-prime distribution
by maximum likelihood estimation.
+
}
\usage{
betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
diff --git a/man/frank.Rd b/man/bifrankcop.Rd
similarity index 98%
rename from man/frank.Rd
rename to man/bifrankcop.Rd
index 01fd41f..bd420f1 100644
--- a/man/frank.Rd
+++ b/man/bifrankcop.Rd
@@ -97,7 +97,8 @@ Frank's family of bivariate distributions.
\seealso{
\code{\link{rbifrankcop}},
- \code{\link{fgm}}.
+ \code{\link{fgm}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/frankUC.Rd b/man/bifrankcopUC.Rd
similarity index 100%
rename from man/frankUC.Rd
rename to man/bifrankcopUC.Rd
diff --git a/man/bigumbelI.Rd b/man/bigumbelI.Rd
index 32e5c4b..5e20f4b 100644
--- a/man/bigumbelI.Rd
+++ b/man/bigumbelI.Rd
@@ -9,7 +9,7 @@
}
\usage{
-bigumbelI(lapar = "identity", iapar = NULL, imethod = 1)
+bigumbelI(lapar = "identitylink", iapar = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -93,7 +93,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
nn <- 1000
gdata <- data.frame(y1 = rexp(nn), y2 = rexp(nn))
\dontrun{ with(gdata, plot(cbind(y1, y2))) }
-fit <- vglm(cbind(y1, y2) ~ 1, fam = bigumbelI, gdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, fam = bigumbelI, data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
index a9661b9..9bbd788 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic4.Rd
@@ -8,7 +8,7 @@
}
\usage{
-bilogistic4(llocation = "identity", lscale = "loge",
+bilogistic4(llocation = "identitylink", lscale = "loge",
iloc1 = NULL, iscale1 = NULL, iloc2 = NULL, iscale2 = NULL,
imethod = 1, zero = NULL)
}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 510b0a7..6f4d00e 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -229,7 +229,7 @@ legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
# Another model: pet ownership
-\dontrun{ require(VGAMdata)
+\dontrun{ require("VGAMdata")
# More homogeneous:
petdata <- subset(xs.nz, ethnic == "0" & age < 70 & sex == "M")
petdata <- na.omit(petdata[, c("cat", "dog", "age")])
diff --git a/man/binom2.orUC.Rd b/man/binom2.orUC.Rd
index 0704033..39af990 100644
--- a/man/binom2.orUC.Rd
+++ b/man/binom2.orUC.Rd
@@ -11,12 +11,12 @@
}
\usage{
rbinom2.or(n, mu1,
- mu2 = if(exchangeable) mu1 else stop("argument 'mu2' not specified"),
+ mu2 = if (exchangeable) mu1 else stop("argument 'mu2' not specified"),
oratio = 1, exchangeable = FALSE, tol = 0.001, twoCols = TRUE,
- colnames = if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
+ colnames = if (twoCols) c("y1","y2") else c("00", "01", "10", "11"),
ErrorCheck = TRUE)
dbinom2.or(mu1,
- mu2 = if(exchangeable) mu1 else stop("'mu2' not specified"),
+ mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
oratio = 1, exchangeable = FALSE, tol = 0.001,
colnames = c("00", "01", "10", "11"), ErrorCheck = TRUE)
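
For reference, a minimal sketch of how the simulator documented above
might be exercised (hypothetical values; argument names as in the usage
block):

# Sketch: exchangeable bivariate binary pairs with odds ratio 3.
# Column names default to c("y1", "y2") because twoCols = TRUE.
ymat <- rbinom2.or(n = 1000, mu1 = 0.5, oratio = 3, exchangeable = TRUE)
colMeans(ymat)  # Both columns should be near mu1 = 0.5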
diff --git a/man/binom2.rhoUC.Rd b/man/binom2.rhoUC.Rd
index 4c119fc..0bc8ae1 100644
--- a/man/binom2.rhoUC.Rd
+++ b/man/binom2.rhoUC.Rd
@@ -11,12 +11,12 @@
}
\usage{
rbinom2.rho(n, mu1,
- mu2 = if(exchangeable) mu1 else stop("argument 'mu2' not specified"),
+ mu2 = if (exchangeable) mu1 else stop("argument 'mu2' not specified"),
rho = 0, exchangeable = FALSE, twoCols = TRUE,
- colnames = if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
+ colnames = if (twoCols) c("y1","y2") else c("00", "01", "10", "11"),
ErrorCheck = TRUE)
dbinom2.rho(mu1,
- mu2 = if(exchangeable) mu1 else stop("'mu2' not specified"),
+ mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
rho = 0, exchangeable = FALSE,
colnames = c("00", "01", "10", "11"), ErrorCheck = TRUE)
@@ -109,7 +109,7 @@ bdata <- transform(bdata, mu1 = probit(-2+4*x2, inverse = TRUE),
mu2 = probit(-1+3*x2, inverse = TRUE))
dmat <- with(bdata, dbinom2.rho(mu1, mu2, myrho))
ymat <- with(bdata, rbinom2.rho(nn, mu1, mu2, myrho))
-fit2 <- vglm(ymat ~ x2, binom2.rho, bdata)
+fit2 <- vglm(ymat ~ x2, binom2.rho, data = bdata)
coef(fit2, matrix = TRUE)
\dontrun{ matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4,
type = "l", main = "Joint probabilities",
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index 3cdf4b2..a83c30f 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -206,6 +206,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
\code{\link{amlbinomial}},
\code{\link{simplex}},
\code{\link[stats:Binomial]{binomial}},
+ \code{\link{simulate.vlm}},
\pkg{safeBinaryRegression}.
@@ -244,8 +245,9 @@ with(shunua, lines(altitude, fitted(fit), col = "orange", lwd = 2)) }
# Fit two species simultaneously
fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(mv = TRUE), shunua)
+\dontrun{
with(shunua, matplot(altitude, fitted(fit2), type = "l",
- main = "Two species response curves", las = 1))
+ main = "Two species response curves", las = 1)) }
# Shows that Fisher scoring can sometime fail. See Ridout (1990).
diff --git a/man/binormal.Rd b/man/binormal.Rd
index d0945c8..d349f05 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -8,7 +8,7 @@
}
\usage{
-binormal(lmean1 = "identity", lmean2 = "identity",
+binormal(lmean1 = "identitylink", lmean2 = "identitylink",
lsd1 = "loge", lsd2 = "loge",
lrho = "rhobit",
imean1 = NULL, imean2 = NULL,
diff --git a/man/binormalUC.Rd b/man/binormalUC.Rd
index 2035675..ad738d5 100644
--- a/man/binormalUC.Rd
+++ b/man/binormalUC.Rd
@@ -57,10 +57,13 @@ rbinorm(n, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
\value{
\code{dbinorm} gives the density,
\code{pbinorm} gives the cumulative distribution function,
-% \code{qnorm2} gives the quantile function, and
\code{rbinorm} generates random deviates (\eqn{n} by 2 matrix).
+% \code{qnorm2} gives the quantile function, and
+
+
+
}
% \author{ T. W. Yee }
\details{
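
A quick sanity check of the value described above might run as follows
(a sketch; the covariance check is an assumed property of the
parameterization, not taken from the man page):

# Sketch: rbinorm() returns an n-by-2 matrix of bivariate normal deviates.
ymat <- rbinorm(n = 10000, mean1 = 1, mean2 = 2, var1 = 1, var2 = 4,
                cov12 = 1.5)
round(cov(ymat), 2)  # Off-diagonal entries should be near cov12 = 1.5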
diff --git a/man/binormalcop.Rd b/man/binormalcop.Rd
index 366bf66..0eb1d64 100644
--- a/man/binormalcop.Rd
+++ b/man/binormalcop.Rd
@@ -126,7 +126,7 @@ ymat <- rbinormcop(n = nn, rho = with(bdata, rho))
bdata <- transform(bdata, y5 = ymat[, 1],
y6 = ymat[, 2])
fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata,
- binormalcop(lrho = "identity"), trace = TRUE)
+ binormalcop(lrho = "identitylink"), trace = TRUE)
\dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) }
}
\keyword{models}
diff --git a/man/bistudenttUC.Rd b/man/bistudenttUC.Rd
index c7e081d..de7b1dc 100644
--- a/man/bistudenttUC.Rd
+++ b/man/bistudenttUC.Rd
@@ -5,11 +5,13 @@
\title{Bivariate Student-t distribution cumulative distribution function}
\description{
Density
+ for the bivariate Student-t distribution.
+
% cumulative distribution function
% quantile function
% and
% random generation
- for the bivariate Student-t distribution distribution.
+
}
\usage{
@@ -47,6 +49,8 @@ dbistudentt(x1, x2, df, rho = 0, log = FALSE)
}
\value{
\code{dbistudentt} gives the density.
+
+
% \code{pnorm2} gives the cumulative distribution function,
% \code{qnorm2} gives the quantile function, and
% \code{rbistudentt} generates random deviates (\eqn{n} by 2 matrix).
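
A minimal sketch of evaluating this density, assuming the usage shown
above:

# Sketch: evaluate the bivariate Student-t density over a small grid.
dgrid <- expand.grid(x1 = seq(-2, 2, by = 0.5), x2 = seq(-2, 2, by = 0.5))
dens <- with(dgrid, dbistudentt(x1, x2, df = 4, rho = 0.5))
dgrid[which.max(dens), ]  # The mode should be at the origin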
diff --git a/man/bivgamma.mckay.Rd b/man/bivgamma.mckay.Rd
index 8b96b2f..cf4ac8f 100644
--- a/man/bivgamma.mckay.Rd
+++ b/man/bivgamma.mckay.Rd
@@ -124,7 +124,7 @@ shape1 <- exp(1); shape2 <- exp(2); scalepar <- exp(3)
mdata <- data.frame(y1 = rgamma(nn <- 1000, shape = shape1, scale = scalepar))
mdata <- transform(mdata, zedd = rgamma(nn, shape = shape2, scale = scalepar))
mdata <- transform(mdata, y2 = y1 + zedd) # Z is defined as Y2-y1|Y1=y1
-fit <- vglm(cbind(y1, y2) ~ 1, bigamma.mckay, mdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, bigamma.mckay, data = mdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
diff --git a/man/borel.tanner.Rd b/man/borel.tanner.Rd
index f37a55e..299b8c0 100644
--- a/man/borel.tanner.Rd
+++ b/man/borel.tanner.Rd
@@ -6,6 +6,7 @@
Estimates the parameter of a Borel-Tanner distribution
by maximum likelihood estimation.
+
}
\usage{
borel.tanner(Qsize = 1, link = "logit", imethod = 1)
@@ -13,19 +14,23 @@ borel.tanner(Qsize = 1, link = "logit", imethod = 1)
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Qsize}{
- A positive integer. It is called \eqn{Q} below and is the initial
- queue size.
+ A positive integer.
+ It is called \eqn{Q} below and is the initial queue size.
+ The default value \eqn{Q = 1} corresponds to the Borel distribution.
+
}
\item{link}{
Link function for the parameter;
see \code{\link{Links}} for more choices and for general information.
+
}
\item{imethod}{
See \code{\link{CommonVGAMffArguments}}.
Valid values are 1, 2, 3 or 4.
+
}
}
\details{
@@ -61,6 +66,7 @@ borel.tanner(Qsize = 1, link = "logit", imethod = 1)
The distribution has a very long tail unless \eqn{a} is small.
Fisher scoring is implemented.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -110,7 +116,7 @@ Boston: Birkhauser.
}
\examples{
bdata <- data.frame(y = rbort(n <- 200))
-fit <- vglm(y ~ 1, borel.tanner, bdata, trace = TRUE, crit = "c")
+fit <- vglm(y ~ 1, borel.tanner, data = bdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/bortUC.Rd b/man/bortUC.Rd
index 6a3aab9..2ceba33 100644
--- a/man/bortUC.Rd
+++ b/man/bortUC.Rd
@@ -7,16 +7,21 @@
\title{The Borel-Tanner Distribution}
\description{
Density
-% distribution function, quantile function
and random generation for the Borel-Tanner distribution.
+
+% distribution function, quantile function
+
+
}
\usage{
dbort(x, Qsize = 1, a = 0.5, log = FALSE)
-%pbort(q, Qsize = 1, a = 0.5)
-%qbort(p, Qsize = 1, a = 0.5)
rbort(n, Qsize = 1, a = 0.5)
}
+
+%pbort(q, Qsize = 1, a = 0.5)
+%qbort(p, Qsize = 1, a = 0.5)
+
\arguments{
\item{x}{vector of quantiles.}
% \item{p}{vector of probabilities.}
@@ -24,19 +29,26 @@ rbort(n, Qsize = 1, a = 0.5)
Must be a positive integer of length 1.}
\item{Qsize, a}{
See \code{\link{borel.tanner}}.
+
+
}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+
}
}
\value{
\code{dbort} gives the density,
+ \code{rbort} generates random deviates.
+
+
% \code{pbort} gives the distribution function,
% \code{qbort} gives the quantile function, and
- \code{rbort} generates random deviates.
+
}
\author{ T. W. Yee }
@@ -45,6 +57,7 @@ rbort(n, Qsize = 1, a = 0.5)
for estimating the parameter,
for the formula of the probability density function and other details.
+
}
\section{Warning }{
Looping is used for \code{\link{rbort}}, therefore
@@ -52,19 +65,18 @@ rbort(n, Qsize = 1, a = 0.5)
computational times.
The default value of \code{a} is subjective.
+
}
\seealso{
\code{\link{borel.tanner}}.
+
}
\examples{
-\dontrun{
-qsize = 1; a = 0.5
-x = qsize:(qsize+10)
-plot(x, dbort(x, qsize, a), type="h", las=1, col="blue",
- ylab=paste("fbort(qsize=", qsize, ", a=", a, ")"),
- main="Borel-Tanner density function")
-}
+\dontrun{ qsize <- 1; a <- 0.5; x <- qsize:(qsize+10)
+plot(x, dbort(x, qsize, a), type = "h", las = 1, col = "blue",
+ ylab = paste("fbort(qsize=", qsize, ", a=", a, ")"),
+ main = "Borel-Tanner density function") }
}
\keyword{distribution}
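
As borel.tanner.Rd notes above, the default Qsize = 1 corresponds to the
Borel distribution. A small numerical check (sketch; the truncation
point 200 is arbitrary):

x <- 1:200
sum(dbort(x, Qsize = 1, a = 0.5))  # Should be very close to 1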
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index c8c0aa2..084cb4c 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -154,10 +154,10 @@ Cambridge.
hspider[,1:6] <- scale(hspider[, 1:6]) # Standardize the environmental variables
set.seed(123)
p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, Rank = 1,
- IToler = TRUE, Crow1positive = TRUE)
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ I.toler = TRUE, Crow1positive = TRUE)
siteNos <- 3:4 # Calibrate these sites
cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index 3a42df2..1ae6504 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -101,13 +101,13 @@ On constrained and unconstrained quadratic ordination.
}
\examples{
-\dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Needed when ITol = TRUE
+\dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6]) # Needed when I.tol = TRUE
set.seed(123)
p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
Pardpull, Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig +
CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, ITol = TRUE)
+ family = poissonff, data = hspider, I.tol = TRUE)
sort(p1@misc$deviance.Bestof) # A history of all the iterations
siteNos <- 3:4 # Calibrate these sites
diff --git a/man/cao.Rd b/man/cao.Rd
index 8d13d63..519c5a7 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -320,7 +320,8 @@ Constrained additive ordination.
\code{\link{gamma2}},
\code{\link{gaussianff}},
\code{\link[base:Random]{set.seed}},
- \code{\link[gam]{gam}}.
+ \code{\link[gam]{gam}},
+ \code{\link[VGAMdata]{trapO}}.
}
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index c816b58..a465fb5 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -131,7 +131,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
% Numerical and positive-valued vector of length \bold{C}
% (recycled if necessary). Passed into \code{optim(...,
% control = list(parscale = Parscale))}; the elements of \bold{C} become
-% \bold{C} / \code{Parscale}. Setting \code{ITolerances = TRUE} results
+% \bold{C} / \code{Parscale}. Setting \code{I.tolerances = TRUE} results
% in line searches that are very large, therefore \bold{C} has to be
% scaled accordingly to avoid large step sizes.
@@ -282,15 +282,19 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
}
\references{
+
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
Green, P. J. and Silverman, B. W. (1994)
-\emph{Nonparametric Regression and Generalized Linear Models: A
- Roughness Penalty Approach},
+\emph{Nonparametric Regression and Generalized Linear Models:
+ A Roughness Penalty Approach},
London: Chapman & Hall.
+
}
\author{T. W. Yee}
\note{
@@ -298,15 +302,19 @@ London: Chapman & Hall.
spp2 = 3, 2.5)}, say, meaning the default value is 2.5, but two species
have alternative values.
+
If \code{spar1 = 0} and \code{df1.nl = 0} then this represents fitting
linear functions (CLO). Currently, this is handled in the awkward
manner of setting \code{df1.nl} to be a small positive value, so that
the smooth is almost linear but not quite.
A proper fix to this special case should be done in the near future.
+
}
\seealso{
\code{\link{cao}}.
+
+
}
\examples{\dontrun{
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index cf4f833..53666bf 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -100,7 +100,7 @@ Singapore: World Scientific.
\examples{
\dontrun{
cdata <- data.frame(y = rcard(n = 1000, mu = 4, rho = 0.45))
-fit <- vglm(y ~ 1, cardioid, cdata, trace = TRUE)
+fit <- vglm(y ~ 1, cardioid, data = cdata, trace = TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
c(with(cdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 102755b..782d468 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -9,11 +9,11 @@
}
\usage{
-cauchy(llocation = "identity", lscale = "loge",
+cauchy(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by = 0.2),
imethod = 1, nsimEIM = NULL, zero = 2)
-cauchy1(scale.arg = 1, llocation = "identity",
+cauchy1(scale.arg = 1, llocation = "identitylink",
ilocation = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -133,27 +133,27 @@ Observed versus expected Fisher information.
\seealso{
\code{\link[stats:Cauchy]{Cauchy}},
\code{\link{cauchit}},
- \code{\link{studentt}}.
+ \code{\link{studentt}},
+ \code{\link{simulate.vlm}}.
}
\examples{
# Both location and scale parameters unknown
-cdata1 <- data.frame(x = runif(nn <- 1000))
-cdata1 <- transform(cdata1, loc = exp(1+0.5*x), scale = exp(1))
-cdata1 <- transform(cdata1, y = rcauchy(nn, loc, scale))
-fit <- vglm(y ~ x, cauchy(lloc = "loge"), cdata1, trace = TRUE)
-coef(fit, matrix = TRUE)
-head(fitted(fit)) # Location estimates
-summary(fit)
+set.seed(123)
+cdata <- data.frame(x2 = runif(nn <- 1000))
+cdata <- transform(cdata, loc = exp(1 + 0.5 * x2), scale = exp(1))
+cdata <- transform(cdata, y2 = rcauchy(nn, loc, scale))
+fit2 <- vglm(y2 ~ x2, cauchy(lloc = "loge"), data = cdata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+head(fitted(fit2)) # Location estimates
+summary(fit2)
# Location parameter unknown
-set.seed(123)
-cdata2 <- data.frame(x = runif(nn <- 500))
-cdata2 <- transform(cdata2, loc = 1 + 0.5 * x, scale = 0.4)
-cdata2 <- transform(cdata2, y = rcauchy(nn, loc, scale))
-fit <- vglm(y ~ x, cauchy1(scale = 0.4), cdata2, trace = TRUE, crit = "coef")
-coef(fit, matrix = TRUE)
+cdata <- transform(cdata, scale1 = 0.4)
+cdata <- transform(cdata, y1 = rcauchy(nn, loc, scale1))
+fit1 <- vglm(y1 ~ x2, cauchy1(scale = 0.4), data = cdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/cennormal.Rd b/man/cennormal.Rd
index 75d4e09..d232a66 100644
--- a/man/cennormal.Rd
+++ b/man/cennormal.Rd
@@ -11,7 +11,7 @@
}
\usage{
-cennormal(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
+cennormal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -101,9 +101,9 @@ cdata <- transform(cdata, y = pmin(U, y)) # Right censored
with(cdata, hist(y))
Extra <- list(leftcensored = with(cdata, ystar < L),
rightcensored = with(cdata, ystar > U))
-fit1 <- vglm(y ~ x2, cennormal, cdata, crit = "c", extra = Extra, trace = TRUE)
+fit1 <- vglm(y ~ x2, cennormal, data = cdata, crit = "c", extra = Extra, trace = TRUE)
fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
- cdata, crit = "c", trace = TRUE)
+ data = cdata, crit = "c", trace = TRUE)
coef(fit1, matrix = TRUE)
max(abs(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE))) # Should be 0
names(fit1@extra)
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index 0eca698..46d9a1b 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -88,7 +88,7 @@ cdata <- transform(cdata, status = ifelse(rcensored, 0, 1))
with(cdata, table(cy))
with(cdata, table(rcensored))
with(cdata, table(ii <- print(SurvS4(cy, status)))) # Check; U+ means >= U
-fit <- vglm(SurvS4(cy, status) ~ 1, cenpoisson, cdata, trace = TRUE)
+fit <- vglm(SurvS4(cy, status) ~ 1, cenpoisson, data = cdata, trace = TRUE)
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check; U+ means >= U
@@ -101,7 +101,7 @@ cdata <- transform(cdata, status = ifelse(lcensored, 0, 1))
with(cdata, table(cY))
with(cdata, table(lcensored))
with(cdata, table(ii <- print(SurvS4(cY, status, type = "left")))) # Check
-fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cenpoisson, cdata, trace = TRUE)
+fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cenpoisson, data = cdata, trace = TRUE)
coef(fit, matrix = TRUE)
@@ -121,7 +121,7 @@ cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)] # Unch
with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check
fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
- cenpoisson, cdata, trace = TRUE)
+ cenpoisson, data = cdata, trace = TRUE)
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check
@@ -135,7 +135,7 @@ with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status,
type = "interval")))) # Check
fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
- cenpoisson, cdata, trace = TRUE, crit = "c")
+ cenpoisson, data = cdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check
}
diff --git a/man/cfibrosis.Rd b/man/cfibrosis.Rd
new file mode 100644
index 0000000..8fe2e6c
--- /dev/null
+++ b/man/cfibrosis.Rd
@@ -0,0 +1,71 @@
+\name{cfibrosis}
+\alias{cfibrosis}
+\docType{data}
+\title{ Cystic Fibrosis Data
+%% ~~ data name/kind ... ~~
+
+}
+\description{
+ This data frame concerns family data
+ on cystic fibrosis.
+
+
+}
+\usage{
+data(cfibrosis)
+}
+
+\format{
+ A data frame with 24 rows and the following 4 variables.
+
+\describe{
+ \item{siblings, affected, ascertained, families}{
+ Over the ascertained families, the \eqn{k}th family
+ has \eqn{s_k} siblings, of whom \eqn{r_k}
+ are affected and \eqn{a_k} are ascertained.
+
+ }
+}
+
+}
+\details{
+
+ The data set allows a classical segregation analysis
+ to be performed. In particular, it can be used
+ to test Mendelian segregation ratios in nuclear family data.
+The likelihood has similarities with \code{\link{seq2binomial}}.
+
+
+
+%% ~~ If necessary, more details than the __description__ above ~~
+}
+\source{
+
+ The data is originally from Crow (1965) and
+ appears as Table 2.3 of Lange (2002).
+
+
+Crow, J. F. (1965)
+Problems of ascertainment in the analysis of family data.
+Epidemiology and Genetics of Chronic Disease.
+Public Health Service Publication 1163,
+Neel J. V., Shaw M. W., Schull W. J., editors,
+Department of Health, Education, and Welfare, Washington, DC,
+USA.
+
+
+Lange, K. (2002)
+Mathematical and Statistical Methods for Genetic Analysis.
+Second Edition.
+Springer-Verlag: New York, USA.
+
+
+}
+\examples{
+cfibrosis
+summary(cfibrosis)
+}
+\keyword{datasets}
+
+
+
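
A small exploratory sketch for these data (column semantics as
documented above; weighting by 'families' is an assumption):

with(cfibrosis, tapply(affected * families, siblings, sum) /
                tapply(siblings * families, siblings, sum))
# Crude proportion of affected siblings, by sibship size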
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
index 6215520..063955c 100644
--- a/man/cgumbel.Rd
+++ b/man/cgumbel.Rd
@@ -10,7 +10,7 @@
}
\usage{
-cgumbel(llocation = "identity", lscale = "loge",
+cgumbel(llocation = "identitylink", lscale = "loge",
iscale = NULL, mean = TRUE, percentiles = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/chinese.nz.Rd b/man/chinese.nz.Rd
index fc3e120..c6dc226 100644
--- a/man/chinese.nz.Rd
+++ b/man/chinese.nz.Rd
@@ -62,9 +62,12 @@ plot(female / (male + female) ~ year, chinese.nz, type = "b",
main = "Proportion of NZ Chinese that are female")
abline(h = 0.5, lty = "dashed", col = "gray")
-fit1.cnz <- vglm(cbind(female, male) ~ year, binomialff, chinese.nz)
-fit2.cnz <- vglm(cbind(female, male) ~ poly(year, 2), binomialff, chinese.nz)
-fit4.cnz <- vglm(cbind(female, male) ~ bs(year, 5), binomialff, chinese.nz)
+fit1.cnz <- vglm(cbind(female, male) ~ year, binomialff,
+ data = chinese.nz)
+fit2.cnz <- vglm(cbind(female, male) ~ sm.poly(year, 2), binomialff,
+ data = chinese.nz)
+fit4.cnz <- vglm(cbind(female, male) ~ sm.bs(year, 5), binomialff,
+ data = chinese.nz)
lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple", lty = 1)
lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green", lty = 2)
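
The switch from bs()/poly() to sm.bs()/sm.poly() here matches the other
man-page updates in this release; presumably the sm.* wrappers take the
same arguments as splines::bs() and stats::poly(). A sketch (fit3.cnz is
a hypothetical name, not part of the example):

fit3.cnz <- vglm(cbind(female, male) ~ sm.poly(year, 3), binomialff,
                 data = chinese.nz)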
diff --git a/man/chisq.Rd b/man/chisq.Rd
index 51afbc5..7bd8124 100644
--- a/man/chisq.Rd
+++ b/man/chisq.Rd
@@ -58,7 +58,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
cdata <- data.frame(x2 = runif(nn <- 1000))
cdata <- transform(cdata, y1 = rchisq(nn, df = exp(1 - 1 * x2)),
y2 = rchisq(nn, df = exp(2 - 2 * x2)))
-fit <- vglm(cbind(y1, y2) ~ x2, chisq, cdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, chisq, data = cdata, trace = TRUE)
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/clo.Rd b/man/clo.Rd
index feda2fc..9bb514c 100644
--- a/man/clo.Rd
+++ b/man/clo.Rd
@@ -30,6 +30,9 @@ The new CLO/CQO/CAO nomenclature described in Yee (2006).
}
\value{
Nothing is returned; an error message is issued.
+
+
+
}
\references{
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index 7564f9a..223c92c 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -103,8 +103,8 @@ cloglog(p, bvalue = .Machine$double.eps) # Has no NAs
\dontrun{
p <- seq(0.01, 0.99, by = 0.01)
-plot(p, logit(p), type = "l", col = "limegreen", ylab = "transformation",
- lwd = 2, las = 1, main = "Some probability link functions")
+plot(p, logit(p), type = "l", col = "limegreen", lwd = 2, las = 1,
+ main = "Some probability link functions", ylab = "transformation")
lines(p, probit(p), col = "purple", lwd = 2)
lines(p, cloglog(p), col = "chocolate", lwd = 2)
lines(p, cauchit(p), col = "tan", lwd = 2)
@@ -117,14 +117,15 @@ legend(0.1, 4, c("logit", "probit", "cloglog", "cauchit"),
# This example shows that a cloglog link is preferred over the logit
n <- 500; p <- 5; S <- 3; Rank <- 1 # Species packing model:
mydata <- rcqo(n, p, S, eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE,
- family = "binomial", hi.abundance = 5, seed = 123, Rank = Rank)
-fitc <- cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+ family = "binomial", hi.abundance = 5, seed = 123,
+ Rank = Rank)
+fitc <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
fam = binomialff(mv = TRUE, link = "cloglog"), Rank = Rank)
-fitl <- cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+fitl <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
fam = binomialff(mv = TRUE, link = "logit"), Rank = Rank)
# Compare the fitted models (cols 1 and 3) with the truth (col 2)
-cbind(concoef(fitc), attr(mydata, "ccoefficients"), concoef(fitl))
+cbind(concoef(fitc), attr(mydata, "concoefficients"), concoef(fitl))
}
}
\keyword{math}
diff --git a/man/coalminers.Rd b/man/coalminers.Rd
index b3c789b..e8c5c1d 100644
--- a/man/coalminers.Rd
+++ b/man/coalminers.Rd
@@ -21,16 +21,23 @@
\details{
The data were published in Ashford and Sowden (1970).
A more recent analysis is McCullagh and Nelder (1989, Section 6.6).
+
+
+
}
\source{
Ashford, J. R. and Sowden, R. R. (1970)
Multi-variate probit analysis.
\emph{Biometrics}, \bold{26}, 535--546.
+
}
\references{
+
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}. 2nd ed. London: Chapman & Hall.
+
+
}
\examples{
str(coalminers)
diff --git a/man/coefvlm.Rd b/man/coefvlm.Rd
new file mode 100644
index 0000000..8455b31
--- /dev/null
+++ b/man/coefvlm.Rd
@@ -0,0 +1,100 @@
+\name{coefvlm}
+\alias{coefvlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Extract Model Coefficients }
+\description{
+ Extracts the estimated
+ coefficients from VLM objects such as VGLMs.
+
+
+}
+\usage{
+coefvlm(object, matrix.out = FALSE, label = TRUE, colon = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ An object for which the extraction of
+ coefficients is meaningful.
+ This will usually be a \code{\link{vglm}} object.
+
+
+ }
+ \item{matrix.out}{
+ Logical. If \code{TRUE} then a matrix is returned.
+ The explanatory variables are the rows.
+ The linear/additive predictors are the columns.
+ The constraint matrices are used to compute this matrix.
+
+
+ }
+ \item{label}{
+ Logical. If \code{FALSE} then the \code{names}
+ of the vector of coefficients are set to \code{NULL}.
+
+
+ }
+ \item{colon}{
+ Logical. Explanatory variables which appear in more than one
+ linear/additive predictor are labelled with a colon,
+ e.g., \code{age:1}, \code{age:2}.
+ However, if a variable appears in only one linear/additive predictor
+ then the \code{:1} is omitted by default;
+ setting \code{colon = TRUE} adds the \code{:1}.
+
+
+ }
+}
+\details{
+ This function works in a similar way to
+ applying \code{coef()} to a \code{\link[stats]{lm}}
+ or \code{\link[stats]{glm}} object.
+ However, for VGLMs, there are more options available.
+
+
+}
+\value{
+ A vector usually.
+ A matrix if \code{matrix.out = TRUE}.
+
+
+}
+\references{
+Yee, T. W. and Hastie, T. J. (2003)
+Reduced-rank vector generalized linear models.
+\emph{Statistical Modelling},
+\bold{3}, 15--41.
+
+
+}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+%\section{Warning }{
+
+%}
+
+\seealso{
+ \code{\link{vglm}},
+ \code{\link[stats]{coef}}.
+
+
+% \code{\link{coef-method}},
+
+
+}
+\examples{
+zdata <- data.frame(x2 = runif(nn <- 200))
+zdata <- transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
+ lambda = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y2 = rzipois(nn, lambda, pstr0 = pstr0))
+
+fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), data = zdata, trace = TRUE)
+coef(fit2, matrix = TRUE) # Always a good idea
+coef(fit2)
+coef(fit2, colon = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
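
Continuing the example above, the remaining arguments might be exercised
as follows (a sketch using the fit2 object defined in the example):

coefvlm(fit2, matrix.out = TRUE)  # Same as coef(fit2, matrix = TRUE)
coefvlm(fit2, label = FALSE)      # Coefficient vector without names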
diff --git a/man/concoef.Rd b/man/concoef.Rd
index 388d6cc..e703b07 100644
--- a/man/concoef.Rd
+++ b/man/concoef.Rd
@@ -73,11 +73,11 @@ Constrained additive ordination.
scaling of the latent variables (site scores) and the tolerances.
One normalization is for the latent variables to have unit variance.
Another normalization is for all the species' tolerances to be
- unit (provided \code{EqualTolerances} is \code{TRUE}). These two
+ unit (provided \code{eq.tolerances} is \code{TRUE}). These two
normalizations cannot simultaneously hold in general. For rank
\eqn{R} models with \eqn{R>1} it becomes more complicated because
the latent variables are also uncorrelated. An important argument when
- fitting quadratic ordination models is whether \code{EqualTolerances}
+ fitting quadratic ordination models is whether \code{eq.tolerances}
is \code{TRUE} or \code{FALSE}. See Yee (2004) for details.
diff --git a/man/constraints.Rd b/man/constraints.Rd
index c781a01..d82de25 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -160,23 +160,22 @@ information.
\examples{
# Fit the proportional odds model:
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit1 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ sm.bs(let, 3),
+ cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
coef(fit1, matrix = TRUE)
constraints(fit1) # Parallel assumption results in this
-constraints(fit1, type = "term") # This is the same as the default ("vlm"-type)
+constraints(fit1, type = "term") # Same as the default ("vlm"-type)
is.parallel(fit1)
# An equivalent model to fit1 (needs the type "term" constraints):
-clist.term <- constraints(fit1, type = "term") # The "term"-type constraints
-(fit2 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
- cumulative(reverse = TRUE), pneumo, constraints = clist.term))
+clist.term <- constraints(fit1, type = "term") # "term"-type constraints
+(fit2 <- vglm(cbind(normal, mild, severe) ~ sm.bs(let, 3), data = pneumo,
+ cumulative(reverse = TRUE), constraints = clist.term))
abs(max(coef(fit1, matrix = TRUE) -
coef(fit2, matrix = TRUE))) # Should be zero
# Fit a rank-1 stereotype (RR-multinomial logit) model:
-data(car.all)
-fit <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all, Rank = 1)
+fit <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all)
constraints(fit) # All except the first are the estimated A matrix
}
\keyword{models}
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 4290ee1..57c5ff9 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -110,7 +110,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
\item{offset}{
This argument must not be used.
-% especially when \code{ITolerances = TRUE}.
+% especially when \code{I.tolerances = TRUE}.
% a vector or \eqn{M}-column matrix of offset values.
% These are \emph{a priori} known and are
% added to the linear predictors during fitting.
@@ -259,6 +259,20 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
\eqn{M=S} for Poisson and binomial species data,
and \eqn{M=2S} for negative binomial and gamma distributed species data.
+
+
+ Incidentally,
+ \emph{Unconstrained quadratic ordination} (UQO)
+ may be performed by, e.g., fitting Goodman's RC association model;
+ see \code{\link{uqo}} and the Yee and Hadi (2014) referenced there.
+ For UQO, the response is the usual site-by-species matrix and
+ there are no environmental variables;
+ the site scores are free parameters.
+ UQO can be performed under the assumption that all species
+ have the same tolerance matrices.
+
+
+
}
\value{
An object of class \code{"qrrvglm"}.
@@ -270,6 +284,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
}
\references{
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
@@ -293,6 +308,7 @@ Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{
Thomas W. Yee.
@@ -342,7 +358,7 @@ original FORTRAN code into C.
\item{(5)}{
Each explanatory variable should be scaled, e.g.,
to mean 0 and unit variance.
- This is especially needed for \code{ITolerance = TRUE}.
+ This is especially needed for \code{I.tolerance = TRUE}.
}
@@ -365,11 +381,11 @@ original FORTRAN code into C.
}
\item{(8)}{
- Try \code{ITolerance = TRUE} or \code{EqualTolerance = FALSE}
+ Try \code{I.tolerance = TRUE} or \code{eq.tolerance = FALSE}
if the inputted data set is large,
so as to reduce the computational expense.
- That's because the default, \code{ITolerance = FALSE} and
- \code{EqualTolerance = TRUE}, is very memory hungry.
+ That's because the default, \code{I.tolerance = FALSE} and
+ \code{eq.tolerance = TRUE}, is very memory hungry.
}
@@ -400,8 +416,8 @@ original FORTRAN code into C.
%Convergence of QRR-VGLMs can be difficult, especially for binary
- %data. If this is so, then setting \code{ITolerances = TRUE} or
- %\code{EqualTolerances = TRUE} may help, especially when the number of sites,
+ %data. If this is so, then setting \code{I.tolerances = TRUE} or
+ %\code{eq.tolerances = TRUE} may help, especially when the number of sites,
%\eqn{n}, is small.
%If the negative binomial family function \code{\link{negbinomial}} is
@@ -416,17 +432,17 @@ original FORTRAN code into C.
\code{Rank},
\code{noRRR},
\code{Bestof},
- \code{ITolerances},
- \code{EqualTolerances},
+ \code{I.tolerances},
+ \code{eq.tolerances},
\code{isd.latvar}, and
\code{MUXfactor}.
When fitting a 2-parameter model such as the negative binomial
- or gamma, it pays to have \code{EqualTolerances = TRUE} and
- \code{ITolerances = FALSE}. This is because numerical problems can
+ or gamma, it pays to have \code{eq.tolerances = TRUE} and
+ \code{I.tolerances = FALSE}. This is because numerical problems can
occur when fitting the model far away from the global solution when
- \code{ITolerances = TRUE}. Setting the two arguments as described will
+ \code{I.tolerances = TRUE}. Setting the two arguments as described will
slow down the computation considerably, however it is numerically
more stable.
@@ -467,10 +483,10 @@ original FORTRAN code into C.
if \code{Use.Init.Poisson.QO = TRUE}, else random numbers.
- Unless \code{ITolerances = TRUE} or \code{EqualTolerances = FALSE},
+ Unless \code{I.tolerances = TRUE} or \code{eq.tolerances = FALSE},
CQO is computationally expensive with memory and time.
It pays to keep the rank down to 1
- or 2. If \code{EqualTolerances = TRUE} and \code{ITolerances = FALSE} then
+ or 2. If \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE} then
the cost grows quickly with the number of species and sites (in terms of
memory requirements and time). The data needs to conform quite closely
to the statistical model, and the environmental range of the data should
@@ -499,12 +515,16 @@ original FORTRAN code into C.
and if the response data for each species is a string of all absences,
then all presences, then all absences (when enumerated along the latent
variable) then infinite parameter estimates will occur. In general,
- setting \code{ITolerances = TRUE} may help.
+ setting \code{I.tolerances = TRUE} may help.
This function was formerly called \code{cgo}. It has been renamed to
reinforce a new nomenclature described in Yee (2006).
+
+
+
+
}
\seealso{
@@ -513,9 +533,8 @@ original FORTRAN code into C.
\code{\link{predictqrrvglm}},
\code{\link{rcqo}},
\code{\link{cao}},
-% \code{\link{uqo}},
+ \code{\link{uqo}},
\code{\link{rrvglm}},
-% \code{\link{rrvglm.control}},
\code{\link{poissonff}},
\code{\link{binomialff}},
\code{\link{negbinomial}},
@@ -523,10 +542,17 @@ original FORTRAN code into C.
\code{\link{lvplot.qrrvglm}},
\code{\link{perspqrrvglm}},
\code{\link{trplot.qrrvglm}},
-% \code{\link{vcovqrrvglm}},
\code{\link{vglm}},
\code{\link[base:Random]{set.seed}},
- \code{\link{hspider}}.
+ \code{\link{hspider}},
+ \code{\link[VGAMdata]{trapO}}.
+
+
+
+% \code{\link{rrvglm.control}},
+% \code{\link{vcovqrrvglm}},
+
+
Documentation accompanying the \pkg{VGAM} package at
@@ -545,21 +571,22 @@ p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
fam = poissonff, data = hspider, Crow1positive = FALSE,
- EqualTol = FALSE)
+ eq.tol = FALSE)
sort(p1ut@misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1ut) > 1177) warning("suboptimal fit obtained")
+if (deviance(p1ut) > 1177) warning("suboptimal fit obtained")
S <- ncol(depvar(p1ut)) # Number of species
clr <- (1:(S+1))[-7] # Omits yellow
-lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1) # Ordination diagram
+lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr,
+ las = 1) # Ordination diagram
legend("topright", leg = colnames(depvar(p1ut)), col = clr,
pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
(cp <- Coef(p1ut))
-(a <- cp@latvar[cp@latvar.order]) # The ordered site scores along the gradient
+(a <- cp@latvar[cp@latvar.order]) # Ordered site scores along the gradient
# Names of the ordered sites along the gradient:
rownames(cp@latvar)[cp@latvar.order]
-(aa <- (cp@Optimum)[,cp@Optimum.order]) # The ordered optima along the gradient
+(aa <- (cp@Optimum)[, cp@Optimum.order]) # Ordered optima along the gradient
aa <- aa[!is.na(aa)] # Delete the species that is not unimodal
names(aa) # Names of the ordered optima along the gradient
@@ -589,16 +616,16 @@ persp(p1et, col = clr, label = TRUE, las = 1)
# Example 3: A rank-2 equal tolerances CQO model with Poisson data
-# This example is numerically fraught... need IToler = TRUE too.
+# This example is numerically fraught... need I.toler = TRUE too.
set.seed(555)
p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
poissonff, data = hspider, Crow1positive = FALSE,
- IToler = TRUE, Rank = 2, Bestof = 3, isd.latvar = c(2.1, 0.9))
+ I.toler = TRUE, Rank = 2, Bestof = 3, isd.latvar = c(2.1, 0.9))
sort(p2@misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p2) > 1127) warning("suboptimal fit obtained")
+if (deviance(p2) > 1127) warning("suboptimal fit obtained")
lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey",
pcol = "blue", pch = "+", chull = TRUE, ccol = "grey")
@@ -626,13 +653,14 @@ p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- poissonff, data = hspider, EqualTol = TRUE, trace = FALSE)
+ poissonff, data = hspider, eq.tol = TRUE, trace = FALSE)
sort(p1et@misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
+if (deviance(p1et) > 1586) warning("suboptimal fit obtained")
S <- ncol(depvar(p1et))
par(mfrow = c(3, 4))
for (ii in 1:S) {
- tempdata <- data.frame(latvar1 = c(latvar(p1et)), sppCounts = depvar(p1et)[, ii])
+ tempdata <- data.frame(latvar1 = c(latvar(p1et)),
+ sppCounts = depvar(p1et)[, ii])
tempdata <- transform(tempdata, myOffset = -0.5 * latvar1^2)
# For species ii, refit the model to get the deviance residuals
@@ -640,15 +668,15 @@ for (ii in 1:S) {
data = tempdata, trace = FALSE)
# For checking: this should be 0
-# print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
-# print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
+# print("max(abs(c(Coef(p1et)@B1[1,ii],Coef(p1et)@A[ii,1])-coef(fit1)))")
+# print( max(abs(c(Coef(p1et)@B1[1,ii],Coef(p1et)@A[ii,1])-coef(fit1))) )
# Plot the deviance residuals
devresid <- resid(fit1, type = "deviance")
predvalues <- predict(fit1) + fit1@offset
ooo <- with(tempdata, order(latvar1))
- with(tempdata, plot(latvar1, predvalues + devresid, col = "darkgreen",
- xlab = "latvar1", ylab = "", main = colnames(depvar(p1et))[ii]))
+ plot(predvalues + devresid ~ latvar1, data = tempdata, col = "red",
+ xlab = "latvar1", ylab = "", main = colnames(depvar(p1et))[ii])
with(tempdata, lines(latvar1[ooo], predvalues[ooo], col = "blue"))
}
}
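
Following the advice above for 2-parameter families, a skeleton call
might look like this (a sketch only; the species and formula are
borrowed from the examples above, and convergence is not guaranteed):

nb1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr, Trocterr) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           family = negbinomial, data = hspider,
           eq.tolerances = TRUE, I.tolerances = FALSE)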
diff --git a/man/cratio.Rd b/man/cratio.Rd
index eb2beb9..f2b077a 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -147,7 +147,7 @@ The \pkg{VGAM} package for categorical data analysis.
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let,
- cratio(parallel = TRUE), pneumo))
+ cratio(parallel = TRUE), data = pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
predict(fit)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 337dc44..7af24e8 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -302,23 +302,24 @@ by the \pkg{VGAM} package can be found at
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
depvar(fit) # Sample proportions (good technique)
fit@y # Sample proportions (bad technique)
weights(fit, type = "prior") # Number of observations
coef(fit, matrix = TRUE)
constraints(fit) # Constraint matrices
apply(fitted(fit), 1, which.max) # Classification
-apply(predict(fit, newdata = pneumo, type = "response"), 1, which.max) # Classification
+apply(predict(fit, newdata = pneumo, type = "response"),
+ 1, which.max) # Classification
# Check that the model is linear in let ----------------------
fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
- cumulative(reverse = TRUE), pneumo)
+ cumulative(reverse = TRUE), data = pneumo)
\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2) }
# Check the proportional odds assumption with a LRT ----------
(fit3 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+ cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
pchisq(2 * (logLik(fit3) - logLik(fit)),
df = length(coef(fit3)) - length(coef(fit)), lower.tail = FALSE)
lrtest(fit3, fit) # More elegant
@@ -334,19 +335,19 @@ pneumo.long <-
levels = colnames(Nobs)),
let = rep(rep(with(pneumo, let), each = ncol(Nobs)),
times = c(t(Nobs))))
-with(pneumo.long, table(let, symptoms)) # Check it; should be same as pneumo
+with(pneumo.long, table(let, symptoms)) # Should be same as pneumo
(fit.long1 <- vglm(symptoms ~ let, data = pneumo.long, trace = TRUE,
cumulative(parallel = TRUE, reverse = TRUE)))
-coef(fit.long1, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
+coef(fit.long1, matrix = TRUE) # Should be as coef(fit, matrix = TRUE)
# Could try using mustart if fit.long1 failed to converge.
mymustart <- matrix(sumNobs / sum(sumNobs),
nrow(pneumo.long), ncol(Nobs), byrow = TRUE)
fit.long2 <- vglm(symptoms ~ let, mustart = mymustart,
cumulative(parallel = TRUE, reverse = TRUE),
data = pneumo.long, trace = TRUE)
-coef(fit.long2, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
+coef(fit.long2, matrix = TRUE) # Should be as coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/dagum.Rd b/man/dagum.Rd
index f93fd07..4fe852d 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -98,15 +98,16 @@ while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
\code{\link{invlomax}},
\code{\link{lomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
ddata <- data.frame(y = rdagum(n = 3000, exp(1), exp(2), exp(1)))
-fit <- vglm(y ~ 1, dagum, ddata, trace = TRUE)
-fit <- vglm(y ~ 1, dagum(ishape1.a = exp(1)), ddata, trace = TRUE)
+fit <- vglm(y ~ 1, dagum, data = ddata, trace = TRUE)
+fit <- vglm(y ~ 1, dagum(ishape1.a = exp(1)), data = ddata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/depvar.Rd b/man/depvar.Rd
index 96c91cd..85e8835 100644
--- a/man/depvar.Rd
+++ b/man/depvar.Rd
@@ -63,7 +63,7 @@ depvar(object, ...)
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
fit@y # Sample proportions (not recommended)
depvar(fit) # Better than using fit@y; dependent variable (response)
weights(fit, type = "prior") # Number of observations
diff --git a/man/df.residual.Rd b/man/df.residual.Rd
index 13f66aa..de14e1a 100644
--- a/man/df.residual.Rd
+++ b/man/df.residual.Rd
@@ -64,7 +64,7 @@ df.residual_vlm(object, type = c("vlm", "lm"), \dots)
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
head(model.matrix(fit, type = "vlm"))
head(model.matrix(fit, type = "lm"))
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index cb000aa..92cabf6 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -55,8 +55,8 @@ dirichlet(link = "loge", parallel = FALSE, zero = NULL)
y_1 > 0, \dots, y_M > 0,
\sum_{j=1}^M y_j = 1 }.
}
- One has \eqn{E(Y_j) = \alpha_j / \alpha_{+}}{E(Y_j) = alpha_j /
- alpha_{+}}, which are returned as the fitted values.
+ One has \eqn{E(Y_j) = \alpha_j / \alpha_{+}}{E(Y_j) = alpha_j / alpha_{+}},
+ which are returned as the fitted values.
For this distribution Fisher scoring corresponds to Newton-Raphson.
@@ -118,9 +118,10 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
}
\examples{
-ydata <- data.frame(rdiric(n = 1000, shape = exp(c(-1, 1, 0))))
-colnames(ydata) <- paste("y", 1:3, sep = "")
-fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, ydata, trace = TRUE, crit = "coef")
+ydata <- data.frame(rdiric(n = 1000,
+ shape = exp(c(y1 = -1, y2 = 1, y3 = 0))))
+fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet,
+ data = ydata, trace = TRUE, crit = "coef")
Coef(fit)
coef(fit, matrix = TRUE)
head(fitted(fit))
@@ -128,3 +129,7 @@ head(fitted(fit))
\keyword{models}
\keyword{regression}
+% colnames(ydata) <- paste("y", 1:3, sep = "")
+
+
+
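
The revised example relies on a naming trick: as the commented-out
colnames() line implies, the names given to 'shape' are assumed to carry
through to the columns of the simulated data frame:

ydata <- data.frame(rdiric(n = 5, shape = exp(c(y1 = -1, y2 = 1, y3 = 0))))
colnames(ydata)  # Presumably "y1" "y2" "y3"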
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index cd4c850..a644ff7 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -135,6 +135,22 @@ Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetic
\emph{Theoretical Population Biology}, \bold{78}, 200--210.
+
+Yu, P. and Shaw, C. A. (2014).
+An Efficient Algorithm for Accurate Computation of
+the Dirichlet-Multinomial Log-Likelihood Function.
+\emph{Bioinformatics},
+\bold{30},
+in press;
+\url{doi:10.1093/bioinformatics/btu079}.
+
+
+% first published online February 11, 2014
+
+
+
+
+
}
\author{ Thomas W. Yee }
@@ -180,13 +196,13 @@ nn <- 10; M <- 5
ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M))) # Integer counts
colnames(ydata) <- paste("y", 1:M, sep = "")
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial, ydata, trace = TRUE)
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial, data = ydata, trace = TRUE)
head(fitted(fit))
depvar(fit) # Sample proportions
weights(fit, type = "prior", matrix = FALSE) # Total counts per row
ydata <- transform(ydata, x2 = runif(nn))
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial, ydata, trace = TRUE)
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial, data = ydata, trace = TRUE)
\dontrun{ # This does not work:
Coef(fit) }
coef(fit, matrix = TRUE)
diff --git a/man/double.cennormal.Rd b/man/double.cennormal.Rd
index 9bf720e..5c94147 100644
--- a/man/double.cennormal.Rd
+++ b/man/double.cennormal.Rd
@@ -8,7 +8,7 @@
}
\usage{
-double.cennormal(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
+double.cennormal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
imu = NULL, isd = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
index c38ba56..3280227 100644
--- a/man/enzyme.Rd
+++ b/man/enzyme.Rd
@@ -35,6 +35,8 @@ Watts, D. G. (1981)
}
\seealso{
\code{\link[VGAM]{micmen}}.
+
+
}
\examples{
\dontrun{
diff --git a/man/erlang.Rd b/man/erlang.Rd
index 2ee090f..88c983b 100644
--- a/man/erlang.Rd
+++ b/man/erlang.Rd
@@ -88,7 +88,8 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\seealso{
\code{\link{gamma2.ab}},
- \code{\link{exponential}}.
+ \code{\link{exponential}},
+ \code{\link{simulate.vlm}}.
}
@@ -97,7 +98,7 @@ rate <- exp(2); myshape <- 3
edata <- data.frame(y = rep(0, nn <- 1000))
for (ii in 1:myshape)
edata <- transform(edata, y = y + rexp(nn, rate = rate))
-fit <- vglm(y ~ 1, erlang(shape = myshape), edata, trace = TRUE)
+fit <- vglm(y ~ 1, erlang(shape = myshape), data = edata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit) # Answer = 1/rate
1/rate
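
Since an Erlang variate with integer shape is a sum of independent
exponentials, the simulation loop above is equivalent to a single
rgamma() call (a sketch; 'rate' and 'myshape' as defined above):

edata <- data.frame(y = rgamma(1000, shape = myshape, rate = rate))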
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index 4e240f6..658f226 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -108,6 +108,8 @@ very close to 0 or 1.
the expectile \eqn{y} such that \eqn{G(y) = p}.
\code{reunif(n)} gives \eqn{n} random variates from \eqn{G}.
+
+
}
\references{
diff --git a/man/expexp.Rd b/man/expexp.Rd
index 3bab67f..c010e80 100644
--- a/man/expexp.Rd
+++ b/man/expexp.Rd
@@ -141,7 +141,7 @@ expexp(lshape = "loge", lscale = "loge",
\examples{
# A special case: exponential data
edata <- data.frame(y = rexp(n <- 1000))
-fit <- vglm(y ~ 1, fam = expexp, edata, trace = TRUE, maxit = 99)
+fit <- vglm(y ~ 1, fam = expexp, data = edata, trace = TRUE, maxit = 99)
coef(fit, matrix = TRUE)
Coef(fit)
diff --git a/man/exponential.Rd b/man/exponential.Rd
index 1786313..e87631b 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -74,14 +74,17 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
}
\seealso{
-% \code{\link{cexpon}},
\code{\link{amlexponential}},
\code{\link{laplace}},
\code{\link{expgeometric}},
\code{\link{explogff}},
\code{\link{poissonff}},
\code{\link{mix2exp}},
- \code{\link{freund61}}.
+ \code{\link{freund61}},
+ \code{\link{simulate.vlm}}.
+
+
+% \code{\link{cexpon}},
}
@@ -94,8 +97,8 @@ edata <- transform(edata, rate = exp(eta))
edata <- transform(edata, y = rexp(nn, rate = rate))
with(edata, stem(y))
-fit.slow <- vglm(y ~ x2 + x3, exponential, edata, trace = TRUE, crit = "c")
-fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), edata,
+fit.slow <- vglm(y ~ x2 + x3, exponential, data = edata, trace = TRUE, crit = "c")
+fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), data = edata,
trace = TRUE, crit = "coef")
coef(fit.slow, mat = TRUE)
summary(fit.slow)
diff --git a/man/felix.Rd b/man/felix.Rd
index a87ab19..a400c9e 100644
--- a/man/felix.Rd
+++ b/man/felix.Rd
@@ -38,6 +38,7 @@ felix(link = elogit(min = 0, max = 0.5), imethod = 1)
The mean is \eqn{1/(1-2a)} (returned as the fitted values).
Fisher scoring is implemented.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -50,7 +51,8 @@ felix(link = elogit(min = 0, max = 0.5), imethod = 1)
Consul, P. C. and Famoye, F. (2006)
\emph{Lagrangian Probability Distributions},
-Boston: Birkhauser.
+Boston, USA: Birkhauser.
+
}
\author{ T. W. Yee }
@@ -66,7 +68,7 @@ Boston: Birkhauser.
}
\examples{
fdata <- data.frame(y = 2 * rpois(n = 200, 1) + 1) # Not real data!
-fit <- vglm(y ~ 1, felix, fdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, felix, data = fdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/felixUC.Rd b/man/felixUC.Rd
index 18149b9..32d1265 100644
--- a/man/felixUC.Rd
+++ b/man/felixUC.Rd
@@ -7,17 +7,23 @@
\title{The Felix Distribution}
\description{
Density
-% distribution function, quantile function
-% and random generation for the
+ for the
Felix distribution.
+
+% distribution function, quantile function
+% and random generation
+
+
}
\usage{
dfelix(x, a = 0.25, log = FALSE)
+}
+
+
%pfelix(q, a = 0.25)
%qfelix(p, a = 0.25)
%rfelix(n, a = 0.25)
-}
\arguments{
\item{x}{vector of quantiles.}
% \item{p}{vector of probabilities.}
@@ -34,10 +40,14 @@ dfelix(x, a = 0.25, log = FALSE)
}
\value{
\code{dfelix} gives the density.
+
+
% \code{pfelix} gives the distribution function,
% \code{qfelix} gives the quantile function, and
% \code{rfelix} generates random deviates.
+
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/fff.Rd b/man/fff.Rd
index bd2b5fd..e453fb5 100644
--- a/man/fff.Rd
+++ b/man/fff.Rd
@@ -107,7 +107,7 @@ fdata <- data.frame(x2 = runif(nn <- 2000))
fdata <- transform(fdata, df1 = exp(2+0.5*x2),
df2 = exp(2-0.5*x2))
fdata <- transform(fdata, y = rf(nn, df1, df2))
-fit <- vglm(y ~ x2, fff, fdata, trace = TRUE)
+fit <- vglm(y ~ x2, fff, data = fdata, trace = TRUE)
coef(fit, matrix = TRUE)
}
}
diff --git a/man/fgm.Rd b/man/fgm.Rd
index b64317c..f56b1aa 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -79,7 +79,8 @@ Invariance theorems for Fisher information.
\seealso{
\code{\link{rfgm}},
\code{\link{bifrankcop}},
- \code{\link{morgenstern}}.
+ \code{\link{morgenstern}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/fgmUC.Rd b/man/fgmUC.Rd
index 0c2c5a1..20f26f6 100644
--- a/man/fgmUC.Rd
+++ b/man/fgmUC.Rd
@@ -32,6 +32,7 @@ rfgm(n, alpha)
\code{pfgm} gives the distribution function, and
\code{rfgm} generates random deviates (a two-column matrix).
+
}
%\references{
%
diff --git a/man/fill.Rd b/man/fill.Rd
index deae150..15c92f1 100644
--- a/man/fill.Rd
+++ b/man/fill.Rd
@@ -26,6 +26,7 @@
A support function for the argument \code{xij}, it generates a matrix
of an appropriate dimension.
+
}
\usage{
fill(x, values = 0, ncolx = ncol(x))
@@ -38,6 +39,7 @@ fill(x, values = 0, ncolx = ncol(x))
to a matrix if necessary, the answer is a matrix of \code{values}
values, of dimension \code{nrow(x)} by \code{ncolx}.
+
}
\item{values}{
Numeric.
@@ -45,11 +47,13 @@ fill(x, values = 0, ncolx = ncol(x))
which are recycled \emph{columnwise} if necessary, i.e.,
as \code{matrix(values, ..., byrow=TRUE)}.
+
}
\item{ncolx}{
The number of columns of the returned matrix.
The default is the number of columns of \code{x}.
+
}
}
\details{
@@ -67,10 +71,12 @@ fill(x, values = 0, ncolx = ncol(x))
these data into \code{\link{vglm}} one often finds that functions
\code{fill}, \code{fill1}, etc. are useful.
+
All terms in the \code{xij}
and \code{formula} arguments in \code{\link{vglm}}
must appear in the \code{form2} argument too.
+
}
\value{
\code{matrix(values, nrow=nrow(x), ncol=ncolx)}, i.e., a matrix
@@ -78,11 +84,13 @@ fill(x, values = 0, ncolx = ncol(x))
\code{x}, and the default number of columns is the number of columns
of \code{x}.
+
}
\references{
More information can be found at
\url{http://www.stat.auckland.ac.nz/~yee}.
+
}
% \section{Warning }{
@@ -97,6 +105,7 @@ fill(x, values = 0, ncolx = ncol(x))
\code{exchangeable} and \code{zero}.
Hence \code{xij} does not affect constraint matrices.
+
Additionally, there are currently 3 other identical \code{fill}
functions, called \code{fill1}, \code{fill2} and \code{fill3};
if you need more then assign \code{fill4 = fill5 = fill1} etc.
@@ -108,21 +117,25 @@ fill(x, values = 0, ncolx = ncol(x))
\code{xij = op ~ lop + rop + fill1(mop) + fill2(mop)} would retain
all \eqn{M} terms, which is needed.
+
% The constraint matrices, as returned by \code{constraints}, do not
% have a different meaning when \code{xij} is used.
+
In Examples 1 to 3 below, the \code{xij} argument illustrates covariates
that are specific to a linear predictor. Here, \code{lop}/\code{rop} are
the ocular pressures of the left/right eye in an artificial dataset,
and \code{mop} is their mean. Variables \code{leye} and \code{reye}
might be the presence/absence of a particular disease on the LHS/RHS
eye respectively.
+
%
% Examples 1 and 2 are deliberately misspecified.
% The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but
% is correct because the coefficients are multiplied by the zeros
% produced from \code{fill}.
+
In Example 3,
the \code{xij} argument illustrates fitting the (exchangeable) model
where there
@@ -136,12 +149,15 @@ fill(x, values = 0, ncolx = ncol(x))
in the model matrix, \code{fill(BS(lop,rop))} creates the required
(same) number of columns.
+
}
\seealso{
\code{\link{vglm.control}},
\code{\link{vglm}},
- \code{\link{multinomial}}.
+ \code{\link{multinomial}},
+ \code{\link{Select}}.
+
}
\examples{
@@ -151,42 +167,43 @@ fill(runif(5), val = 1, ncol = 3)
# Generate eyes data for the examples below. Eyes are independent (OR=1).
nn <- 1000 # Number of people
-eyesdat = data.frame(lop = round(runif(nn), 2),
- rop = round(runif(nn), 2),
- age = round(rnorm(nn, 40, 10)))
-eyesdat <- transform(eyesdat,
+eyesdata <- data.frame(lop = round(runif(nn), 2),
+ rop = round(runif(nn), 2),
+ age = round(rnorm(nn, 40, 10)))
+eyesdata <- transform(eyesdata,
mop = (lop + rop) / 2, # Mean ocular pressure
op = (lop + rop) / 2, # Value unimportant unless plotting
# op = lop, # Choose this if plotting
eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye
eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye
-eyesdat <- transform(eyesdat,
+eyesdata <- transform(eyesdata,
leye = rbinom(nn, size = 1, prob = logit(eta1, inverse = TRUE)),
reye = rbinom(nn, size = 1, prob = logit(eta2, inverse = TRUE)))
# Example 1
# All effects are linear
fit1 <- vglm(cbind(leye,reye) ~ op + age,
- family = binom2.or(exchangeable=TRUE, zero=3),
- data=eyesdat, trace=TRUE,
+ family = binom2.or(exchangeable = TRUE, zero = 3),
+ data = eyesdata, trace = TRUE,
xij = list(op ~ lop + rop + fill(lop)),
form2 = ~ op + lop + rop + fill(lop) + age)
-head(model.matrix(fit1, type="lm")) # LM model matrix
-head(model.matrix(fit1, type="vlm")) # Big VLM model matrix
+head(model.matrix(fit1, type = "lm")) # LM model matrix
+head(model.matrix(fit1, type = "vlm")) # Big VLM model matrix
coef(fit1)
coef(fit1, matrix = TRUE) # Unchanged with 'xij'
constraints(fit1)
-max(abs(predict(fit1)-predict(fit1, new = eyesdat))) # Predicts correctly
+max(abs(predict(fit1)-predict(fit1, new = eyesdata))) # Predicts correctly
summary(fit1)
-\dontrun{ plotvgam(fit1, se = TRUE) # Wrong, e.g., because it plots against op, not lop.
-# So set op=lop in the above for a correct plot.
+\dontrun{
+plotvgam(fit1, se = TRUE) # Wrong, e.g., because it plots against op, not lop.
+# So set op = lop in the above for a correct plot.
}
# Example 2
# Model OR as a linear function of mop
-fit2 <- vglm(cbind(leye,reye) ~ op + age, data = eyesdat, trace = TRUE,
+fit2 <- vglm(cbind(leye,reye) ~ op + age, data = eyesdata, trace = TRUE,
binom2.or(exchangeable = TRUE, zero = NULL),
xij = list(op ~ lop + rop + mop),
form2 = ~ op + lop + rop + mop + age)
@@ -194,20 +211,21 @@ head(model.matrix(fit2, type = "lm")) # LM model matrix
head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
coef(fit2)
coef(fit2, matrix = TRUE) # Unchanged with 'xij'
-max(abs(predict(fit2) - predict(fit2, new = eyesdat))) # Predicts correctly
+max(abs(predict(fit2) - predict(fit2, new = eyesdata))) # Predicts correctly
summary(fit2)
-\dontrun{ plotvgam(fit2, se = TRUE) # Wrong because it plots against op, not lop.
+\dontrun{
+plotvgam(fit2, se = TRUE) # Wrong because it plots against op, not lop.
}
# Example 3. This model uses regression splines on ocular pressure.
# It uses a trick to ensure common basis functions.
BS <- function(x, ...)
- bs(c(x,...), df = 3)[1:length(x), , drop = FALSE] # trick
+ sm.bs(c(x,...), df = 3)[1:length(x), , drop = FALSE] # trick
fit3 <- vglm(cbind(leye,reye) ~ BS(lop,rop) + age,
family = binom2.or(exchangeable = TRUE, zero = 3),
- data = eyesdat, trace = TRUE,
+ data = eyesdata, trace = TRUE,
xij = list(BS(lop,rop) ~ BS(lop,rop) +
BS(rop,lop) +
fill(BS(lop,rop))),
@@ -219,9 +237,10 @@ coef(fit3)
coef(fit3, matrix = TRUE)
summary(fit3)
fit3 at smart.prediction
-max(abs(predict(fit3) - predict(fit3, new = eyesdat))) # Predicts correctly
-predict(fit3, new = head(eyesdat)) # Note the 'scalar' OR, i.e., zero=3
-max(abs(head(predict(fit3)) - predict(fit3, new = head(eyesdat)))) # Should be 0
+max(abs(predict(fit3) - predict(fit3, new = eyesdata))) # Predicts correctly
+predict(fit3, new = head(eyesdata)) # Note the 'scalar' OR, i.e., zero=3
+max(abs(head(predict(fit3)) -
+ predict(fit3, new = head(eyesdata)))) # Should be 0
\dontrun{
plotvgam(fit3, se = TRUE, xlab = "lop") # Correct
}
@@ -235,13 +254,13 @@ plotvgam(fit3, se = TRUE, xlab = "lop") # Correct
%\code{fill1(x, value=0, ncolx=ncol(x))} and create .Rd file for
%\code{zero} argument.]
-%eyesdat$leye <- ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
-%eyesdat$reye <- ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
+%eyesdata$leye <- ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
+%eyesdata$reye <- ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
% \deqn{logit P(Y_k=1) = f_k(x_{ijk}) }{%
% logit P(Y_k=1) = f_k(x_{ijk}) }
% for \code{k=1,2}.
-% fill1(lop, ncol=ncol(BS(lop,rop,mop))), data=eyesdat)
+% fill1(lop, ncol=ncol(BS(lop,rop,mop))), data=eyesdata)
% Models using the \code{xij} argument may or may not predict correctly,
% and inference obtained using \code{summary} may be incorrect.
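
A minimal standalone sketch of what fill() itself returns, outside any vglm()
call; behaviour is assumed from the usage line fill(x, values = 0, ncolx =
ncol(x)) above, and this aside is not part of the upstream diff:

x <- matrix(1:6, nrow = 3)             # A 3 x 2 matrix
fill(x)                                # 3 x 2 matrix of zeros (default values = 0)
fill(x, values = 9)                    # 3 x 2 matrix of nines
fill(runif(5), values = 1, ncolx = 3)  # 5 x 3 matrix of ones; a vector is
                                       # first converted to a one-column matrix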
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 332654d..161ec33 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -86,14 +86,15 @@ Hoboken, NJ: Wiley-Interscience.
\code{\link{invlomax}},
\code{\link{lomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
fdata <- data.frame(y = rfisk(n = 200, exp(1), exp(2)))
-fit <- vglm(y ~ 1, fisk, fdata, trace = TRUE)
-fit <- vglm(y ~ 1, fisk(ishape1.a = exp(1)), fdata, trace = TRUE)
+fit <- vglm(y ~ 1, fisk, data = fdata, trace = TRUE)
+fit <- vglm(y ~ 1, fisk(ishape1.a = exp(1)), data = fdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 0f79a02..278f617 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -101,7 +101,7 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
\examples{
# Categorical regression example 1
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
fitted(fit1)
# LMS quantile regression example 2
diff --git a/man/foldnormal.Rd b/man/foldnormal.Rd
index a23b742..5d3ac96 100644
--- a/man/foldnormal.Rd
+++ b/man/foldnormal.Rd
@@ -7,7 +7,7 @@
}
\usage{
-foldnormal(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
+foldnormal(lmean = "identitylink", lsd = "loge", imean = NULL, isd = NULL,
a1 = 1, a2 = 1, nsimEIM = 500, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -100,6 +100,15 @@ foldnormal(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
\bold{26}, 825--830.
+ Johnson, N. L. (1962)
+ The folded normal distribution:
+ accuracy of estimation by maximum likelihood.
+ \emph{Technometrics},
+ \bold{4}, 249--256.
+
+
+
+
}
\author{ Thomas W. Yee }
\note{
@@ -113,6 +122,12 @@ foldnormal(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
many of these arguments.
+ Yet to do: implement the results of Johnson (1962), which gives
+ expressions for the EIM, albeit under a different parameterization.
+ Also, one element of the EIM appears to require numerical integration.
+
+
+
}
\section{Warning }{
@@ -136,7 +151,7 @@ foldnormal(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
fdata <- data.frame(y = rfoldnorm(n <- 1000, m = m, sd = SD))
hist(with(fdata, y), prob = TRUE, main = paste("foldnormal(m = ", m,
", sd = ", round(SD, 2), ")"))
-fit <- vglm(y ~ 1, fam = foldnormal, fdata, trace = TRUE)
+fit <- vglm(y ~ 1, foldnormal, data = fdata, trace = TRUE)
coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
mygrid <- with(fdata, seq(min(y), max(y), len = 200)) # Add the fit to the histogram
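
An aside on the ordinary folded normal (a1 = a2 = 1) fitted above: its mean has
the closed form E(Y) = sigma*sqrt(2/pi)*exp(-mu^2/(2*sigma^2)) +
mu*(1 - 2*pnorm(-mu/sigma)), which a quick Monte Carlo check with rfoldnorm()
can confirm. Illustrative only, not from the patch:

m <- 0.5; SD <- exp(0.1)
EY <- SD * sqrt(2/pi) * exp(-m^2 / (2 * SD^2)) + m * (1 - 2 * pnorm(-m / SD))
c(EY, mean(rfoldnorm(10000, m = m, sd = SD)))  # The two values should be close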
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 0419b16..489ea28 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -162,7 +162,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
set.seed(123)
fdata <- data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1)))
with(fdata, hist(y1))
-fit2 <- vglm(y1 ~ 1, frechet2, fdata, trace = TRUE)
+fit2 <- vglm(y1 ~ 1, frechet2, data = fdata, trace = TRUE)
coef(fit2, matrix = TRUE)
Coef(fit2)
head(fitted(fit2))
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index d1ba520..ae98756 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -67,6 +67,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
%}
\seealso{
\code{\link{frechet2}}.
+
+
% \code{\link{frechet3}}.
diff --git a/man/freund61.Rd b/man/freund61.Rd
index 98932b3..c54a069 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -177,7 +177,7 @@ A bivariate extension of the exponential distribution.
\examples{
fdata <- data.frame(y1 = rexp(nn <- 1000, rate = exp(1)))
fdata <- transform(fdata, y2 = rexp(nn, rate = exp(2)))
-fit1 <- vglm(cbind(y1, y2) ~ 1, fam = freund61, fdata, trace = TRUE)
+fit1 <- vglm(cbind(y1, y2) ~ 1, fam = freund61, data = fdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1)
vcov(fit1)
diff --git a/man/fsqrt.Rd b/man/fsqrt.Rd
index 05119ad..34c3332 100644
--- a/man/fsqrt.Rd
+++ b/man/fsqrt.Rd
@@ -129,7 +129,7 @@ par(lwd = 1)
}
# This is lucky to converge
-fit.h <- vglm(agaaus ~ bs(altitude), binomialff(link = fsqrt(mux = 5)),
+fit.h <- vglm(agaaus ~ sm.bs(altitude), binomialff(link = fsqrt(mux = 5)),
data = hunua, trace = TRUE)
\dontrun{
plotvgam(fit.h, se = TRUE, lcol = "orange", scol = "orange",
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index a54ff13..eaf8ab5 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -76,13 +76,14 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\seealso{
\code{\link{gamma2.ab}} for the 2-parameter gamma distribution,
\code{\link{lgammaff}},
- \code{\link{lindley}}.
+ \code{\link{lindley}},
+ \code{\link{simulate.vlm}}.
}
\examples{
gdata <- data.frame(y = rgamma(n = 100, shape = exp(3)))
-fit <- vglm(y ~ 1, gamma1, gdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, gamma1, data = gdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 16fce87..4f5d55f 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -161,21 +161,22 @@ McCullagh, P. and Nelder, J. A. (1989)
\code{\link{expexp}},
\code{\link[stats]{GammaDist}},
\code{\link{golf}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
}
\examples{
# Essentially a 1-parameter gamma
gdata <- data.frame(y = rgamma(n = 100, shape = exp(1)))
-fit1 <- vglm(y ~ 1, gamma1, gdata)
-fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
+fit1 <- vglm(y ~ 1, gamma1, data = gdata)
+fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef")
coef(fit2, matrix = TRUE)
Coef(fit2)
# Essentially a 2-parameter gamma
gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
-fit2 <- vglm(y ~ 1, gamma2, gdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef")
coef(fit2, matrix = TRUE)
Coef(fit2)
summary(fit2)
diff --git a/man/gamma2.ab.Rd b/man/gamma2.ab.Rd
index 5829f22..889efd1 100644
--- a/man/gamma2.ab.Rd
+++ b/man/gamma2.ab.Rd
@@ -103,21 +103,22 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\code{\link{gamma2}} for another parameterization of
the 2-parameter gamma distribution,
\code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution,
- \code{\link{expexp}}.
+ \code{\link{expexp}},
+ \code{\link{simulate.vlm}}.
}
\examples{
# Essentially a 1-parameter gamma
gdata <- data.frame(y1 = rgamma(n <- 100, shape = exp(1)))
-fit1 <- vglm(y1 ~ 1, gamma1, gdata, trace = TRUE)
-fit2 <- vglm(y1 ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "coef")
+fit1 <- vglm(y1 ~ 1, gamma1, data = gdata, trace = TRUE)
+fit2 <- vglm(y1 ~ 1, gamma2.ab, data = gdata, trace = TRUE, crit = "coef")
coef(fit2, matrix = TRUE)
Coef(fit2)
# Essentially a 2-parameter gamma
gdata <- data.frame(y2 = rgamma(n = 500, rate = exp(1), shape = exp(2)))
-fit2 <- vglm(y2 ~ 1, gamma2.ab, gdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y2 ~ 1, gamma2.ab, data = gdata, trace = TRUE, crit = "coef")
coef(fit2, matrix = TRUE)
Coef(fit2)
summary(fit2)
diff --git a/man/gammahyp.Rd b/man/gammahyp.Rd
index 0b05bd5..f6cde5a 100644
--- a/man/gammahyp.Rd
+++ b/man/gammahyp.Rd
@@ -88,8 +88,8 @@ gdata <- data.frame(x2 = runif(nn <- 1000))
gdata <- transform(gdata, theta = exp(-2 + x2))
gdata <- transform(gdata, y1 = rexp(nn, rate = exp(-theta)/theta),
y2 = rexp(nn, rate = theta) + 1)
-fit <- vglm(cbind(y1, y2) ~ x2, fam = gammahyp(expected = TRUE), gdata)
-fit <- vglm(cbind(y1, y2) ~ x2, fam = gammahyp, gdata, trace = TRUE, crit = "coef")
+fit <- vglm(cbind(y1, y2) ~ x2, gammahyp(expected = TRUE), data = gdata)
+fit <- vglm(cbind(y1, y2) ~ x2, gammahyp, data = gdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/garma.Rd b/man/garma.Rd
index 463cfd7..fe6f8ca 100644
--- a/man/garma.Rd
+++ b/man/garma.Rd
@@ -7,7 +7,7 @@
}
\usage{
-garma(link = "identity", p.ar.lag = 1, q.ma.lag = 0,
+garma(link = "identitylink", p.ar.lag = 1, q.ma.lag = 0,
coefstart = NULL, step = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -165,16 +165,19 @@ garma(link = "identity", p.ar.lag = 1, q.ma.lag = 0,
}
-\seealso{
+%\seealso{
+
+% The site \url{http://www.stat.auckland.ac.nz/~yee} contains
+% more documentation about this family function.
+
+
% \code{\link{identity}},
% \code{\link{logit}}.
- The site \url{http://www.stat.auckland.ac.nz/~yee} contains
- more documentation about this family function.
+%}
-}
\examples{
gdata <- data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
@@ -191,7 +194,7 @@ gdata <- transform(gdata, spikenum = seq(interspike))
bvalue <- 0.1 # .Machine$double.xmin # Boundary value
fit <- vglm(interspike ~ 1, trace = TRUE, data = gdata,
garma(loge(bvalue = bvalue),
- p = 2, coefstart = c(4, 0.3, 0.4)))
+ p = 2, coefstart = c(4, 0.3, 0.4)))
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit) # A bug here
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index 68e8dae..c77c9e6 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -135,7 +135,7 @@ More improvements could be made here.
\examples{
\dontrun{
gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1))) # A special case!
-fit <- vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
+fit <- vglm(y ~ 1, genbetaII, data = gdata, trace = TRUE)
fit <- vglm(y ~ 1, data = gdata, trace = TRUE,
genbetaII(ishape1.a = 4, ishape2.p = 2.2, iscale = 7, ishape3.q = 2.3))
coef(fit, matrix = TRUE)
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index c5dee3e..e5dcc2d 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -118,14 +118,15 @@ Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
\code{\link{rgengamma}},
\code{\link{gamma1}},
\code{\link{gamma2}},
- \code{\link{prentice74}}.
+ \code{\link{prentice74}},
+ \code{\link{simulate.vlm}}.
}
\examples{
\dontrun{ k <- exp(-1); Scale = exp(1)
gdata <- data.frame(y = rgamma(1000, shape = k, scale = Scale))
-fit <- vglm(y ~ 1, gengamma, gdata, trace = TRUE)
+fit <- vglm(y ~ 1, gengamma, data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Another example
@@ -134,8 +135,8 @@ gdata <- transform(gdata, Scale = exp(1),
d = exp( 0 + 1.2* x2),
k = exp(-1 + 2 * x2))
gdata <- transform(gdata, y = rgengamma(nn, scale = Scale, d = d, k = k))
-fit <- vglm(y ~ x2, gengamma(zero = 1, iscale = 6), gdata, trace = TRUE)
-fit <- vglm(y ~ x2, gengamma(zero = 1), gdata, trace = TRUE, maxit = 50)
+fit <- vglm(y ~ x2, gengamma(zero = 1, iscale = 6), data = gdata, trace = TRUE)
+fit <- vglm(y ~ x2, gengamma(zero = 1), data = gdata, trace = TRUE, maxit = 50)
coef(fit, matrix = TRUE)
}
}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 334b949..3f00bc4 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -91,7 +91,7 @@ and the variance is \eqn{\theta / (1 - \lambda)^3}.
Consul, P. C. and Famoye, F. (2006)
\emph{Lagrangian Probability Distributions},
-Boston: Birkhauser.
+Boston, USA: Birkhauser.
Jorgensen, B. (1997)
@@ -101,7 +101,7 @@ London: Chapman & Hall
Consul, P. C. (1989)
\emph{Generalized Poisson Distributions: Properties and Applications}.
-New York: Marcel Dekker.
+New York, USA: Marcel Dekker.
}
@@ -121,7 +121,7 @@ New York: Marcel Dekker.
\examples{
gdata <- data.frame(x2 = runif(nn <- 200))
gdata <- transform(gdata, y = rpois(nn, exp(2 - x2))) # Ordinary Poisson data
-fit <- vglm(y ~ x2, genpoisson(zero = 1), gdata, trace = TRUE)
+fit <- vglm(y ~ x2, genpoisson(zero = 1), data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
@@ -131,3 +131,7 @@ summary(fit)
% yettodo: see csda 2009, 53(9): 3478--3489.
%{% f(y) = theta*(theta+lambda*y)^(y-1) exp(-theta-lambda*y) / y!}
+
+
+
+
diff --git a/man/geometric.Rd b/man/geometric.Rd
index 2c0b39f..bd2d2b2 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -29,10 +29,12 @@ truncgeometric(upper.limit = Inf,
Logical.
Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
+
}
\item{iprob, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more details.
+
}
\item{upper.limit}{
@@ -59,6 +61,8 @@ truncgeometric(upper.limit = Inf,
and its variance is \eqn{Var(Y) = (1-p)/p^2}{Var(Y) = (1-prob)/prob^2}.
The geometric distribution is a special case of the
negative binomial distribution (see \code{\link{negbinomial}}).
+ The geometric distribution is also a special case of the
+ Borel distribution, which is a Lagrangian distribution.
If \eqn{Y} has a geometric distribution with parameter \eqn{p}{prob} then
\eqn{Y+1} has a positive-geometric distribution with the same parameter.
Multiple responses are permitted.
@@ -110,7 +114,8 @@ Help from Viet Hoang Quoc is gratefully acknowledged.
\code{\link{expgeometric}},
\code{\link{zageometric}},
\code{\link{zigeometric}},
- \code{\link{rbetageom}}.
+ \code{\link{rbetageom}},
+ \code{\link{simulate.vlm}}.
}
@@ -122,7 +127,7 @@ gdata <- transform(gdata, eta = -1.0 - 1.0 * x2 + 2.0 * x3)
gdata <- transform(gdata, prob = logit(eta, inverse = TRUE))
gdata <- transform(gdata, y1 = rgeom(nn, prob))
with(gdata, table(y1))
-fit1 <- vglm(y1 ~ x2 + x3 + x4, geometric, gdata, trace = TRUE)
+fit1 <- vglm(y1 ~ x2 + x3 + x4, geometric, data = gdata, trace = TRUE)
coef(fit1, matrix = TRUE)
summary(fit1)
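
As a quick base-R check of the statement above that the geometric distribution
is the size-one special case of the negative binomial, with the documented mean
and variance (an aside, not from the patch):

p <- 0.3; y <- 0:10
max(abs(dgeom(y, prob = p) - dnbinom(y, size = 1, prob = p)))  # Exactly 0
(1 - p) / p    # E(Y), as documented
(1 - p) / p^2  # Var(Y), as documented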
diff --git a/man/get.smart.Rd b/man/get.smart.Rd
index 59a6bd0..c299d06 100644
--- a/man/get.smart.Rd
+++ b/man/get.smart.Rd
@@ -3,8 +3,8 @@
\title{ Retrieve One Component of ``.smart.prediction'' }
\description{
Retrieve one component of the list \code{.smart.prediction} from
-\code{smartpredenv} (\R) or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
+
}
\usage{
@@ -12,23 +12,20 @@ get.smart()
}
\value{
Returns with one list component of \code{.smart.prediction} from
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS),
+\code{smartpredenv},
in fact, \code{.smart.prediction[[.smart.prediction.counter]]}.
The whole procedure mimics a first-in first-out stack (better known
as a \emph{queue}).
+
}
\section{Side Effects}{
The variable \code{.smart.prediction.counter} in
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS)
+\code{smartpredenv}
is incremented beforehand, and then written back to
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
+
+
}
\details{
@@ -36,7 +33,7 @@ frame 1 (S-PLUS).
it retrieves parameters saved at the time of fitting, and
is used for prediction.
\code{get.smart} is only used in smart functions such as
- \code{\link[stats]{poly}};
+ \code{\link[VGAM]{sm.poly}};
\code{get.smart.prediction} is only used in modelling functions
such as \code{\link[stats]{lm}} and \code{\link[stats]{glm}}.
The function
@@ -47,17 +44,19 @@ frame 1 (S-PLUS).
}
\seealso{
\code{\link{get.smart.prediction}}.
+
+
}
\examples{
"my1" <- function(x, minx = min(x)) { # Here is a smart function
- x <- x # Needed for nested calls, e.g., bs(scale(x))
- if(smart.mode.is("read")) {
- smart <- get.smart()
- minx <- smart$minx # Overwrite its value
- } else
+ x <- x # Needed for nested calls, e.g., sm.bs(sm.scale(x))
+ if (smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
if(smart.mode.is("write"))
put.smart(list(minx = minx))
- sqrt(x-minx)
+ sqrt(x - minx)
}
attr(my1, "smart") <- TRUE
}
diff --git a/man/get.smart.prediction.Rd b/man/get.smart.prediction.Rd
index 2735986..21881e3 100644
--- a/man/get.smart.prediction.Rd
+++ b/man/get.smart.prediction.Rd
@@ -3,38 +3,40 @@
\title{ Retrieves ``.smart.prediction'' }
\description{
Retrieves \code{.smart.prediction} from
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
}
\usage{
get.smart.prediction()
}
\value{
Returns with the list \code{.smart.prediction} from
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
+
+
}
\details{
A smart modelling function such as \code{\link[stats]{lm}} allows
-smart functions such as \code{\link[splines]{bs}}
+smart functions such as \code{\link[VGAM]{sm.bs}}
to write to
a data structure called \code{.smart.prediction} in
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
At the end of fitting,
\code{get.smart.prediction} retrieves this data structure.
It is then attached to the object, and used for prediction later.
+
+
+
}
\seealso{
\code{\link{get.smart}},
\code{\link[stats]{lm}}.
+
+
}
\examples{
-\dontrun{# Put at the end of lm
-fit$smart <- get.smart.prediction() }
+\dontrun{
+fit$smart <- get.smart.prediction() # Put at the end of lm()
+}
}
%\keyword{smart}
\keyword{models}
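
A hedged sketch of the write/read cycle described above, reusing the smart
function my1() from the get.smart() example; the uninormal family and the
simulated data are illustrative assumptions, not part of the upstream diff:

sdata <- data.frame(x2 = runif(20))
sdata <- transform(sdata, y = 1 + 2 * x2 + rnorm(20, sd = 0.1))
fit <- vglm(y ~ my1(x2), uninormal, data = sdata)  # "write" mode; put.smart() stores minx
fit@smart.prediction                 # The saved list, attached at the end of fitting
predict(fit, newdata = head(sdata))  # "read" mode; get.smart() retrieves minx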
diff --git a/man/gev.Rd b/man/gev.Rd
index d34ffba..96fc3d3 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -9,11 +9,11 @@
}
\usage{
-gev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
+gev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
giveWarning = TRUE, zero = 3)
-egev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
+egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
giveWarning = TRUE, zero = 3)
@@ -247,8 +247,6 @@ egev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
}
\seealso{
- %\code{\link{egev}},
- %\code{\link{ogev}},
\code{\link{rgev}},
\code{\link{gumbel}},
\code{\link{egumbel}},
@@ -262,6 +260,10 @@ egev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
\code{\link{venice}}.
+ %\code{\link{egev}},
+ %\code{\link{ogev}},
+
+
}
\examples{
diff --git a/man/gompertz.Rd b/man/gompertz.Rd
index cd63b78..06ba745 100644
--- a/man/gompertz.Rd
+++ b/man/gompertz.Rd
@@ -95,7 +95,8 @@ The same warnings in \code{\link{makeham}} apply here too.
\seealso{
\code{\link{dgompertz}},
- \code{\link{makeham}}.
+ \code{\link{makeham}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/gpd.Rd b/man/gpd.Rd
index d5def8b..22c321b 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -227,13 +227,13 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
# Simulated data from an exponential distribution (xi = 0)
threshold <- 0.5
gdata <- data.frame(y1 = threshold + rexp(n = 3000, rate = 2))
-fit <- vglm(y1 ~ 1, gpd(threshold = threshold), gdata, trace = TRUE)
+fit <- vglm(y1 ~ 1, gpd(threshold = threshold), data = gdata, trace = TRUE)
head(fitted(fit))
-coef(fit, matrix = TRUE) # xi should be close to 0
+coef(fit, matrix = TRUE) # xi should be close to 0
Coef(fit)
summary(fit)
-fit at extra$threshold # Note the threshold is stored here
+head(fit at extra$threshold) # Note the threshold is stored here
# Check the 90 percentile
ii <- depvar(fit) < fitted(fit)[1, "90\%"]
@@ -252,18 +252,18 @@ matlines(1:length(depvar(fit)), fitted(fit), lty = 2:3, lwd = 2) }
gdata <- data.frame(x2 = runif(nn <- 2000))
threshold <- 0; xi <- exp(-0.8) - 0.5
gdata <- transform(gdata, y2 = rgpd(nn, scale = exp(1 + 0.1*x2), shape = xi))
-fit <- vglm(y2 ~ x2, gpd(threshold), gdata, trace = TRUE)
+fit <- vglm(y2 ~ x2, gpd(threshold), data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
\dontrun{ # Nonparametric fits
gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
# Not so recommended:
-fit1 <- vgam(yy ~ s(x2), gpd(threshold), gdata, trace = TRUE)
+fit1 <- vgam(yy ~ s(x2), gpd(threshold), data = gdata, trace = TRUE)
par(mfrow = c(2,1))
plotvgam(fit1, se = TRUE, scol = "blue")
# More recommended:
-fit2 <- vglm(yy ~ bs(x2), gpd(threshold), gdata, trace = TRUE)
+fit2 <- vglm(yy ~ sm.bs(x2), gpd(threshold), data = gdata, trace = TRUE)
plotvgam(fit2, se = TRUE, scol = "blue") }
}
\keyword{models}
diff --git a/man/grc.Rd b/man/grc.Rd
index dd1608a..45a8c03 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -1,35 +1,36 @@
\name{grc}
\alias{grc}
\alias{rcim}
+\alias{uqo}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Row-Column Interaction Models including Goodman's RC Association Model }
+\title{ Row-Column Interaction Models including Goodman's RC Association
+ Model and Unconstrained Quadratic Ordination }
\description{
- Fits a Goodman's RC association model to a matrix of counts,
- and more generally, a sub-class of row-column interaction models.
+ Fits a Goodman's RC association model (GRC) to a matrix of counts,
+ and more generally, row-column interaction models (RCIMs).
+ RCIMs allow for unconstrained quadratic ordination (UQO).
+
+
}
\usage{
grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
str0 = 1, summary.arg = FALSE, h.step = 1e-04, ...)
-rcim(y, family = poissonff, Rank = 0, Musual = NULL,
+rcim(y, family = poissonff, Rank = 0, M1 = NULL,
weights = NULL, which.linpred = 1,
Index.corner = ifelse(is.null(str0), 0, max(str0)) + 1:Rank,
rprefix = "Row.", cprefix = "Col.", iprefix = "X2.",
offset = 0, str0 = if (Rank) 1 else NULL,
summary.arg = FALSE, h.step = 0.0001,
rbaseline = 1, cbaseline = 1,
- has.intercept = TRUE,
- M = NULL,
- rindex = 2:nrow(y),
- cindex = 2:ncol(y),
- iindex = 2:nrow(y),
- ...)
+ has.intercept = TRUE, M = NULL,
+ rindex = 2:nrow(y), cindex = 2:ncol(y), iindex = 2:nrow(y), ...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{y}{
- For \code{grc} a matrix of counts.
- For \code{rcim} a general matrix response depending on \code{family}.
+ For \code{grc()}: a matrix of counts.
+ For \code{rcim()}: a general matrix response depending on \code{family}.
Output from \code{table()} is acceptable; it is converted into a matrix.
Note that \code{y} should be at least 3 by 3 in dimension.
@@ -37,7 +38,8 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
}
\item{family}{
A \pkg{VGAM} family function.
- By default, the first linear/additive predictor is fitted
+ By default, the first linear/additive predictor
+ is fitted
using main effects plus an optional rank-\code{Rank}
interaction term.
Not all family functions are suitable or make sense.
@@ -48,11 +50,11 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
\code{\link{zipoisson}} because of the ordering of the
linear/additive predictors.
If the \pkg{VGAM} family function does not have an \code{infos}
- slot then \code{Musual} needs to be inputted (the number of
+ slot then \code{M1} needs to be inputted (the number of
linear predictors for an ordinary (usually univariate) response,
aka \eqn{M}).
The \pkg{VGAM} family function also needs to be able to
- handle multiple responses; and not all of them can do this.
+ handle multiple responses (currently not all of them can do this).
}
@@ -78,7 +80,7 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
Single integer.
Specifies which linear predictor is modelled as the sum of an
intercept, row effect, column effect plus an optional interaction
- term. It should be one value from the set \code{1:Musual}.
+ term. It should be one value from the set \code{1:M1}.
}
@@ -112,8 +114,8 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
}
\item{summary.arg}{
- Logical. If \code{TRUE}, a summary is returned.
- If \code{TRUE}, \code{y} may be the output (fitted
+ Logical. If \code{TRUE} then a summary is returned.
+ If \code{TRUE} then \code{y} may be the output (fitted
object) of \code{grc()}.
@@ -129,11 +131,11 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
}
- \item{Musual}{
+ \item{M1}{
The number of linear predictors of the \pkg{VGAM} \code{family} function
for an ordinary (univariate) response.
Then the number of linear predictors of the \code{rcim()} fit is
- usually the number of columns of \code{y} multiplied by \code{Musual}.
+ usually the number of columns of \code{y} multiplied by \code{M1}.
The default is to evaluate the \code{infos} slot of the
\pkg{VGAM} \code{family} function to try to evaluate it;
see \code{\link{vglmff-class}}.
@@ -179,7 +181,9 @@ rcim(y, family = poissonff, Rank = 0, Musual = NULL,
}
\details{
Goodman's RC association model fits a reduced-rank approximation
- to a table of counts. The log of each cell mean is decomposed as an
+ to a table of counts.
+ A Poisson model is assumed.
+ The log of each cell mean is decomposed as an
intercept plus a row effect plus a column effect plus a reduced-rank
component. The latter can be collectively written \code{A \%*\% t(C)},
the product of two `thin' matrices.
@@ -201,7 +205,7 @@ The function \code{rcim()} is more general than \code{grc()}.
Its default is a no-interaction model of \code{grc()}, i.e.,
rank-0 and a Poisson distribution. This means that each
row and column has a dummy variable associated with it.
-The first row and column is baseline.
+The first row and first column are baseline.
The power of \code{rcim()} is that many \pkg{VGAM} family functions
can be assigned to its \code{family} argument.
For example,
@@ -210,13 +214,38 @@ ANOVA with and without interactions,
\code{\link{alaplace2}} with \code{Rank = 0} is something like
\code{\link[stats]{medpolish}}.
Others include
-\code{\link{zipoissonff}},
+\code{\link{zipoissonff}} and
\code{\link{negbinomial}}.
Hopefully one day \emph{all} \pkg{VGAM} family functions will
work when assigned to the \code{family} argument, although the
result may not have meaning.
+
+\emph{Unconstrained quadratic ordination} (UQO) can be performed
+using \code{rcim()} and \code{grc()}.
+This has been called \emph{unconstrained Gaussian ordination}
+in the literature; however, the word \emph{Gaussian} has two
+meanings, which is confusing, so it is better to use
+\emph{quadratic} because a bell-shaped response surface is meant.
+UQO is similar to CQO (\code{\link{cqo}}) except there are
+no environmental/explanatory variables.
+Here, a GLM is fitted to each column (species)
+that is a quadratic function of hypothetical latent variables
+or gradients.
+Thus each row of the response has an associated site score,
+and each column of the response has an associated optimum
+and tolerance matrix.
+ UQO can be performed here under the assumption that all species
+ have the same tolerance matrices.
+ See Yee and Hadi (2014) for details.
+ It is not recommended that presence/absence data be inputted
+ because the information content is so low for each site-species
+ cell.
+ The example below uses Poisson counts.
+
+
+
}
\value{
An object of class \code{"grc"}, which currently is the same as
@@ -225,6 +254,7 @@ result may not have meaning.
a rank-0 \code{rcim()} object is of class \code{\link{rcim0-class}},
else of class \code{"rcim"} (this may change in the future).
+
% Currently,
% a rank-0 \code{rcim()} object is of class \code{\link{vglm-class}},
% but it may become of class \code{"rcim"} one day.
@@ -239,9 +269,10 @@ Reduced-rank vector generalized linear models.
\bold{3}, 15--41.
-Yee, T. W. and Hadi, A. F. (2013)
-Row-column interaction models
-\emph{In preparation}.
+Yee, T. W. and Hadi, A. F. (2014)
+Row-column interaction models, with an R implementation.
+\emph{Computational Statistics},
+\bold{29}, in press.
Goodman, L. A. (1981)
@@ -251,10 +282,10 @@ of cross-classifications having ordered categories.
\bold{76}, 320--334.
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information about the setting up of the
-indicator variables.
+%Documentation accompanying the \pkg{VGAM} package at
+%\url{http://www.stat.auckland.ac.nz/~yee}
+%contains further information about the setting up of the
+%indicator variables.
}
@@ -263,6 +294,7 @@ Thomas W. Yee, with
assistance from Alfian F. Hadi.
+
}
\note{
These functions set up the indicator variables etc. before calling
@@ -276,12 +308,12 @@ assistance from Alfian F. Hadi.
The data should be labelled with \code{\link[base]{rownames}} and
\code{\link[base]{colnames}}.
- Setting \code{trace = TRUE} is recommended for monitoring
+ Setting \code{trace = TRUE} is recommended to monitor
convergence.
Using \code{criterion = "coefficients"} can result in slow convergence.
- If \code{summary = TRUE}, then \code{y} can be a
+ If \code{summary = TRUE} then \code{y} can be a
\code{"grc"} object, in which case a summary can be returned.
That is, \code{grc(y, summary = TRUE)} is
equivalent to \code{summary(grc(y))}.
@@ -314,7 +346,7 @@ assistance from Alfian F. Hadi.
called \code{.grc.df} or \code{.rcim.df}, which used
to be needed by \code{summary.rrvglm()}. Then these
data frames are deleted before exiting the function.
- If an error occurs, then the data frames may be present
+ If an error occurs then the data frames may be present
in the workspace.
@@ -328,22 +360,25 @@ assistance from Alfian F. Hadi.
\code{summary.grc},
\code{\link{moffset}},
\code{\link{Rcim}},
+ \code{\link{Select}},
\code{\link{Qvar}},
\code{\link{plotrcim0}},
+ \code{\link{cqo}},
\code{\link{multinomial}},
\code{\link{alcoff}},
\code{\link{crashi}},
\code{\link{auuc}},
\code{\link[VGAM:olym08]{olym08}},
\code{\link[VGAM:olym12]{olym12}},
- \code{\link{poissonff}}.
+ \code{\link{poissonff}},
+ \code{\link[stats]{medpolish}}.
}
\examples{
-grc1 <- grc(auuc) # Undergraduate enrolments at Auckland University in 1990
-fitted(grc1)
+# Example 1: Undergraduate enrolments at Auckland University in 1990
+fitted(grc1 <- grc(auuc))
summary(grc1)
grc2 <- grc(auuc, Rank = 2, Index.corner = c(2, 5))
@@ -355,18 +390,8 @@ model3 <- rcim(auuc, Rank = 1, fam = multinomial,
fitted(model3)
summary(model3)
-
-# 2012 Summer Olympic Games in London
-\dontrun{ top10 <- head(olym12, n = 10)
-grc.oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
-round(fitted(grc.oly1))
-round(resid(grc.oly1, type = "response"), digits = 1) # Response residuals
-summary(grc.oly1)
-Coef(grc.oly1)
-}
-
-
-# Roughly median polish
+# Roughly median polish but not 100 percent reliable
+\dontrun{
rcim0 <- rcim(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
round(fitted(rcim0), digits = 0)
round(100 * (fitted(rcim0) - auuc) / auuc, digits = 0) # Discrepancy
@@ -381,6 +406,62 @@ names(constraints(rcim0))
fv <- med.a$overall + outer(med.a$row, med.a$col, "+")
round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
}
+
+
+# Example 2: 2012 Summer Olympic Games in London
+\dontrun{ top10 <- head(olym12, 10)
+grc1.oly12 <- with(top10, grc(cbind(gold, silver, bronze)))
+round(fitted(grc1.oly12))
+round(resid(grc1.oly12, type = "response"), digits = 1) # Response residuals
+summary(grc1.oly12)
+Coef(grc1.oly12)
+}
+
+
+# Example 3: Unconstrained quadratic ordination (UQO); see Yee and Hadi (2014)
+\dontrun{
+n <- 100; p <- 5; S <- 10
+pdata <- rcqo(n, p, S, es.opt = FALSE, eq.max = FALSE,
+ eq.tol = TRUE, sd.latvar = 0.75) # Poisson counts
+true.nu <- attr(pdata, "latvar") # The 'truth'; site scores
+attr(pdata, "tolerances") # The 'truth'; tolerances
+
+Y <- Select(pdata, "y", sort = FALSE) # Y matrix (n x S); the "y" vars
+uqo.rcim1 <- rcim(Y, Rank = 1,
+ str0 = NULL, # Delta covers entire n x M matrix
+ iindex = 1:nrow(Y), # RRR covers the entire Y
+ has.intercept = FALSE) # Suppress the intercept
+
+# Plot 1
+par(mfrow = c(2, 2))
+plot(attr(pdata, "optima"), Coef(uqo.rcim1)@A,
+ col = "blue", type = "p", main = "(a) UQO optima",
+ xlab = "True optima", ylab = "Estimated (UQO) optima")
+mylm <- lm(Coef(uqo.rcim1)@A ~ attr(pdata, "optima"))
+abline(coef = coef(mylm), col = "orange", lty = "dashed")
+
+# Plot 2
+fill.val <- NULL # Choose this for the new parameterization
+plot(attr(pdata, "latvar"), c(fill.val, concoef(uqo.rcim1)),
+ las = 1, col = "blue", type = "p", main = "(b) UQO site scores",
+ xlab = "True site scores", ylab = "Estimated (UQO) site scores" )
+mylm <- lm(c(fill.val, concoef(uqo.rcim1)) ~ attr(pdata, "latvar"))
+abline(coef = coef(mylm), col = "orange", lty = "dashed")
+
+# Plots 3 and 4
+myform <- attr(pdata, "formula")
+p1ut <- cqo(myform, family = poissonff,
+ eq.tol = FALSE, trace = FALSE, data = pdata)
+c1ut <- cqo(Select(pdata, "y", sort = FALSE) ~ scale(latvar(uqo.rcim1)),
+ family = poissonff, eq.tol = FALSE, trace = FALSE, data = pdata)
+lvplot(p1ut, lcol = 1:S, y = TRUE, pcol = 1:S, pch = 1:S, pcex = 0.5,
+ main = "(c) CQO fitted to the original data",
+ xlab = "Estimated (CQO) site scores")
+lvplot(c1ut, lcol = 1:S, y = TRUE, pcol = 1:S, pch = 1:S, pcex = 0.5,
+ main = "(d) CQO fitted to the scaled UQO site scores",
+ xlab = "Estimated (UQO) site scores")
+}
+}
\keyword{models}
\keyword{regression}
% plot(grc.oly1)
@@ -400,11 +481,11 @@ round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
% Prior to 201310:
% str0 = if (!Rank) NULL else {
-% if (Musual == 1) 1 else setdiff(1:(Musual * ncol(y)),
-% c(1 + (1:ncol(y)) * Musual, Index.corner))
+% if (M1 == 1) 1 else setdiff(1:(M1 * ncol(y)),
+% c(1 + (1:ncol(y)) * M1, Index.corner))
% },
% str0 = if (Rank > 0) 1 else NULL,
-% Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
+% Index.corner = if (!Rank) NULL else 1 + M1 * (1:Rank),
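
Since the rank-0 default of rcim() described above is a main-effects Poisson
model with the first row and first column as baseline, its fitted values should
agree with an ordinary glm() on row and column factors. A hedged check, where
the Var1/Var2/Freq column names come from as.table() (not part of the diff):

rcim0 <- rcim(auuc)  # Rank = 0 and poissonff by default
adf <- as.data.frame(as.table(as.matrix(auuc)))
glm0 <- glm(Freq ~ Var1 + Var2, family = poisson, data = adf)
max(abs(sort(c(fitted(rcim0))) - sort(fitted(glm0))))  # Should be ~0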
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index 7394d5d..5dab0c9 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -9,10 +9,10 @@
}
\usage{
-gumbel(llocation = "identity", lscale = "loge",
+gumbel(llocation = "identitylink", lscale = "loge",
iscale = NULL, R = NA, percentiles = c(95, 99),
mpv = FALSE, zero = NULL)
-egumbel(llocation = "identity", lscale = "loge",
+egumbel(llocation = "identitylink", lscale = "loge",
iscale = NULL, R = NA, percentiles = c(95, 99),
mpv = FALSE, zero = NULL)
}
@@ -71,6 +71,9 @@ egumbel(llocation = "identity", lscale = "loge",
be from the set \{1, 2\} corresponding respectively to \eqn{\mu}{mu}
and \eqn{\sigma}{sigma}. By default all linear/additive predictors
are modelled as a linear combination of the explanatory variables.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+
+
}
}
@@ -88,10 +91,10 @@ egumbel(llocation = "identity", lscale = "loge",
The MPV is the value of the response such that the probability
of obtaining a value greater than the MPV is 0.5 out of
\eqn{R} observations.
- For the Venice data, the MPV is the sea level such that there
+ For the Venice data, the MPV is the sea level such that there
is an even chance that the highest level for a particular year
exceeds the MPV.
- When \code{mpv = TRUE}, the column labelled \code{"MPV"} contains
+ If \code{mpv = TRUE} then the column labelled \code{"MPV"} contains
the MPVs when \code{fitted()} is applied to the fitted object.
@@ -152,6 +155,7 @@ egumbel(llocation = "identity", lscale = "loge",
however, the data are the highest sea level measurements recorded each
year (it therefore equates to the median predicted value or MPV).
+
}
\note{
@@ -166,14 +170,17 @@ egumbel(llocation = "identity", lscale = "loge",
\code{na.action = na.pass}. The response matrix needs to be
padded with any missing values. With a multivariate response
one has a matrix \code{y}, say, where
- \code{y[, 2]} contains the second order statistics etc.
+ \code{y[, 2]} contains the second order statistics, etc.
+
% If a random variable \eqn{Y} has a \emph{reverse}
% \eqn{Gumbel(\mu,\sigma)}{Gumbel(mu,sigma)} distribution then \eqn{-Y}
% has a \eqn{Gumbel(-\mu,\sigma)}{Gumbel(-mu,sigma)} distribution.
% It appears that some define the reverse Gumbel the same as others
% who define the ordinary Gumbel distribution, e.g., in \pkg{gamlss}.
+
+
}
\seealso{
@@ -183,64 +190,61 @@ egumbel(llocation = "identity", lscale = "loge",
\code{\link{guplot}},
\code{\link{gev}},
\code{\link{egev}},
-% \code{\link{ogev}},
\code{\link{venice}}.
+
+% \code{\link{ogev}},
+
}
\examples{
# Example 1: Simulated data
-gdata <- data.frame(y = rgumbel(n = 1000, loc = 100, scale = exp(1)))
-fit <- vglm(y ~ 1, egumbel(perc = NULL), gdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit)
-head(fitted(fit))
-with(gdata, mean(y))
+gdata <- data.frame(y1 = rgumbel(n = 1000, loc = 100, scale = exp(1)))
+fit1 <- vglm(y1 ~ 1, egumbel(perc = NULL), data = gdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+head(fitted(fit1))
+with(gdata, mean(y1))
# Example 2: Venice data
-(fit <- vglm(cbind(r1,r2,r3,r4,r5) ~ year, data = venice,
- gumbel(R = 365, mpv = TRUE), trace = TRUE))
-head(fitted(fit))
-coef(fit, matrix = TRUE)
-vcov(summary(fit))
-sqrt(diag(vcov(summary(fit)))) # Standard errors
-
+(fit2 <- vglm(cbind(r1, r2, r3, r4, r5) ~ year, data = venice,
+ gumbel(R = 365, mpv = TRUE), trace = TRUE))
+head(fitted(fit2))
+coef(fit2, matrix = TRUE)
+sqrt(diag(vcov(summary(fit2)))) # Standard errors
# Example 3: Try a nonparametric fit ---------------------
# Use the entire data set, including missing values
-y <- as.matrix(venice[, paste("r", 1:10, sep = "")])
-fit1 <- vgam(y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
+# Same as as.matrix(venice[, paste0("r", 1:10)]):
+Y <- Select(venice, "r", sort = FALSE)
+fit3 <- vgam(Y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
data = venice, trace = TRUE, na.action = na.pass)
-depvar(fit1)[4:5, ] # NAs used to pad the matrix
+depvar(fit3)[4:5, ] # NAs used to pad the matrix
-\dontrun{
-# Plot the component functions
-par(mfrow = c(2, 1), mar = c(5, 4, 0.2, 1) + 0.1, xpd = TRUE)
-plot(fit1, se = TRUE, lcol = "blue", scol = "green", lty = 1,
+\dontrun{ # Plot the component functions
+par(mfrow = c(2, 3), mar = c(6, 4, 1, 2) + 0.3, xpd = TRUE)
+plot(fit3, se = TRUE, lcol = "blue", scol = "limegreen", lty = 1,
lwd = 2, slwd = 2, slty = "dashed")
# Quantile plot --- plots all the fitted values
-par(mfrow = c(1, 1), bty = "l", mar = c(4, 4, 0.2, 3) + 0.1, xpd = TRUE, las = 1)
-qtplot(fit1, mpv = TRUE, lcol = c(1, 2,5), tcol = c(1, 2,5), lwd = 2,
+qtplot(fit3, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5), lwd = 2,
pcol = "blue", tadj = 0.1, ylab = "Sea level (cm)")
# Plot the 99 percentile only
-par(mfrow = c(1, 1), mar = c(3, 4, 0.2, 1) + 0.1, xpd = TRUE)
-year = venice[["year"]]
-matplot(year, y, ylab = "Sea level (cm)", type = "n")
-matpoints(year, y, pch = "*", col = "blue")
-lines(year, fitted(fit1)[,"99\%"], lwd = 2, col = "red")
+year <- venice[["year"]]
+matplot(year, Y, ylab = "Sea level (cm)", type = "n")
+matpoints(year, Y, pch = "*", col = "blue")
+lines(year, fitted(fit3)[, "99\%"], lwd = 2, col = "orange")
# Check the 99 percentiles with a smoothing spline.
# Nb. (1-0.99) * 365 = 3.65 is approx. 4, meaning the 4th order
# statistic is approximately the 99 percentile.
-par(mfrow = c(1, 1), mar = c(3, 4, 2, 1) + 0.1, xpd = TRUE, lwd = 2)
-plot(year, y[, 4], ylab = "Sea level (cm)", type = "n",
- main = "Red is 99 percentile, Green is a smoothing spline")
-points(year, y[, 4], pch = "4", col = "blue")
-lines(year, fitted(fit1)[,"99\%"], lty = 1, col = "red")
-lines(smooth.spline(year, y[, 4], df = 4), col = "darkgreen", lty = 2)
+plot(year, Y[, 4], ylab = "Sea level (cm)", type = "n",
+ main = "Orange is 99 percentile, Green is a smoothing spline")
+points(year, Y[, 4], pch = "4", col = "blue")
+lines(year, fitted(fit3)[, "99\%"], lty = 1, col = "orange")
+lines(smooth.spline(year, Y[, 4], df = 4), col = "limegreen", lty = 2)
}
}
\keyword{models}
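
On the MPV described above: reading "an even chance that the highest level for
a particular year exceeds the MPV" as P(annual maximum > MPV) = 0.5 makes the
MPV the median of the fitted Gumbel distribution of the maximum, which has a
closed form. A sketch under that reading, with hypothetical parameter values
and pgumbel() assumed from the Gumbel distribution functions:

mu <- 100; sigma <- exp(1)                  # Hypothetical location and scale
mpv <- mu - sigma * log(log(2))             # Gumbel median, since F(mpv) = 0.5
pgumbel(mpv, location = mu, scale = sigma)  # Should be exactly 0.5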
diff --git a/man/gumbelII.Rd b/man/gumbelII.Rd
index 6e08164..53237bc 100644
--- a/man/gumbelII.Rd
+++ b/man/gumbelII.Rd
@@ -127,7 +127,7 @@ U.S. Department of Commerce, National Bureau of Standards, USA.
}
\examples{
gdata <- data.frame(x2 = runif(nn <- 1000))
-gdata <- transform(gdata, eta1 = -1,
+gdata <- transform(gdata, eta1 = +1,
eta2 = -1 + 0.1 * x2,
ceta1 = 0,
ceta2 = 1)
@@ -136,11 +136,11 @@ gdata <- transform(gdata, shape1 = exp(eta1),
scale1 = exp(ceta1),
scale2 = exp(ceta2))
gdata <- transform(gdata,
- y1 = rgumbelII(nn, shape = shape1, scale = scale1),
- y2 = rgumbelII(nn, shape = shape2, scale = scale2))
+ y1 = rgumbelII(nn, shape = shape1, scale = scale1),
+ y2 = rgumbelII(nn, shape = shape2, scale = scale2))
fit <- vglm(cbind(y1, y2) ~ x2,
- gumbelII(zero = c(1, 2, 4)), gdata, trace = TRUE)
+ gumbelII(zero = c(1, 2, 4)), data = gdata, trace = TRUE)
coef(fit, matrix = TRUE)
vcov(fit)
summary(fit)
diff --git a/man/hormone.Rd b/man/hormone.Rd
index fec4c92..4375ba0 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -94,11 +94,11 @@ data(hormone)
summary(hormone)
modelI <-rrvglm(Y ~ 1 + X, data = hormone, trace = TRUE,
- uninormal(zero = NULL, lsd = "identity", imethod = 2))
+ uninormal(zero = NULL, lsd = "identitylink", imethod = 2))
# Alternative way to fit modelI
modelI.other <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
- uninormal(zero = NULL, lsd = "identity"))
+ uninormal(zero = NULL, lsd = "identitylink"))
# Inferior to modelI
modelII <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
diff --git a/man/huber.Rd b/man/huber.Rd
index 2a7a906..67e0cb5 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -10,8 +10,8 @@
}
\usage{
-huber1(llocation = "identity", k = 0.862, imethod = 1)
-huber2(llocation = "identity", lscale = "loge",
+huber1(llocation = "identitylink", k = 0.862, imethod = 1)
+huber2(llocation = "identitylink", lscale = "loge",
k = 0.862, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -102,22 +102,22 @@ hdata <- transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2))
hdata$x2[1] <- 0.0 # Add an outlier
hdata$y[1] <- 10
-fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), hdata, trace = TRUE)
-fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), hdata, trace = TRUE)
+fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), data = hdata, trace = TRUE)
+fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), data = hdata, trace = TRUE)
coef(fit.huber2, matrix = TRUE)
summary(fit.huber2)
\dontrun{ # Plot the results
-plot(y ~ x2, hdata, col = "blue", las = 1)
-lines(fitted(fit.huber2) ~ x2, hdata, col = "darkgreen", lwd = 2)
+plot(y ~ x2, data = hdata, col = "blue", las = 1)
+lines(fitted(fit.huber2) ~ x2, data = hdata, col = "darkgreen", lwd = 2)
fit.lm <- lm(y ~ x2, hdata) # Compare to a LM:
-lines(fitted(fit.lm) ~ x2, hdata, col = "lavender", lwd = 3)
+lines(fitted(fit.lm) ~ x2, data = hdata, col = "lavender", lwd = 3)
# Compare to truth:
-lines(coef1 + coef2 * x2 ~ x2, hdata, col = "orange", lwd = 2, lty = "dashed")
+lines(coef1 + coef2 * x2 ~ x2, data = hdata, col = "orange", lwd = 2, lty = "dashed")
legend("bottomright", legend = c("truth", "huber", "lm"),
col = c("orange", "darkgreen", "lavender"),
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index bd82554..f9ed64c 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -94,20 +94,22 @@ Natural exponential families with quadratic variance functions.
%}
\seealso{
-% \code{\link{nefghs}},
\code{\link{elogit}}.
+% \code{\link{nefghs}},
+
+
}
\examples{
hdata <- data.frame(x2 = rnorm(nn <- 200))
hdata <- transform(hdata, y = rnorm(nn)) # Not very good data!
-fit <- vglm(y ~ x2, hypersecant, hdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ x2, hypersecant, data = hdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
fit at misc$earg
# Not recommended:
-fit <- vglm(y ~ x2, hypersecant(link = "identity"), hdata, trace = TRUE)
+fit <- vglm(y ~ x2, hypersecant(link = "identitylink"), data = hdata, trace = TRUE)
coef(fit, matrix = TRUE)
fit at misc$earg
}
diff --git a/man/hzeta.Rd b/man/hzeta.Rd
index 3aebbaf..1d9a320 100644
--- a/man/hzeta.Rd
+++ b/man/hzeta.Rd
@@ -59,7 +59,7 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
}
\references{
- Page 533 of
+ Pages 533--4 of
Johnson N. L., Kemp, A. W. and Kotz S. (2005)
\emph{Univariate Discrete Distributions},
3rd edition,
@@ -75,14 +75,15 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
\code{\link{Hzeta}},
\code{\link{zeta}},
\code{\link{zetaff}},
- \code{\link{loglog}}.
+ \code{\link{loglog}},
+ \code{\link{simulate.vlm}}.
}
\examples{
alpha <- exp(exp(-0.1)) # The parameter
hdata <- data.frame(y = rhzeta(n = 1000, alpha))
-fit <- vglm(y ~ 1, hzeta, hdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, hzeta, data = hdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models; should be same as alpha
c(with(hdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index 7568dcf..505c7ba 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -60,16 +60,16 @@ rhzeta(n, alpha)
}
-\references{
-
- Page 533 of
- Johnson N. L., Kemp, A. W. and Kotz S. (2005)
- \emph{Univariate Discrete Distributions},
- 3rd edition,
- Hoboken, New Jersey: Wiley.
-
-
-}
+%\references{
+%
+% Pages 533--4 of
+% Johnson N. L., Kemp, A. W. and Kotz S. (2005)
+% \emph{Univariate Discrete Distributions},
+% 3rd edition,
+% Hoboken, New Jersey: Wiley.
+%
+%
+%}
\author{ T. W. Yee }
\note{
Given some response data, the \pkg{VGAM} family function
@@ -81,7 +81,8 @@ rhzeta(n, alpha)
\seealso{
\code{\link{hzeta}},
\code{\link{zeta}},
- \code{\link{zetaff}}.
+ \code{\link{zetaff}},
+ \code{\link{simulate.vlm}}.
}
@@ -96,7 +97,8 @@ table(rhzeta(1000, 2))
plot(x, dhzeta(x, alpha = alpha), type = "h", ylim = 0:1, lwd = 2,
sub = paste("alpha =", alpha), las = 1, col = "blue", ylab = "Probability",
main = "Haight's zeta: blue = density; orange = distribution function")
-lines(x+0.1, phzeta(x, alpha = alpha), col = "orange", lty = 3, lwd = 2, type = "h")
+lines(x+0.1, phzeta(x, alpha = alpha), col = "orange", lty = 3, lwd = 2,
+ type = "h")
}
}
\keyword{distribution}
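
A quick consistency check between dhzeta() and phzeta(), assuming the support
begins at 1 as in the plotting example above (an illustrative aside):

alpha <- 1.1; x <- 1:10
max(abs(phzeta(x, alpha) - cumsum(dhzeta(x, alpha))))  # Should be ~0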
diff --git a/man/iam.Rd b/man/iam.Rd
index d3a0a31..7e80ca0 100644
--- a/man/iam.Rd
+++ b/man/iam.Rd
@@ -99,7 +99,9 @@ iam(j, k, M, both = FALSE, diag = TRUE)
}
\seealso{
-\code{\link{vglmff-class}}.
+ \code{\link{vglmff-class}}.
+
+
%\code{ima}.
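
A small illustration of the index mapping that iam() provides for the packed
symmetric-matrix storage used by VGAM; behaviour is assumed from the usage line
iam(j, k, M, both = FALSE, diag = TRUE), and this is not part of the diff:

iam(1, 2, M = 3)                 # Position of element (1,2) in the packed vector
iam(NA, NA, M = 3, both = TRUE)  # Row and column indices of every stored element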
diff --git a/man/identity.Rd b/man/identitylink.Rd
similarity index 87%
rename from man/identity.Rd
rename to man/identitylink.Rd
index 45c5001..c5a4289 100644
--- a/man/identity.Rd
+++ b/man/identitylink.Rd
@@ -1,5 +1,5 @@
-\name{identity}
-\alias{identity}
+\name{identitylink}
+\alias{identitylink}
\alias{negidentity}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Identity Link Function }
@@ -9,8 +9,8 @@
}
\usage{
- identity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
-negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+ negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -47,7 +47,7 @@ negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
\value{
- For \code{identity()}:
+ For \code{identitylink()}:
for \code{deriv = 0}, the identity of \code{theta}, i.e.,
\code{theta} when \code{inverse = FALSE},
and if \code{inverse = TRUE} then \code{theta}.
@@ -57,7 +57,7 @@ negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
else if \code{inverse = TRUE} then it returns the reciprocal.
- For \code{negidentity()}: the results are similar to \code{identity()}
+ For \code{negidentity()}: the results are similar to \code{identitylink()}
except for a sign change in most cases.
@@ -80,9 +80,9 @@ negidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
\examples{
-identity((-5):5)
-identity((-5):5, deriv = 1)
-identity((-5):5, deriv = 2)
+identitylink((-5):5)
+identitylink((-5):5, deriv = 1)
+identitylink((-5):5, deriv = 2)
negidentity((-5):5)
negidentity((-5):5, deriv = 1)
negidentity((-5):5, deriv = 2)
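
The renamed link in action, following the value description above:
identitylink() is its own inverse and negidentity() is a sign flip
(a short usage note, not from the patch):

theta <- (-2):2
identitylink(theta)                  # Returns theta itself
identitylink(theta, inverse = TRUE)  # Also theta
negidentity(theta)                   # Returns -theta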
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 612a090..368c81a 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -107,8 +107,8 @@ idata <- data.frame(x2 = runif(nn <- 1000))
idata <- transform(idata, mymu = exp(2 + 1 * x2),
Lambda = exp(2 + 1 * x2))
idata <- transform(idata, y = rinv.gaussian(nn, mu = mymu, lambda = Lambda))
-fit1 <- vglm(y ~ x2, inv.gaussianff, idata, trace = TRUE)
-rrig <- rrvglm(y ~ x2, inv.gaussianff, idata, trace = TRUE)
+fit1 <- vglm(y ~ x2, inv.gaussianff, data = idata, trace = TRUE)
+rrig <- rrvglm(y ~ x2, inv.gaussianff, data = idata, trace = TRUE)
coef(fit1, matrix = TRUE)
coef(rrig, matrix = TRUE)
Coef(rrig)
diff --git a/man/invbinomial.Rd b/man/invbinomial.Rd
index 73d30cd..760d177 100644
--- a/man/invbinomial.Rd
+++ b/man/invbinomial.Rd
@@ -43,14 +43,16 @@ invbinomial(lrho = elogit(min = 0.5, max = 1),
\eqn{\frac12 < \rho < 1}{0.5 < rho < 1},
and \eqn{\lambda > 0}{lambda > 0}.
The first two moments exist for \eqn{\rho>\frac12}{rho>0.5};
- then the mean is \eqn{\lambda (1-\rho) /(2 \rho-1)}{lambda*(1-rho)/(2*rho-1)}
+ then the mean
+ is \eqn{\lambda (1-\rho) /(2 \rho-1)}{lambda*(1-rho)/(2*rho-1)}
(returned as the fitted values) and the
variance is
\eqn{\lambda \rho (1-\rho) /(2 \rho-1)^3}{lambda*rho*(1-rho)/(2*rho-1)^3}.
The inverse binomial distribution is a special
case of the generalized negative binomial distribution of
Jain and Consul (1971).
- It holds that \eqn{Var(Y) > E(Y)} so that the inverse binomial distribution
+ It holds that \eqn{Var(Y) > E(Y)} so that the
+ inverse binomial distribution
is overdispersed compared with the Poisson distribution.
@@ -106,7 +108,7 @@ results in an EIM that is diagonal.
}
\examples{
idata <- data.frame(y = rnbinom(n <- 1000, mu = exp(3), size = exp(1)))
-fit <- vglm(y ~ 1, invbinomial, idata, trace = TRUE)
+fit <- vglm(y ~ 1, invbinomial, data = idata, trace = TRUE)
with(idata, c(mean(y), head(fitted(fit), 1)))
summary(fit)
coef(fit, matrix = TRUE)
diff --git a/man/invlomax.Rd b/man/invlomax.Rd
index 27aa2c2..def8279 100644
--- a/man/invlomax.Rd
+++ b/man/invlomax.Rd
@@ -85,15 +85,16 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{fisk}},
\code{\link{lomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
idata <- data.frame(y = rinvlomax(n = 2000, exp(2), exp(1)))
-fit <- vglm(y ~ 1, invlomax, idata, trace = TRUE)
-fit <- vglm(y ~ 1, invlomax(iscale = exp(2), ishape2.p = exp(1)), idata,
+fit <- vglm(y ~ 1, invlomax, data = idata, trace = TRUE)
+fit <- vglm(y ~ 1, invlomax(iscale = exp(2), ishape2.p = exp(1)), data = idata,
trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
diff --git a/man/invlomaxUC.Rd b/man/invlomaxUC.Rd
index 3dc40bc..63e6b54 100644
--- a/man/invlomaxUC.Rd
+++ b/man/invlomaxUC.Rd
@@ -37,6 +37,8 @@ rinvlomax(n, scale = 1, shape2.p)
\code{qinvlomax} gives the quantile function, and
\code{rinvlomax} generates random deviates.
+
+
}
\references{
@@ -67,7 +69,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
idata <- data.frame(y = rinvlomax(n = 1000, exp(2), exp(1)))
-fit <- vglm(y ~ 1, invlomax, idata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, invlomax, data = idata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/invparalogistic.Rd b/man/invparalogistic.Rd
index c0fec2e..eec93b0 100644
--- a/man/invparalogistic.Rd
+++ b/man/invparalogistic.Rd
@@ -85,16 +85,17 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{fisk}},
\code{\link{invlomax}},
\code{\link{lomax}},
- \code{\link{paralogistic}}.
+ \code{\link{paralogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, invparalogistic, idata, trace = TRUE)
+fit <- vglm(y ~ 1, invparalogistic, data = idata, trace = TRUE)
fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.7, iscale = 7.3),
- idata, trace = TRUE, epsilon = 1e-8)
+ data = idata, trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/invparalogisticUC.Rd b/man/invparalogisticUC.Rd
index 8ea3906..6c352f1 100644
--- a/man/invparalogisticUC.Rd
+++ b/man/invparalogisticUC.Rd
@@ -70,7 +70,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
- idata, trace = TRUE, crit = "coef")
+ data = idata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/is.parallel.Rd b/man/is.parallel.Rd
index 33ccb1c..d05d352 100644
--- a/man/is.parallel.Rd
+++ b/man/is.parallel.Rd
@@ -60,8 +60,8 @@ is.parallel.vglm(object, type = c("term", "lm"), \dots)
\examples{
-\dontrun{ require(VGAMdata)
-fit <- vglm(educ ~ bs(age) * sex + ethnic,
+\dontrun{ require("VGAMdata")
+fit <- vglm(educ ~ sm.bs(age) * sex + ethnic,
cumulative(parallel = TRUE), xs.nz[1:200, ])
is.parallel(fit)
is.parallel(fit, type = "lm") # For each column of the LM matrix
diff --git a/man/is.smart.Rd b/man/is.smart.Rd
index 95fd53a..8910685 100644
--- a/man/is.smart.Rd
+++ b/man/is.smart.Rd
@@ -12,12 +12,16 @@
\arguments{
\item{object}{
a function or a fitted model.
+
+
}
}
\value{
Returns \code{TRUE} or \code{FALSE}, according to whether the \code{object}
is smart or not.
+
+
}
\details{
If \code{object} is a function then this function looks to see whether
@@ -41,18 +45,16 @@
}
\examples{
is.smart(my1) # TRUE
-is.smart(poly) # TRUE
+is.smart(sm.poly) # TRUE
library(splines)
-is.smart(bs) # TRUE
-is.smart(ns) # TRUE
+is.smart(sm.bs) # TRUE
+is.smart(sm.ns) # TRUE
is.smart(tan) # FALSE
-if(!is.R()) is.smart(lm) # TRUE
\dontrun{
-library(VGAM)
x <- rnorm(9)
-fit1 <- vglm(rnorm(9) ~ x, normal1)
+fit1 <- vglm(rnorm(9) ~ x, uninormal)
is.smart(fit1) # TRUE
-fit2 <- vglm(rnorm(9) ~ x, normal1, smart = FALSE)
+fit2 <- vglm(rnorm(9) ~ x, uninormal, smart = FALSE)
is.smart(fit2) # FALSE
 fit2@smart.prediction
}
diff --git a/man/koenker.Rd b/man/koenker.Rd
index 28e320e..1ac464e 100644
--- a/man/koenker.Rd
+++ b/man/koenker.Rd
@@ -9,7 +9,7 @@
}
\usage{
-koenker(percentile = 50, llocation = "identity", lscale = "loge",
+koenker(percentile = 50, llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -104,15 +104,15 @@ kdata <- data.frame(x2 = sort(runif(nn)))
kdata <- transform(kdata, mylocat = 1 + 3 * x2,
myscale = 1)
kdata <- transform(kdata, y = rkoenker(nn, loc = mylocat, scale = myscale))
-fit <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), kdata, trace = TRUE)
-fit2 <- vglm(y ~ x2, studentt2(df = 2), kdata, trace = TRUE) # 'same' as fit
+fit <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), data = kdata, trace = TRUE)
+fit2 <- vglm(y ~ x2, studentt2(df = 2), data = kdata, trace = TRUE) # 'same' as fit
coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
# Nice plot of the results
-\dontrun{ plot(y ~ x2, kdata, col = "blue", las = 1,
+\dontrun{ plot(y ~ x2, data = kdata, col = "blue", las = 1,
sub = paste("n =", nn),
main = "Fitted quantiles/expectiles using Koenker's distribution")
matplot(with(kdata, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
diff --git a/man/kumar.Rd b/man/kumar.Rd
index 17b5e43..517885c 100644
--- a/man/kumar.Rd
+++ b/man/kumar.Rd
@@ -97,13 +97,15 @@ kumar(lshape1 = "loge", lshape2 = "loge",
\seealso{
\code{\link{dkumar}},
- \code{\link{betaff}}.
+ \code{\link{betaff}},
+ \code{\link{simulate.vlm}}.
+
}
\examples{
shape1 <- exp(1); shape2 <- exp(2)
kdata <- data.frame(y = rkumar(n = 1000, shape1, shape2))
-fit <- vglm(y ~ 1, kumar, kdata, trace = TRUE)
+fit <- vglm(y ~ 1, kumar, data = kdata, trace = TRUE)
c(with(kdata, mean(y)), head(fitted(fit), 1))
coef(fit, matrix = TRUE)
Coef(fit)
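
Several hunks in this region add a cross-reference to the new
simulate.vlm method. A minimal sketch of its use on a kumar fit like
the one above, assuming the method follows the standard
stats::simulate generic (illustrative only, not quoted from the man
pages):

# Sketch only: assumes simulate() dispatches to the new simulate.vlm
# method for "vglm" fits, with the usual stats::simulate signature.
kdata <- data.frame(y = rkumar(n = 1000, exp(1), exp(2)))
fit <- vglm(y ~ 1, kumar, data = kdata)
head(simulate(fit, nsim = 3))  # One column per simulated response set
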
diff --git a/man/lakeO.Rd b/man/lakeO.Rd
new file mode 100644
index 0000000..23963e7
--- /dev/null
+++ b/man/lakeO.Rd
@@ -0,0 +1,110 @@
+\name{lakeO}
+\alias{lakeO}
+\docType{data}
+\title{
+ Annual catches on Lake Otamangakau from October 1974 to October 1989
+
+%% ~~ data name/kind ... ~~
+}
+\description{
+ Rainbow and brown trout catches by a Mr Swainson at
+ Lake Otamangakau in the central North Island of New Zealand
+ during the 1970s and 1980s.
+
+
+%% ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(lakeO)}
+\format{
+ A data frame with 15 observations on the following 5 variables.
+ \describe{
+    \item{\code{year}}{a numeric vector;
+ the season began on 1 October of the year and ended 12 months later.
+% Hence the fishing ended around October 1989.
+
+
+ }
+ \item{\code{total.fish}}{a numeric vector,
+ the total number of fish caught during the season.
+ Simply the sum of brown and rainbow trout.
+
+
+ }
+ \item{\code{brown}}{a numeric vector,
+ the number of brown trout
+ (\emph{Salmo trutta})
+ caught.
+
+
+ }
+ \item{\code{rainbow}}{a numeric vector,
+ the number of rainbow trout
+ (\emph{Oncorhynchus mykiss})
+ caught.
+
+
+ }
+ \item{\code{visits}}{a numeric vector,
+      the number of visits the angler made to the lake during the season.
+      It is necessary to assume that the visits were of equal duration
+      in order to interpret the usual Poisson regressions.
+
+ }
+ }
+}
+\details{
+%% ~~ If necessary, more details than the __description__ above ~~
+
+
+  The data were extracted from the season summaries at Lake Otamangakau
+  by Anthony Swainson
+  for the seasons 1974--75 to 1988--89.
+% Note however that the final year's data
+% was cut off from the scanned version.
+  Mr Swainson was one of a small group of regular fly-fishing anglers
+  and kept a diary of his catches.
+  Lake Otamangakau is a lake of area 1.8 square km with a
+  maximum depth of about 12 m, located
+  in the central North Island of New Zealand.
+ It is trout-infested and known for its trophy-sized fish.
+
+
+ See also \code{\link[VGAMdata]{trapO}}.
+
+
+}
+\source{
+
+ Table 7.2 of the reference below.
+ Thanks to Dr Michel Dedual for a copy of the report and for help
+ reading the final year's data.
+ The report is available from TWY on request.
+
+
+% p.43
+
+
+%% ~~ reference to a publication or URL from which the data were obtained ~~
+}
+\references{
+
+  Dedual, M., MacLean, G., Rowe, D. and Cudby, E.,
+  \emph{The Trout Population and Fishery of Lake Otamangakau---Interim Report}.
+  National Institute of Water and Atmospheric Research,
+  Hamilton, New Zealand.
+  Consultancy Report Project No. ELE70207, December 1996.
+
+
+
+%% ~~ possibly secondary sources and usages ~~
+}
+\examples{
+data(lakeO)
+lakeO
+summary(lakeO)
+}
+\keyword{datasets}
+
+
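
The visits variable above invites a rate interpretation. A minimal
sketch of such a Poisson fit (hypothetical usage, not part of the
upstream example section; poissonff is VGAM's Poisson family):

# Hypothetical illustration: Poisson regression of the rainbow trout
# catch with log(visits) as a covariate, so catches scale with effort.
data(lakeO)
fit.rb <- vglm(rainbow ~ log(visits), poissonff, data = lakeO)
coef(fit.rb, matrix = TRUE)
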
diff --git a/man/laplace.Rd b/man/laplace.Rd
index 231f658..121ebc2 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -8,7 +8,7 @@
}
\usage{
-laplace(llocation = "identity", lscale = "loge",
+laplace(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -111,14 +111,14 @@ Boston: Birkhauser.
\examples{
ldata <- data.frame(y = rlaplace(nn <- 100, loc = 2, scale = exp(1)))
-fit <- vglm(y ~ 1, laplace, ldata, trace = TRUE, crit = "l")
+fit <- vglm(y ~ 1, laplace, data = ldata, trace = TRUE, crit = "l")
coef(fit, matrix = TRUE)
Coef(fit)
with(ldata, median(y))
ldata <- data.frame(x = runif(nn <- 1001))
ldata <- transform(ldata, y = rlaplace(nn, loc = 2, scale = exp(-1 + 1*x)))
-coef(vglm(y ~ x, laplace(iloc = 0.2, imethod = 2, zero = 1), ldata,
+coef(vglm(y ~ x, laplace(iloc = 0.2, imethod = 2, zero = 1), data = ldata,
trace = TRUE), matrix = TRUE)
}
\keyword{models}
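
The recurring "identity" to "identitylink" renaming in the usage
blocks also applies when a link is named explicitly in a call. A
minimal sketch, assuming "identitylink" is the exported replacement
in VGAM 0.9-4:

# Sketch: passing the renamed link by name to the laplace family.
ldata <- data.frame(y = rlaplace(100, location = 2, scale = exp(1)))
fit <- vglm(y ~ 1, laplace(llocation = "identitylink"), data = ldata)
coef(fit, matrix = TRUE)
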
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index d4adbbb..7c2fe02 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -86,7 +86,7 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
}
\section{Warning }{
- If \code{llambda="identity"} then it is possible that the
+ If \code{llambda="identitylink"} then it is possible that the
\code{lambda} estimate becomes less than \eqn{-1}, i.e., out of
bounds. One way to stop this is to choose \code{llambda = "loge"},
however, \code{lambda} is then constrained to be positive.
@@ -101,9 +101,9 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
}
\examples{
ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1)) # Not proper data
-fit <- vglm(y ~ 1, leipnik(ilambda = 1), ldata, trace = TRUE, checkwz = FALSE)
+fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE, checkwz = FALSE)
fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logoff(offset = 1)),
- ldata, trace = TRUE, crit = "coef")
+ data = ldata, trace = TRUE, crit = "coef")
head(fitted(fit))
with(ldata, mean(y))
summary(fit)
diff --git a/man/levy.Rd b/man/levy.Rd
index bf2884a..5913dba 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -92,9 +92,9 @@ ldata <- data.frame(y = delta + mygamma/rnorm(nn)^2) # Levy(mygamma, delta)
with(ldata, sum(y > 1) / length(y)) # Should be 0.6827
with(ldata, sum(y > 2) / length(y)) # Should be 0.5205
-fit <- vglm(y ~ 1, levy(delta = delta), ldata, trace = TRUE) # 1 parameter
+fit <- vglm(y ~ 1, levy(delta = delta), data = ldata, trace = TRUE) # 1 parameter
fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma),
- ldata, trace = TRUE) # 2 parameters
+ data = ldata, trace = TRUE) # 2 parameters
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 2cb4ba0..ff10f36 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -10,7 +10,7 @@
}
\usage{
lgammaff(link = "loge", init.k = NULL)
-lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
+lgamma3ff(llocation = "identitylink", lscale = "loge", lshape = "loge",
ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -123,7 +123,7 @@ New York: Wiley.
}
\examples{
ldata <- data.frame(y = rlgamma(100, k = exp(1)))
-fit <- vglm(y ~ 1, lgammaff, ldata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, lgammaff, data = ldata, trace = TRUE, crit = "coef")
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
@@ -131,7 +131,7 @@ Coef(fit)
ldata <- data.frame(x = runif(nn <- 5000)) # Another example
ldata <- transform(ldata, loc = -1 + 2 * x, Scale = exp(1))
ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, k = exp(0)))
-fit2 <- vglm(y ~ x, lgamma3ff(zero = 2:3), ldata, trace = TRUE, crit = "c")
+fit2 <- vglm(y ~ x, lgamma3ff(zero = 2:3), data = ldata, trace = TRUE, crit = "c")
coef(fit2, matrix = TRUE)
}
\keyword{models}
diff --git a/man/lindUC.Rd b/man/lindUC.Rd
index 2819bc8..67aa2e7 100644
--- a/man/lindUC.Rd
+++ b/man/lindUC.Rd
@@ -7,11 +7,13 @@
\title{The Lindley Distribution}
\description{
Density, cumulative distribution function,
-% quantile function
and
random generation for
the Lindley distribution.
+% quantile function
+
+
}
\usage{
dlind(x, theta, log = FALSE)
@@ -41,10 +43,12 @@ rlind(n, theta)
\value{
\code{dlind} gives the density,
\code{plind} gives the cumulative distribution function, and
-% \code{qlind} gives the quantile function, and
\code{rlind} generates random deviates.
+% \code{qlind} gives the quantile function, and
+
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/lindley.Rd b/man/lindley.Rd
index eb9c839..c9be3a3 100644
--- a/man/lindley.Rd
+++ b/man/lindley.Rd
@@ -80,12 +80,13 @@ Lindley distribution and its application.
\seealso{
\code{\link{dlind}},
\code{\link{gamma2.ab}},
+ \code{\link{simulate.vlm}}.
}
\examples{
ldata <- data.frame(y = rlind(n = 1000, theta = exp(3)))
-fit <- vglm(y ~ 1, lindley, ldata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, lindley, data = ldata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/lino.Rd b/man/lino.Rd
index 9e83b34..fca42ca 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -70,6 +70,7 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Libby, D. L. and Novick, M. R. (1982)
@@ -114,17 +115,20 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
}
\examples{
-ldata1 <- data.frame(y = rbeta(n = 1000, exp(0.5), exp(1))) # ~ standard beta
-fit <- vglm(y ~ 1, lino, ldata1, trace = TRUE)
+ldata <- data.frame(y1 = rbeta(n = 1000, exp(0.5), exp(1))) # ~ standard beta
+fit <- vglm(y1 ~ 1, lino, data = ldata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
summary(fit)
# Nonstandard beta distribution
-ldata2 <- data.frame(y = rlino(n = 1000, shape1 = 2, shape2 = 3, lambda = exp(1)))
-fit <- vglm(y~1, lino(lshape1 = identity, lshape2 = identity, ilamb = 10), ldata2)
-coef(fit, matrix = TRUE)
+ldata <-
+ transform(ldata, y2 = rlino(n = 1000, shape1 = 2, shape2 = 3, lambda = exp(1)))
+fit2 <- vglm(y2 ~ 1, lino(lshape1 = identitylink,
+ lshape2 = identitylink, ilamb = 10),
+ data = ldata)
+coef(fit2, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 80abc2a..9da6467 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -8,7 +8,7 @@
}
\usage{
lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
- llambda = "identity", lmu = "identity", lsigma = "loge",
+ llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
dfmu.init = 4, dfsigma.init = 2, ilambda = 1, isigma = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -54,6 +54,8 @@ lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
+
}
\references{
@@ -68,11 +70,13 @@ Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
Similar notes can be found at \code{\link{lms.bcn}}.
+
}
\section{Warning }{
This \pkg{VGAM} family function comes with the same
@@ -81,6 +85,7 @@ contains further information and examples.
respect to lambda may be incorrect (my calculations do
not agree with the Lopatatzidis and Green manuscript.)
+
}
\seealso{
\code{\link{lms.bcn}},
@@ -96,7 +101,7 @@ contains further information and examples.
\examples{
# This converges, but deplot(fit) and qtplot(fit) do not work
-fit0 <- vglm(BMI ~ bs(age, df = 4), lms.bcg, bmi.nz, trace = TRUE)
+fit0 <- vglm(BMI ~ sm.bs(age, df = 4), lms.bcg, data = bmi.nz, trace = TRUE)
coef(fit0, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 1))
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index eb7f50e..27bc240 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -9,7 +9,7 @@
}
\usage{
lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
- llambda = "identity", lmu = "identity", lsigma = "loge",
+ llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
dfmu.init = 4, dfsigma.init = 2, ilambda = 1,
isigma = NULL, tol0 = 0.001, expectiles = FALSE)
}
@@ -230,16 +230,19 @@ contains further information and examples.
\code{\link{qtplot.lmscreg}},
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
-% \code{\link{bmi.nz}},
\code{\link{alaplace1}},
\code{\link{amlnormal}},
\code{\link{denorm}},
\code{\link{CommonVGAMffArguments}}.
+
+% \code{\link{bmi.nz}},
+
+
}
\examples{
-\dontrun{ require(VGAMdata)
+\dontrun{ require("VGAMdata")
mysubset <- subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
mysubset <- transform(mysubset, BMI = weight / height^2)
BMIdata <- na.omit(mysubset)
@@ -247,12 +250,15 @@ BMIdata <- subset(BMIdata, BMI < 80 & age < 65,
select = c(age, BMI)) # Delete an outlier
summary(BMIdata)
-fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata)
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), data = BMIdata)
+
+par(mfrow = c(1, 2))
+plot(fit, scol = "blue", se = TRUE) # The two centered smooths
head(predict(fit))
head(fitted(fit))
head(BMIdata)
-head(cdf(fit)) # Person 56 is probably overweight, given his age
+head(cdf(fit)) # Person 46 is probably overweight, given his age
100 * colMeans(c(depvar(fit)) < fitted(fit)) # Empirical proportions
# Convergence problems? Try this trick: fit0 is a simpler model used for fit1
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index e252992..9fd40ad 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -9,12 +9,12 @@
}
\usage{
lms.yjn(percentiles = c(25, 50, 75), zero = c(1,3),
- llambda = "identity", lsigma = "loge",
+ llambda = "identitylink", lsigma = "loge",
dfmu.init = 4, dfsigma.init = 2,
ilambda = 1, isigma = NULL, rule = c(10, 5),
yoffset = NULL, diagW = FALSE, iters.diagW = 6)
lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
- llambda = "identity", lmu = "identity", lsigma = "loge",
+ llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
dfmu.init = 4, dfsigma.init = 2, ilambda = 1.0,
isigma = NULL, yoffset = NULL, nsimEIM = 250)
}
@@ -99,6 +99,8 @@ lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
Yeo, I.-K. and Johnson, R. A. (2000)
@@ -132,24 +134,28 @@ Box-Cox transformation cannot handle negative values.
Some other notes can be found at \code{\link{lms.bcn}}.
+
}
\section{Warning }{
The computations are not simple, therefore convergence may fail.
In that case, try different starting values.
+
The generic function \code{predict}, when applied to a
\code{lms.yjn} fit, does not add back the \code{yoffset} value.
+
}
\seealso{
-\code{\link{lms.bcn}},
-\code{\link{lms.bcg}},
-\code{\link{qtplot.lmscreg}},
-\code{\link{deplot.lmscreg}},
-\code{\link{cdf.lmscreg}},
-\code{\link{bmi.nz}},
-\code{\link{amlnormal}}.
+ \code{\link{lms.bcn}},
+ \code{\link{lms.bcg}},
+ \code{\link{qtplot.lmscreg}},
+ \code{\link{deplot.lmscreg}},
+ \code{\link{cdf.lmscreg}},
+ \code{\link{bmi.nz}},
+ \code{\link{amlnormal}}.
+
}
\examples{
@@ -172,7 +178,8 @@ par(mfrow = c(1, 1), lwd = 2)
(aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
-aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue", Attach = TRUE)
+aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
+ Attach = TRUE)
 with(aa@post, deplot) # Contains density function values; == aa@post$deplot
}
}
diff --git a/man/logF.Rd b/man/logF.Rd
index 5573bf7..77ab712 100644
--- a/man/logF.Rd
+++ b/man/logF.Rd
@@ -88,6 +88,9 @@
\code{\link{logff}}.
+% \code{\link{simulate.vlm}}.
+
+
}
\examples{
diff --git a/man/logF.UC.Rd b/man/logF.UC.Rd
index f07745f..946f30d 100644
--- a/man/logF.UC.Rd
+++ b/man/logF.UC.Rd
@@ -3,7 +3,7 @@
% \alias{qnefghs}
\title{ log F Distribution }
\description{
- Density,
+ Density
for the log F distribution.
% quantile function
@@ -42,6 +42,8 @@ dlogF(x, shape1, shape2, log = FALSE)
}
\value{
\code{dlogF} gives the density.
+
+
% \code{pnefghs} gives the distribution function, and
% \code{qnefghs} gives the quantile function, and
% \code{rnefghs} generates random deviates.
@@ -64,6 +66,9 @@ dlogF(x, shape1, shape2, log = FALSE)
\code{\link{hypersecant}}.
+% \code{\link{simulate.vlm}}.
+
+
}
\examples{
\dontrun{ shape1 <- 1.5; shape2 <- 0.5; x <- seq(-5, 8, length = 1001)
diff --git a/man/logLikvlm.Rd b/man/logLikvlm.Rd
new file mode 100644
index 0000000..aab6d45
--- /dev/null
+++ b/man/logLikvlm.Rd
@@ -0,0 +1,113 @@
+\name{logLik.vlm}
+\alias{logLik.vlm}
+%\alias{AICvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Extract Log-likelihood for VGLMs/VGAMs/etc. }
+\description{
+ Calculates the log-likelihood value or the
+ element-by-element contributions of the log-likelihood.
+
+}
+\usage{
+\method{logLik}{vlm}(object, summation = TRUE, \dots)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{
+ Some \pkg{VGAM} object, for example, having
+ class \code{\link{vglmff-class}}.
+
+
+ }
+ \item{summation}{
+ Logical, apply \code{\link[base]{sum}}?
+    If \code{FALSE} then an \eqn{n}-vector or
+ \eqn{n}-row matrix (with the number of responses as
+ the number of columns) is returned.
+ Each element is the contribution to the log-likelihood.
+
+
+ }
+ \item{\dots}{
+ Currently unused.
+ In the future:
+ other possible arguments fed into
+ \code{logLik} in order to compute the log-likelihood.
+
+
+ }
+}
+\details{
+ By default, this function
+ returns the log-likelihood of the object.
+  Thus this code relies on the log-likelihood being defined
+  and computed for the object.
+
+
+
+}
+\value{
+ Returns the log-likelihood of the object.
+  If \code{summation = FALSE} then an \eqn{n}-vector or
+  \eqn{n}-row matrix (with the number of responses as
+  the number of columns) is returned.
+  Each element is the contribution to the log-likelihood.
+  The prior weights are assimilated within the answer.
+
+
+}
+\author{T. W. Yee. }
+\note{
+ Not all \pkg{VGAM} family functions currently have the
+ \code{summation} argument implemented.
+
+
+
+
+}
+
+
+%\references{
+%
+%}
+
+\section{Warning }{
+ Not all \pkg{VGAM} family functions have had the
+ \code{summation} checked.
+
+
+}
+
+\seealso{
+ VGLMs are described in \code{\link{vglm-class}};
+ VGAMs are described in \code{\link{vgam-class}};
+ RR-VGLMs are described in \code{\link{rrvglm-class}};
+ \code{\link[stats]{AIC}}.
+
+
+}
+\examples{
+zdata <- data.frame(x2 = runif(nn <- 50))
+zdata <- transform(zdata, Ps01 = logit(-0.5 , inverse = TRUE),
+ Ps02 = logit( 0.5 , inverse = TRUE),
+ lambda1 = loge(-0.5 + 2*x2, inverse = TRUE),
+ lambda2 = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
+ y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))
+
+with(zdata, table(y1)) # Eyeball the data
+with(zdata, table(y2))
+fit2 <- vglm(cbind(y1, y2) ~ x2, zipoisson(zero = NULL), data = zdata)
+
+logLik(fit2) # Summed over the two responses
+sum(logLik(fit2, sum = FALSE)) # For checking purposes
+(ll.matrix <- logLik(fit2, sum = FALSE)) # nn x 2 matrix
+colSums(ll.matrix) # log-likelihood for each response
+}
+\keyword{models}
+\keyword{regression}
+
+% logLik.vlm(object, summation = TRUE, \dots)
+
+
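
A cross-check of the summation argument on a single response,
re-using zdata from the example above (a sketch, not part of the
man page):

# The per-observation contributions should add up to the usual value.
fit1 <- vglm(y1 ~ x2, zipoisson, data = zdata)  # Single response
ll.vec <- logLik(fit1, summation = FALSE)       # An n-vector
all.equal(sum(ll.vec), c(logLik(fit1)))         # Should be TRUE
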
+
diff --git a/man/logUC.Rd b/man/logUC.Rd
index 6fbb2f5..77df5d2 100644
--- a/man/logUC.Rd
+++ b/man/logUC.Rd
@@ -59,9 +59,12 @@ rlog(n, prob, Smallno = 1.0e-6)
\value{
\code{dlog} gives the density,
\code{plog} gives the distribution function, and
-% \code{qlog} gives the quantile function, and
\code{rlog} generates random deviates.
+
+% \code{qlog} gives the quantile function, and
+
+
}
\references{
diff --git a/man/logff.Rd b/man/logff.Rd
index d621858..10e00e2 100644
--- a/man/logff.Rd
+++ b/man/logff.Rd
@@ -32,8 +32,10 @@ logff(link = "logit", init.c = NULL, zero = NULL)
}
}
\details{
- The logarithmic distribution is based on the logarithmic series,
- and is scaled to a probability function.
+ The logarithmic distribution is
+ a generalized power series distribution that is
+ based specifically on the logarithmic series
+ (scaled to a probability function).
Its probability function is
\eqn{f(y) = a c^y / y}{f(y) = a * c^y / y}, for
\eqn{y=1,2,3,\ldots}{y=1,2,3,...},
@@ -77,9 +79,8 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
The logarithmic distribution is sometimes confused with the log-series
- The logarithmic distribution is sometimes confused with the log-series
distribution. The latter was used by Fisher et al. for species abundance
- data, and has two parameters.
+ data and has two parameters.
}
@@ -89,13 +90,14 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\code{\link[base:Log]{log}},
\code{\link{loge}},
\code{\link{logoff}},
- \code{\link{explogff}}.
+ \code{\link{explogff}},
+ \code{\link{simulate.vlm}}.
}
\examples{
ldata <- data.frame(y = rlog(n = 1000, prob = logit(0.2, inverse = TRUE)))
-fit <- vglm(y ~ 1, logff, ldata, trace = TRUE, crit = "c")
+fit <- vglm(y ~ 1, logff, data = ldata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit)
\dontrun{with(ldata,
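
The probability function in the details hunk above can be checked
numerically against dlog. A small sketch, assuming the standard
logarithmic-series constant a = -1/log(1-c), which is stated here
rather than quoted from the excerpt:

# Numeric check of f(y) = a * c^y / y against dlog().
cc <- 0.5; yy <- 1:10
aa <- -1 / log1p(-cc)          # a = -1/log(1-c), assumed standard form
max(abs(dlog(yy, prob = cc) - aa * cc^yy / yy))  # Should be near 0
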
diff --git a/man/logistic.Rd b/man/logistic.Rd
index 511d605..d2e2ea5 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -11,8 +11,8 @@
}
\usage{
-logistic1(llocation = "identity", scale.arg = 1, imethod = 1)
-logistic2(llocation = "identity", lscale = "loge",
+logistic1(llocation = "identitylink", scale.arg = 1, imethod = 1)
+logistic2(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
@@ -119,7 +119,8 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
\code{\link[stats:Logistic]{rlogis}},
\code{\link{logit}},
\code{\link{cumulative}},
- \code{\link{bilogistic4}}.
+ \code{\link{bilogistic4}},
+ \code{\link{simulate.vlm}}.
}
@@ -127,12 +128,12 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
# Location unknown, scale known
ldata <- data.frame(x2 = runif(nn <- 500))
ldata <- transform(ldata, y1 = rlogis(nn, loc = 1 + 5*x2, scale = exp(2)))
-fit1 <- vglm(y1 ~ x2, logistic1(scale = 4), ldata, trace = TRUE, crit = "c")
+fit1 <- vglm(y1 ~ x2, logistic1(scale = exp(2)), data = ldata, trace = TRUE)
coef(fit1, matrix = TRUE)
# Both location and scale unknown
ldata <- transform(ldata, y2 = rlogis(nn, loc = 1 + 5*x2, scale = exp(0 + 1*x2)))
-fit2 <- vglm(cbind(y1, y2) ~ x2, logistic2, ldata, trace = TRUE)
+fit2 <- vglm(cbind(y1, y2) ~ x2, logistic2, data = ldata, trace = TRUE)
coef(fit2, matrix = TRUE)
vcov(fit2)
summary(fit2)
diff --git a/man/loglapUC.Rd b/man/loglapUC.Rd
index 2477747..0afc598 100644
--- a/man/loglapUC.Rd
+++ b/man/loglapUC.Rd
@@ -88,10 +88,13 @@ Log-Laplace distributions.
\seealso{
\code{\link{dalap}},
\code{\link{alaplace3}},
-% \code{\link{loglaplace3}}.
\code{\link{loglaplace1}}.
+% \code{\link{loglaplace3}}.
+
+
+
}
\examples{
loc <- 0; sigma <- exp(0.5); kappa <- 1
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index d209b7f..c4907a4 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -199,19 +199,23 @@ Log-Laplace distributions.
\examples{
# Example 1: quantile regression of counts with regression splines
set.seed(123); my.k <- exp(0)
-alldat <- data.frame(x2 = sort(runif(n <- 500)))
+adata <- data.frame(x2 = sort(runif(n <- 500)))
mymu <- function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
-alldat <- transform(alldat, y = rnbinom(n, mu = mymu(x2), size = my.k))
+adata <- transform(adata, y = rnbinom(n, mu = mymu(x2), size = my.k))
mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
-fitp <- vglm(y ~ bs(x2, df = mydof), data=alldat, trace = TRUE,
- loglaplace1(tau = mytau, parallelLoc = TRUE)) # halfstepping is usual
+# halfstepping is usual:
+fitp <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE,
+ loglaplace1(tau = mytau, parallelLoc = TRUE))
\dontrun{
par(las = 1) # Plot on a log1p() scale
mylwd <- 1.5
-with(alldat, plot(x2, jitter(log1p(y), factor = 1.5), col = "red", pch = "o",
- main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75))
-with(alldat, matlines(x2, log1p(fitted(fitp)), col = "blue", lty = 1, lwd = mylwd))
+
+
+plot(jitter(log1p(y), factor = 1.5) ~ x2, adata, col = "red", pch = "o",
+ main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75)
+with(adata, matlines(x2, log1p(fitted(fitp)), col = "blue",
+ lty = 1, lwd = mylwd))
finexgrid <- seq(0, 1, len = 201)
for (ii in 1:length(mytau))
lines(finexgrid, col = "darkgreen", lwd = mylwd,
 fitp@extra # Contains useful information
# Example 2: sample proportions
set.seed(123); nnn <- 1000; ssize <- 100 # ssize = 1 will not work!
-alldat <- data.frame(x2 = sort(runif(nnn)))
+adata <- data.frame(x2 = sort(runif(nnn)))
mymu <- function(x) logit( 1.0 + 4*x, inv = TRUE)
-alldat <- transform(alldat, ssize = ssize,
- y2 = rbinom(nnn, size=ssize, prob = mymu(x2)) / ssize)
+adata <- transform(adata, ssize = ssize,
+ y2 = rbinom(nnn, size = ssize, prob = mymu(x2)) / ssize)
mytau <- c(0.25, 0.50, 0.75)
-fit1 <- vglm(y2 ~ bs(x2, df = 3), data=alldat, weights=ssize, trace = TRUE,
- logitlaplace1(tau = mytau, lloc = "cloglog", paral = TRUE))
+fit1 <- vglm(y2 ~ sm.bs(x2, df = 3), data = adata,
+ weights = ssize, trace = TRUE,
+ logitlaplace1(tau = mytau, lloc = "cloglog", paral = TRUE))
\dontrun{
# Check the solution. Note: this may be like comparing apples with oranges.
-plotvgam(fit1, se = TRUE, scol = "red", lcol = "blue", main = "Truth = 'darkgreen'")
+plotvgam(fit1, se = TRUE, scol = "red", lcol = "blue",
+ main = "Truth = 'darkgreen'")
# Centered approximately !
-linkFunctionChar = as.character(fit1@misc$link)
-alldat = transform(alldat, trueFunction=
+linkFunctionChar <- as.character(fit1@misc$link)
+adata <- transform(adata, trueFunction=
theta2eta(theta = mymu(x2), link=linkFunctionChar))
-with(alldat, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen"))
+with(adata, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen"))
# Plot the data + fitted quantiles (on the original scale)
-myylim <- with(alldat, range(y2))
-with(alldat, plot(x2, y2, col = "blue", ylim = myylim, las = 1, pch = ".", cex = 2.5))
-with(alldat, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l"))
+myylim <- with(adata, range(y2))
+plot(y2 ~ x2, adata, col = "blue", ylim = myylim, las = 1,
+ pch = ".", cex = 2.5)
+with(adata, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l"))
 truecol <- rep(1:3, len = fit1@misc$M) # Add the 'truth'
smallxgrid <- seq(0, 1, len = 501)
for (ii in 1:length(mytau))
- lines(smallxgrid, col = truecol[ii], lwd = 2,
- qbinom(p = mytau[ii], prob = mymu(smallxgrid), size = ssize) / ssize)
+ lines(smallxgrid, col = truecol[ii], lwd = 2,
+ qbinom(p = mytau[ii], prob = mymu(smallxgrid), size = ssize) / ssize)
# Plot on the eta (== logit()/probit()/...) scale
-with(alldat, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l"))
+with(adata, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l"))
# Add the 'truth'
for (ii in 1:length(mytau)) {
true.quant <- qbinom(p = mytau[ii], pr = mymu(smallxgrid), si = ssize) / ssize
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index 5e9ab66..184e119 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -9,8 +9,8 @@
}
\usage{
-lognormal(lmeanlog = "identity", lsdlog = "loge", zero = 2)
-lognormal3(lmeanlog = "identity", lsdlog = "loge",
+lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
+lognormal3(lmeanlog = "identitylink", lsdlog = "loge",
powers.try = (-3):3, delta = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -122,29 +122,31 @@ Hoboken, NJ, USA: Wiley-Interscience.
\seealso{
-% \code{\link{lognormal3}},
\code{\link[stats]{rlnorm}},
\code{\link{uninormal}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
+
+
+% \code{\link{lognormal3}},
}
\examples{
ldata <- data.frame(y1 = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
-fit1 <- vglm(y1 ~ 1, lognormal, ldata, trace = TRUE)
+fit1 <- vglm(y1 ~ 1, lognormal, data = ldata, trace = TRUE, crit = "c")
coef(fit1, matrix = TRUE)
Coef(fit1)
ldata2 <- data.frame(x2 = runif(nn <- 1000))
ldata2 <- transform(ldata2, y2 = rlnorm(nn, mean = 0.5, sd = exp(x2)))
-fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), ldata2, trace = TRUE, crit = "c")
+fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), data = ldata2, trace = TRUE)
coef(fit2, matrix = TRUE)
-Coef(fit2)
lambda <- 4
-ldata3 <- data.frame(y3 = lambda + rlnorm(n = 1000, mean = 1.5, sd = exp(-0.8)))
-fit3 <- vglm(y3 ~ 1, lognormal3, ldata3, trace = TRUE, crit = "c")
+ldata3 <- data.frame(y3 = lambda + rlnorm(1000, m = 1.5, sd = exp(-0.8)))
+fit3 <- vglm(y3 ~ 1, lognormal3, data = ldata3, trace = TRUE, crit = "c")
coef(fit3, matrix = TRUE)
summary(fit3)
}
diff --git a/man/lomax.Rd b/man/lomax.Rd
index 290c55e..b419148 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -104,13 +104,14 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{fisk}},
\code{\link{invlomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
ldata <- data.frame(y = rlomax(n = 1000, scale = exp(1), exp(2)))
-fit <- vglm(y ~ 1, lomax, ldata, trace = TRUE)
+fit <- vglm(y ~ 1, lomax, data = ldata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/lomaxUC.Rd b/man/lomaxUC.Rd
index 07a875e..53213da 100644
--- a/man/lomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -69,16 +69,17 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
probs <- seq(0.1, 0.9, by = 0.1)
-max(abs(plomax(qlomax(p = probs, shape3.q = 1), shape3.q = 1) - probs)) # Should be 0
+max(abs(plomax(qlomax(p = probs, shape3.q = 1),
+ shape3.q = 1) - probs)) # Should be 0
\dontrun{ par(mfrow = c(1, 2))
x <- seq(-0.01, 5, len = 401)
-plot(x, dexp(x), type = "l", col = "black", ylab = "", las = 1, ylim = c(0, 3),
+plot(x, dexp(x), type = "l", col = "black", ylab = "", ylim = c(0, 3),
main = "Black is standard exponential, others are dlomax(x, shape3.q)")
lines(x, dlomax(x, shape3.q = 1), col = "orange")
lines(x, dlomax(x, shape3.q = 2), col = "blue")
lines(x, dlomax(x, shape3.q = 5), col = "green")
-legend("topright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+legend("topright", col = c("orange","blue","green"), lty = rep(1, 3),
legend = paste("shape3.q =", c(1, 2, 5)))
plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1,
@@ -86,7 +87,7 @@ plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1,
lines(x, plomax(x, shape3.q = 1), col = "orange")
lines(x, plomax(x, shape3.q = 2), col = "blue")
lines(x, plomax(x, shape3.q = 5), col = "green")
-legend("bottomright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+legend("bottomright", col = c("orange","blue","green"), lty = rep(1, 3),
legend = paste("shape3.q =", c(1, 2, 5)))
}
}
diff --git a/man/lqnorm.Rd b/man/lqnorm.Rd
index 09b2b67..c9ae9e3 100644
--- a/man/lqnorm.Rd
+++ b/man/lqnorm.Rd
@@ -8,7 +8,7 @@
}
\usage{
-lqnorm(qpower = 2, link = "identity",
+lqnorm(qpower = 2, link = "identitylink",
imethod = 1, imu = NULL, shrinkage.init = 0.95)
}
%- maybe also 'usage' for other objects documented here.
@@ -116,7 +116,7 @@ realfun <- function(x) 4 + 5*x
ldata <- transform(ldata, y = realfun(x) + rnorm(nn, sd = exp(-1)))
# Make the first observation an outlier
ldata <- transform(ldata, y = c(4*y[1], y[-1]), x = c(-1, x[-1]))
-fit <- vglm(y ~ x, fam = lqnorm(qpower = 1.2), data = ldata)
+fit <- vglm(y ~ x, lqnorm(qpower = 1.2), data = ldata)
coef(fit, matrix = TRUE)
head(fitted(fit))
 fit@misc$qpower
diff --git a/man/lrtest.Rd b/man/lrtest.Rd
index f59ff75..b4afec7 100644
--- a/man/lrtest.Rd
+++ b/man/lrtest.Rd
@@ -140,9 +140,9 @@
\examples{
set.seed(1)
pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit1 <- vglm(cbind(normal, mild, severe) ~ let , propodds, pneumo)
-fit2 <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, pneumo)
-fit3 <- vglm(cbind(normal, mild, severe) ~ let , cumulative, pneumo)
+fit1 <- vglm(cbind(normal, mild, severe) ~ let , propodds, data = pneumo)
+fit2 <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, data = pneumo)
+fit3 <- vglm(cbind(normal, mild, severe) ~ let , cumulative, data = pneumo)
# Various equivalent specifications of the LR test for testing x3
(ans1 <- lrtest(fit2, fit1))
ans2 <- lrtest(fit2, 2)
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index 22cf5f2..15752f1 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -33,6 +33,8 @@ lvplot.qrrvglm(object, varI.latvar = FALSE, reference = NULL,
\arguments{
\item{object}{
A CQO object.
+
+
% A CQO or UQO object.
@@ -87,10 +89,13 @@ For rank-2 models, points are the optima.
See the \code{col} argument in \code{\link[graphics]{par}}.
}
- \item{pch}{ Either an integer specifying a symbol or a single character
- to be used as the default in plotting points.
- See \code{\link[graphics]{par}}.
- The \code{pch} argument can be of length \eqn{M}, the number of species.
+ \item{pch}{ Either an integer specifying a symbol or a single character
+ to be used as the default in plotting points.
+ See \code{\link[graphics]{par}}.
+ The \code{pch} argument can be of length \eqn{M},
+ the number of species.
+
+
}
\item{llty}{ Line type.
Rank-1 models only.
@@ -133,6 +138,7 @@ For rank-2 models, points are the optima.
variable units). If \code{ellipse} is \code{NULL} or \code{FALSE}
then no ellipse is drawn around the optima.
+
}
\item{Absolute}{ Logical.
If \code{TRUE}, the contours corresponding to \code{ellipse}
@@ -222,7 +228,7 @@ For rank-2 models, points are the optima.
% See \code{\link{Coef.qrrvglm}} for details.
% }
-% \item{ITolerances}{
+% \item{I.tolerances}{
% Logical.
% If \code{TRUE}, the tolerances matrices are transformed so that they are
% the order-\code{Rank} identity matrix. This means that a rank-2
@@ -337,11 +343,10 @@ canonical Gaussian ordination.
}
\examples{
-set.seed(123)
-nn <- 200
-cdata <- data.frame(x2 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
- x3 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
- x4 = rnorm(nn)) # Has mean 0 (needed when ITol=TRUE)
+set.seed(123); nn <- 200
+cdata <- data.frame(x2 = rnorm(nn), # Has mean 0 (needed when I.tol=TRUE)
+ x3 = rnorm(nn), # Has mean 0 (needed when I.tol=TRUE)
+ x4 = rnorm(nn)) # Has mean 0 (needed when I.tol=TRUE)
cdata <- transform(cdata, latvar1 = x2 + x3 - 2*x4,
latvar2 = -x2 + x3 + 0*x4)
# Nb. latvar2 is weakly correlated with latvar1
@@ -356,8 +361,8 @@ cdata <- transform(cdata,
set.seed(111)
# vvv p2 <- cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff,
# vvv data = cdata,
-# vvv Rank=2, ITolerances=TRUE,
-# vvv Crow1positive=c(TRUE,FALSE)) # deviance = 505.81
+# vvv Rank = 2, I.tolerances = TRUE,
+# vvv Crow1positive = c(TRUE, FALSE)) # deviance = 505.81
# vvv if (deviance(p2) > 506) stop("suboptimal fit obtained")
# vvv sort(p2 at misc$deviance.Bestof) # A history of the fits
# vvv Coef(p2)
@@ -369,10 +374,10 @@ lvplot(p2, sites = TRUE, spch = "*", scol = "darkgreen", scex = 1.5,
C = TRUE, Cadj = c(-.3,-.3,1), Clwd = 2, Ccex = 1.4, Ccol = "red",
main = paste("Contours at Abundance = 140 with",
"convex hull of the site scores")) }
-# vvv var(latvar(p2)) # A diagonal matrix, i.e., uncorrelated latent variables
+# vvv var(latvar(p2)) # A diagonal matrix, i.e., uncorrelated latent vars
# vvv var(latvar(p2, varI.latvar = TRUE)) # Identity matrix
-# vvv Tol(p2)[,,1:2] # Identity matrix
-# vvv Tol(p2, varI.latvar = TRUE)[,,1:2] # A diagonal matrix
+# vvv Tol(p2)[, , 1:2] # Identity matrix
+# vvv Tol(p2, varI.latvar = TRUE)[, , 1:2] # A diagonal matrix
}
\keyword{models}
\keyword{regression}
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
index c8e176d..83efc0f 100644
--- a/man/lvplot.rrvglm.Rd
+++ b/man/lvplot.rrvglm.Rd
@@ -162,10 +162,11 @@ nn <- nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
pneumo <- transform(pneumo, slet = scale(log(exposure.time)),
x1 = rnorm(nn), x2 = rnorm(nn), x3 = rnorm(nn))
fit <- rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
- multinomial, pneumo, Rank=2, Corner=FALSE, Uncorrel=TRUE)
+ multinomial, data = pneumo, Rank = 2,
+ Corner = FALSE, Uncorrel = TRUE)
\dontrun{
-lvplot(fit, chull = TRUE, scores = TRUE, clty = 2, ccol = "blue", scol = "red",
- Ccol = "darkgreen", Clwd = 2, Ccex = 2,
+lvplot(fit, chull = TRUE, scores = TRUE, clty = 2, ccol = "blue",
+ scol = "red", Ccol = "darkgreen", Clwd = 2, Ccex = 2,
main = "Biplot of some fictitional data") }
}
\keyword{models}
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 1c1d7b5..16b367a 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -130,7 +130,8 @@ However, this family function is currently numerically fraught.
\seealso{
\code{\link{dmakeham}},
- \code{\link{gompertz}}.
+ \code{\link{gompertz}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/margeff.Rd b/man/margeff.Rd
index bdadf0f..97fa023 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -101,13 +101,13 @@ margeff(object, subset = NULL)
# Not a good example for multinomial() because the response is ordinal!!
ii <- 3; hh <- 1/100
pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
fit <- vglm(cbind(normal, mild, severe) ~ let,
cumulative(reverse = TRUE, parallel = TRUE),
data = pneumo)
fitted(fit)[ii, ]
-mynewdata <- with(pneumo, data.frame(let = let[ii]+hh))
+mynewdata <- with(pneumo, data.frame(let = let[ii] + hh))
(newp <- predict(fit, newdata = mynewdata, type = "response"))
# Compare the difference. Should be the same as hh --> 0.
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index c612bc6..ceaeb8a 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -71,7 +71,7 @@ maxwell(link = "loge", zero = NULL)
}
\examples{
mdata <- data.frame(y = rmaxwell(1000, a = exp(2)))
-fit <- vglm(y ~ 1, maxwell, mdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, maxwell, data = mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index 3c543db..a474747 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -111,7 +111,7 @@ all else fails.
\examples{
mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
-fit <- vglm(y ~ 1, mccullagh89, mdata, trace = TRUE)
+fit <- vglm(y ~ 1, mccullagh89, data = mdata, trace = TRUE)
head(fitted(fit))
with(mdata, mean(y))
summary(fit)
diff --git a/man/micmen.Rd b/man/micmen.Rd
index 443bcfc..fd16337 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -8,7 +8,7 @@
}
\usage{
micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
- imethod = 1, oim = TRUE, link1 = "identity", link2 = "identity",
+ imethod = 1, oim = TRUE, link1 = "identitylink", link2 = "identitylink",
firstDeriv = c("nsimEIM", "rpar"), probs.x = c(0.15, 0.85),
nsimEIM = 500, dispersion = 0, zero = NULL)
}
@@ -137,8 +137,11 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
\seealso{
\code{\link{enzyme}}.
+
+
% \code{skira}.
+
}
\section{Warning }{
This function is not (nor could ever be) entirely reliable.
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index d56de90..1413cc7 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -83,6 +83,11 @@ mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
as \code{\link[graphics]{hist}} can be used as an aid.
+ This \pkg{VGAM} family function is experimental and
+ should be used with care.
+
+
+
}
\author{ T. W. Yee }
@@ -113,7 +118,7 @@ mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
mdata <- data.frame(y1 = rexp(nn <- 1000, lambda1))
mdata <- transform(mdata, y2 = rexp(nn, lambda2))
mdata <- transform(mdata, Y = ifelse(runif(nn) < phi, y1, y2))
-fit <- vglm(Y ~ 1, mix2exp, mdata, trace = TRUE)
+fit <- vglm(Y ~ 1, mix2exp, data = mdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Compare the results with the truth
diff --git a/man/mix2normal.Rd b/man/mix2normal.Rd
index 7bf8c7b..d2d07d0 100644
--- a/man/mix2normal.Rd
+++ b/man/mix2normal.Rd
@@ -8,9 +8,9 @@
}
\usage{
-mix2normal(lphi = "logit", lmu = "identity", lsd = "loge",
- iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
- qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = 1)
+mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
+ iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
+ qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -147,7 +147,8 @@ London: Chapman & Hall.
% the arguments \code{weights} is used to input prior weights.
- This \pkg{VGAM} family function should be used with care.
+ This \pkg{VGAM} family function is experimental and
+ should be used with care.
}
@@ -186,7 +187,7 @@ sd1 <- sd2 <- exp(3)
(phi <- logit(-1, inverse = TRUE))
mdata <- data.frame(y = ifelse(runif(nn) < phi, rnorm(nn, mu1, sd1),
rnorm(nn, mu2, sd2)))
-fit <- vglm(y ~ 1, mix2normal(eq.sd = TRUE), mdata)
+fit <- vglm(y ~ 1, mix2normal(eq.sd = TRUE), data = mdata)
# Compare the results
cfit <- coef(fit)
@@ -197,7 +198,8 @@ round(rbind('Estimated' = c(logit(cfit[1], inverse = TRUE),
# Plot the results
xx <- with(mdata, seq(min(y), max(y), len = 200))
plot(xx, (1-phi) * dnorm(xx, mu2, sd2), type = "l", xlab = "y",
- main = "Orange = estimate, blue = truth", col = "blue", ylab = "Density")
+ main = "Orange = estimate, blue = truth",
+ col = "blue", ylab = "Density")
phi.est <- logit(coef(fit)[1], inverse = TRUE)
sd.est <- exp(coef(fit)[3])
lines(xx, phi*dnorm(xx, mu1, sd1), col = "blue")
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index e60f539..b93bf6b 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -94,6 +94,11 @@ mix2poisson(lphi = "logit", llambda = "loge",
+
+ This \pkg{VGAM} family function is experimental and
+ should be used with care.
+
+
}
\author{ T. W. Yee }
@@ -125,11 +130,11 @@ mix2poisson(lphi = "logit", llambda = "loge",
\examples{
\dontrun{ # Example 1: simulated data
nn <- 1000
-mu1 <- exp(2.5) # also known as lambda1
+mu1 <- exp(2.5) # Also known as lambda1
mu2 <- exp(3)
(phi <- logit(-0.5, inverse = TRUE))
mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2)))
-fit <- vglm(y ~ 1, mix2poisson, mdata)
+fit <- vglm(y ~ 1, mix2poisson, data = mdata)
coef(fit, matrix = TRUE)
# Compare the results with the truth
diff --git a/man/mlogit.Rd b/man/mlogit.Rd
index 9c35158..592d3ec 100644
--- a/man/mlogit.Rd
+++ b/man/mlogit.Rd
@@ -96,7 +96,7 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let,
- multinomial, trace = TRUE, pneumo) # For illustration only!
+ multinomial, trace = TRUE, data = pneumo) # For illustration only!
fitted(fit)
predict(fit)
diff --git a/man/model.framevlm.Rd b/man/model.framevlm.Rd
index 2345d89..25ab920 100644
--- a/man/model.framevlm.Rd
+++ b/man/model.framevlm.Rd
@@ -50,6 +50,8 @@ model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
A \code{\link{data.frame}} containing the variables used in
the \code{object} plus those specified in \code{\dots}.
+
+
}
\seealso{
\code{\link[stats]{model.frame}},
diff --git a/man/morgenstern.Rd b/man/morgenstern.Rd
index 0b620f3..628c3f2 100644
--- a/man/morgenstern.Rd
+++ b/man/morgenstern.Rd
@@ -111,9 +111,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
N <- 1000; mdata <- data.frame(y1 = rexp(N), y2 = rexp(N))
 \dontrun{with(mdata, plot(y1, y2))}
-fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, data = mdata, trace = TRUE)
# This may fail:
-fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE, crit = "coef")
+fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, data = mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 6a30845..fa39a0c 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -266,9 +266,8 @@ coef(fit3b, matrix = TRUE) # "Treatment1" is the reference level
margeff(fit3b)
# Example 4: Fit a rank-1 stereotype model
-data(car.all)
-fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all)
-coef(fit4) # Contains the C matrix
+fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all)
+coef(fit4) # Contains the C matrix
constraints(fit4)$HP # The A matrix
coef(fit4, matrix = TRUE) # The B matrix
Coef(fit4)@C # The C matrix
@@ -277,8 +276,8 @@ Coef(fit4)@A # The A matrix
svd(coef(fit4, matrix = TRUE)[-1, ])$d # This has rank 1; = C %*% t(A)
# Classification (but watch out for NAs in some of the variables):
apply(fitted(fit4), 1, which.max) # Classification
-apply(predict(fit4, car.all, type = "response"), 1, which.max) # Classification
colnames(fitted(fit4))[apply(fitted(fit4), 1, which.max)] # Classification
+apply(predict(fit4, car.all, type = "response"), 1, which.max) # Ditto
# Example 5: The use of the xij argument (aka conditional logit model)
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index a466c2c..bb7b505 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -96,16 +96,16 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
\examples{
nn <- 1000; shape <- exp(0); Scale <- exp(1)
ndata <- data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape)))
-fit <- vglm(y1 ~ 1, nakagami, ndata, trace = TRUE, crit = "coef")
+fit <- vglm(y1 ~ 1, nakagami, data = ndata, trace = TRUE, crit = "coef")
ndata <- transform(ndata, y2 = rnaka(nn, shape = shape, scale = Scale))
-fit <- vglm(y2 ~ 1, nakagami(iscale = 3), ndata, trace = TRUE)
+fit <- vglm(y2 ~ 1, nakagami(iscale = 3), data = ndata, trace = TRUE)
head(fitted(fit))
with(ndata, mean(y2))
coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
\dontrun{ with(ndata,
hist(sy <- sort(y2), prob = TRUE, main = "", xlab = "y", ylim = c(0, 0.6)))
-lines(dnaka(sy, shape = Cfit[1], scale = Cfit[2]) ~ sy, ndata, col = "orange") }
+lines(dnaka(sy, shape = Cfit[1], scale = Cfit[2]) ~ sy, data = ndata, col = "orange") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index c79654d..bf43065 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -158,7 +158,7 @@ head(ndata)
summary(ndata)
fit <- vglm(cbind(y1, y2) ~ x2, negbinomial("nbcanlink", imethod = 3),
- stepsize = 0.5, ndata, # Deliberately slow the convergence rate
+ stepsize = 0.5, data = ndata, # Deliberately slow the convergence rate
maxit = 100, trace = TRUE) # Warning: may converge to a local soln
coef(fit, matrix = TRUE)
summary(fit)
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index e568963..e3e13fe 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -16,8 +16,11 @@ negbinomial(lmu = "loge", lsize = "loge",
parallel = FALSE, shrinkage.init = 0.95, zero = -2)
polya(lprob = "logit", lsize = "loge",
iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
- deviance.arg = FALSE, imethod = 1, shrinkage.init = 0.95, zero = -2)
+ imethod = 1, shrinkage.init = 0.95, zero = -2)
}
+
+% deviance.arg = FALSE,
+
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lmu, lsize, lprob}{
@@ -87,15 +90,30 @@ polya(lprob = "logit", lsize = "loge",
}
\item{deviance.arg}{
- Logical. If \code{TRUE}, the deviance function is attached
- to the object. Under ordinary circumstances, it should be
- left alone because it really assumes the index parameter
- is at the maximum likelihood estimate. Consequently,
- one cannot use that criterion to minimize within the
- IRLS algorithm. It should be set \code{TRUE} only when
+ Logical.
+
+  If \code{TRUE}, the deviance is computed \emph{after} convergence.
+  It works only with the NB-2 model.
+  It is also necessary to set \code{criterion = "coefficients"}
+  or \code{half.step = FALSE},
+  since the deviance criterion cannot be used properly for
+  minimization within the IRLS algorithm.
+ It should be set \code{TRUE} when
used with \code{\link{cqo}} under the fast algorithm.
+
+% Pre-20131212:
+% If \code{TRUE}, the deviance function is attached
+% to the object. Under ordinary circumstances, it should be
+% left alone because it really assumes the index parameter
+% is at the maximum likelihood estimate. Consequently,
+% one cannot use that criterion to minimize within the
+% IRLS algorithm. It should be set \code{TRUE} only when
+% used with \code{\link{cqo}} under the fast algorithm.
+
+
}
\item{imethod}{
An integer with value \code{1} or \code{2} or \code{3} which
@@ -320,8 +338,8 @@ Fitting the negative binomial distribution to biological data.
The function \code{negbinomial} can be used by the fast algorithm in
- \code{\link{cqo}}, however, setting \code{EqualTolerances = TRUE} and
- \code{ITolerances = FALSE} is recommended.
+ \code{\link{cqo}}, however, setting \code{eq.tolerances = TRUE} and
+ \code{I.tolerances = FALSE} is recommended.
% For \code{\link{cqo}} and \code{\link{cao}}, taking the square-root
@@ -383,35 +401,42 @@ Fitting the negative binomial distribution to biological data.
\code{\link{nbcanlink}} (NB-C),
\code{\link{posnegbinomial}},
\code{\link{invbinomial}},
-% \code{\link[MASS]{rnegbin}}.
\code{\link[stats:NegBinomial]{rnbinom}},
\code{\link{nbolf}},
\code{\link{rrvglm}},
\code{\link{cao}},
\code{\link{cqo}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
+
+
+% \code{\link[MASS]{rnegbin}}.
}
\examples{
# Example 1: apple tree data
appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
-fit <- vglm(y ~ 1, negbinomial, appletree, weights = w)
+fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
+ weights = w, crit = "coef")
+fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
+ weights = w, half.step = FALSE) # Alternative method
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
+deviance(fit) # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above
# Example 2: simulated data with multivariate response
ndata <- data.frame(x2 = runif(nn <- 500))
ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
-fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, ndata, trace = TRUE)
+fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
coef(fit1, matrix = TRUE)
# Example 3: large counts so definitely use the nsimEIM argument
ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(12+x2), size = exp(1)))
with(ndata, range(y3)) # Large counts
-fit2 <- vglm(y3 ~ x2, negbinomial(nsimEIM = 100), ndata, trace = TRUE)
+fit2 <- vglm(y3 ~ x2, negbinomial(nsimEIM = 100), data = ndata, trace = TRUE)
coef(fit2, matrix = TRUE)
# Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
@@ -425,7 +450,7 @@ mydata <- transform(mydata, y3 = rnbinom(nn, mu = mu, size = delta0 * mu))
plot(y3 ~ x2, data = mydata, pch = "+", col = 'blue',
main = paste("Var(Y) = ", phi0, " * mu", sep = ""), las = 1) }
nb1 <- vglm(y3 ~ x2 + x3, negbinomial(parallel = TRUE, zero = NULL),
- mydata, trace = TRUE)
+ data = mydata, trace = TRUE)
# Extracting out some quantities:
cnb1 <- coef(nb1, matrix = TRUE)
mydiff <- (cnb1["(Intercept)", "log(size)"] -
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index fa54a13..3d102e6 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -101,8 +101,11 @@ Cambridge: Cambridge University Press.
\code{\link{nbcanlink}} (NB-C model),
\code{\link{quasipoissonff}},
\code{\link{poissonff}},
+ \code{\link[stats:NegBinomial]{rnbinom}},
+ \code{\link{simulate.vlm}}.
+
+
% \code{\link[MASS]{rnegbin}}.
- \code{\link[stats:NegBinomial]{rnbinom}}.
}
@@ -122,13 +125,13 @@ ndata <- transform(ndata,
# Also known as NB-C with size known (Hilbe, 2011)
fit1 <- vglm(y1 ~ x2, negbinomial.size(size = size1, lmu = "nbcanlink"),
- ndata, trace = TRUE, crit = "coef")
+ data = ndata, trace = TRUE, crit = "coef")
coef(fit1, matrix = TRUE)
 head(fit1@misc$size) # size saved here
fit2 <- vglm(cbind(y2, y3, y4) ~ x2,
negbinomial.size(size = c(size2, size3, size4)),
- ndata, trace = TRUE)
+ data = ndata, trace = TRUE)
coef(fit2, matrix = TRUE)
 head(fit2@misc$size) # size saved here
}
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
index 7ba3db6..40dde16 100644
--- a/man/normal.vcm.Rd
+++ b/man/normal.vcm.Rd
@@ -11,7 +11,7 @@
}
\usage{
-normal.vcm(link.list = list("(Default)" = "identity"),
+normal.vcm(link.list = list("(Default)" = "identitylink"),
earg.list = list("(Default)" = list()),
lsd = "loge", lvar = "loge",
esd = list(), evar = list(),
@@ -177,10 +177,11 @@ of such models have been named \emph{varying-coefficient models} (VCMs).
}
\seealso{
- \code{\link{uninormal}},
- \code{\link[stats:lm]{lm}}.
+ \code{\link{uninormal}},
+ \code{\link[stats:lm]{lm}}.
-% \code{link[locfit]{ethanol}}.
+
+% \code{link[locfit]{ethanol}}.
}
@@ -194,7 +195,7 @@ ndata <- transform(ndata,
coeff3 = exp(-0.5), # "loge" link
coeff4 = logoff(+0.5, offset = myoffset, inverse = TRUE), # "logoff" link
coeff5 = 0.50, # "mlogit" link
- coeff6 = 1.00, # "identity" link
+ coeff6 = 1.00, # "identitylink" link
v2 = runif(nn),
v3 = runif(nn),
v4 = runif(nn),
@@ -228,7 +229,7 @@ fit1 <- vglm(y1 ~ 1,
"v2" = "mlogit",
"v3" = "loge",
"v4" = "logoff",
- "(Default)" = "identity",
+ "(Default)" = "identitylink",
"v5" = "mlogit"),
earg.list = list("(Intercept)" = list(),
"v2" = list(),
@@ -250,7 +251,7 @@ fit2 <- vglm(y2 ~ 1 + x2,
"v2" = "mlogit",
"v3" = "logit",
"v4" = "loglog",
- "(Default)" = "identity",
+ "(Default)" = "identitylink",
"v5" = "mlogit"),
earg.list = list("(Intercept)" = list(),
"v2" = list(),
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 6e7839e..eb72730 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,6 +2,18 @@
\alias{notdocumentedyet}
%
%
+% 201405;
+%\alias{sm.bs}
+%\alias{sm.ns}
+%\alias{sm.poly}
+%\alias{sm.scale}
+%\alias{sm.scale.default}
+%
+%
+%
+%
+% 201312;
+% \alias{simulate.vlm}
% 201311;
\alias{family.name}
\alias{family.name.vlm}
@@ -267,14 +279,13 @@
\alias{biplot.qrrvglm}
% \alias{block.diag}
% \alias{borel.tanner}
-\alias{bs}
% \alias{callcaof}
% \alias{callcqof}
% \alias{calldcaof}
% \alias{calldcqof}
% \alias{callduqof}
% \alias{calluqof}
-% \alias{canonical.Blist}
+% \alias{canonical.Hlist}
% \alias{cao.fit}
\alias{car.all}
\alias{care.exp}
@@ -294,7 +305,7 @@
% \alias{cm.zero.vgam}
\alias{coefficients}
\alias{coefqrrvglm}
-\alias{coefvlm}
+% \alias{coefvlm} % 20140124
\alias{coefvsmooth.spline}
\alias{coefvsmooth.spline.fit}
% \alias{constraints.vlm}
@@ -311,7 +322,7 @@
\alias{deplot.vglm}
\alias{deviance}
%\alias{deviance.uqo}
-\alias{deviance.vglm}
+%\alias{deviance.vglm}
\alias{deviance.vlm}
%\alias{df.residual}
%\alias{df.residual_vlm}
@@ -390,7 +401,7 @@
\alias{lms.bcn.control}
\alias{lms.yjn.control}
\alias{lmscreg.control}
-\alias{logLik.vlm}
+% \alias{logLik.vlm}
\alias{logLik.qrrvglm}
% \alias{lv.Coef.cao} 20090505
\alias{latvar.Coef.qrrvglm}
@@ -435,7 +446,6 @@
\alias{nvar.qrrvglm}
\alias{nvar.cao}
\alias{nvar.rcim}
-\alias{ns}
% \alias{num.deriv.rrr}
\alias{persp}
\alias{persp.cao}
@@ -446,7 +456,6 @@
\alias{plotvsmooth.spline}
% \alias{pnorm2} done 20120910
% \alias{poissonqn}
-\alias{poly}
\alias{predict}
\alias{predict.cao}
\alias{predict.glm}
@@ -519,7 +528,6 @@
% \alias{rrvglm.fit}
\alias{ResSS.vgam}
\alias{s.vam}
-\alias{scale.default}
\alias{simple.exponential}
\alias{simple.poisson}
\alias{size.binomial}
@@ -631,7 +639,7 @@
}
%\usage{
-%uninormal(lmean = "identity", lsd = "loge", zero = NULL)
+%uninormal(lmean = "identitylink", lsd = "loge", zero = NULL)
%}
%- maybe also 'usage' for other objects documented here.
%\arguments{
@@ -652,6 +660,7 @@
Each object/method/class may or may not have its own individual value.
These will be documented over time.
+
}
%\references{
%}
@@ -661,8 +670,9 @@
%
%}
%\seealso{
-% \code{gaussianff},
-% \code{\link{posnormal}}.
+% \code{gaussianff},
+% \code{\link{posnormal}}.
+%
%}
%\examples{
%}
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 8cd7b88..79dcf2f 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -5,6 +5,8 @@
\description{
Maximum likelihood estimation of the 2-parameter
paralogistic distribution.
+
+
}
\usage{
paralogistic(lshape1.a = "loge", lscale = "loge",
@@ -90,9 +92,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, paralogistic, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, paralogistic, data = pdata, trace = TRUE)
fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 7),
- pdata, trace = TRUE, epsilon = 1e-8)
+ data = pdata, trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/paralogisticUC.Rd b/man/paralogisticUC.Rd
index e68aaa7..22a37d0 100644
--- a/man/paralogisticUC.Rd
+++ b/man/paralogisticUC.Rd
@@ -68,7 +68,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.1), pdata, trace = TRUE)
+fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.1), data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/paretoff.Rd b/man/paretoff.Rd
index fc7f375..b9d0cd1 100644
--- a/man/paretoff.Rd
+++ b/man/paretoff.Rd
@@ -172,7 +172,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\examples{
alpha <- 2; kay <- exp(3)
pdata <- data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
-fit <- vglm(y ~ 1, paretoff, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, paretoff, data = pdata, trace = TRUE)
fit@extra  # The estimate of alpha is here
head(fitted(fit))
with(pdata, mean(y))
@@ -180,7 +180,7 @@ coef(fit, matrix = TRUE)
summary(fit) # Standard errors are incorrect!!
# Here, alpha is assumed known
-fit2 <- vglm(y ~ 1, paretoff(location = alpha), pdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, paretoff(location = alpha), data = pdata, trace = TRUE)
fit2@extra  # alpha stored here
head(fitted(fit2))
coef(fit2, matrix = TRUE)
diff --git a/man/perks.Rd b/man/perks.Rd
index 8b5bc00..ee82bbe 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -119,7 +119,8 @@ Also, monitor convergence by setting \code{trace = TRUE}.
}
\seealso{
- \code{\link{dperks}}.
+ \code{\link{dperks}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index a998289..e43f9d7 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -205,18 +205,18 @@ canonical Gaussian ordination.
}
\examples{\dontrun{
-hspider[, 1:6] <- scale(hspider[, 1:6]) # Good idea when ITolerances = TRUE
+hspider[, 1:6] <- scale(hspider[, 1:6]) # Good idea when I.tolerances = TRUE
set.seed(111)
r1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- poissonff, data = hspider, trace = FALSE, ITolerances = TRUE)
+ poissonff, data = hspider, trace = FALSE, I.tolerances = TRUE)
set.seed(111) # r2 below is an ill-conditioned model
r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
isd.lv = c(2.4, 1.0), Muxfactor = 3.0, trace = FALSE,
- poissonff, data = hspider, Rank = 2, EqualTolerances = TRUE)
+ poissonff, data = hspider, Rank = 2, eq.tolerances = TRUE)
sort(r1@misc$deviance.Bestof)  # A history of the fits
sort(r2@misc$deviance.Bestof)  # A history of the fits
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index 9c5a02b..308753c 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -10,7 +10,7 @@ plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"),
ask = FALSE,
main = paste(Rtype, "residuals vs latent variable(s)"),
xlab = "Latent Variable",
- ITolerances = object@control$EqualTolerances, ...)
+ I.tolerances = object@control$eq.tolerances, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,8 +21,8 @@ plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"),
key for the next plot. }
\item{main}{ Character string giving the title of the plot. }
\item{xlab}{ Character string giving the x-axis caption. }
- \item{ITolerances}{ Logical. This argument is fed into
- \code{Coef(object, ITolerances = ITolerances)}.
+ \item{I.tolerances}{ Logical. This argument is fed into
+ \code{Coef(object, I.tolerances = I.tolerances)}.
}
\item{\dots}{ Other plotting arguments (see \code{\link[graphics]{par}}). }
}
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
index e5d3734..21597a3 100644
--- a/man/plotqtplot.lmscreg.Rd
+++ b/man/plotqtplot.lmscreg.Rd
@@ -95,6 +95,8 @@ contains further information and examples.
\seealso{
\code{\link{qtplot.lmscreg}}.
+
+
}
\examples{\dontrun{
diff --git a/man/plotrcim0.Rd b/man/plotrcim0.Rd
index 82a42ec..1030c52 100644
--- a/man/plotrcim0.Rd
+++ b/man/plotrcim0.Rd
@@ -201,6 +201,8 @@
\code{\link{Rcim}},
\code{\link{rcim}}.
+
+
}
\examples{
alcoff.e <- moffset(alcoff, "6", "Mon", postfix = "*") # Effective day
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index 9288072..2b2e689 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -208,7 +208,7 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
\examples{
coalminers <- transform(coalminers, Age = (age - 42) / 5)
fit <- vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age),
- binom2.or(zero = NULL), coalminers)
+ binom2.or(zero = NULL), data = coalminers)
\dontrun{ par(mfrow = c(1,3))
plot(fit, se = TRUE, ylim = c(-3, 2), las = 1)
plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange",
diff --git a/man/plotvglm.Rd b/man/plotvglm.Rd
index dd878da..60c0a66 100644
--- a/man/plotvglm.Rd
+++ b/man/plotvglm.Rd
@@ -95,8 +95,8 @@ plotvglm(x, type = c("vglm", "vgam"),
}
\examples{
coalminers <- transform(coalminers, Age = (age - 42) / 5)
-fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ bs(Age),
- binom2.or(zero = NULL), coalminers)
+fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ sm.bs(Age),
+ binom2.or(zero = NULL), data = coalminers)
\dontrun{ par(mfrow = c(1, 3))
plot(fit, type = "vgam", se = TRUE, ylim = c(-3, 2), las = 1)
plot(fit, type = "vgam", se = TRUE, which.cf = 1:2,
diff --git a/man/pneumo.Rd b/man/pneumo.Rd
index a32798d..802201f 100644
--- a/man/pneumo.Rd
+++ b/man/pneumo.Rd
@@ -47,6 +47,6 @@ data set, the two most severe categories were combined.
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo <- transform(pneumo, let = log(exposure.time))
-vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
+vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
}
\keyword{datasets}
diff --git a/man/poisson.points.Rd b/man/poisson.points.Rd
index 56f9756..11e69d6 100644
--- a/man/poisson.points.Rd
+++ b/man/poisson.points.Rd
@@ -113,11 +113,11 @@ poisson.points(ostatistic, dimension = 2, link = "loge",
\examples{
pdata <- data.frame(y = rgamma(10, shape = exp(-1))) # Not proper data!
ostat <- 2
-fit <- vglm(y ~ 1, poisson.points(ostat, 2), pdata,
+fit <- vglm(y ~ 1, poisson.points(ostat, 2), data = pdata,
trace = TRUE, crit = "coef")
-fit <- vglm(y ~ 1, poisson.points(ostat, 3), pdata,
+fit <- vglm(y ~ 1, poisson.points(ostat, 3), data = pdata,
trace = TRUE, crit = "coef") # Slow convergence?
-fit <- vglm(y ~ 1, poisson.points(ostat, 3, idensi = 1), pdata,
+fit <- vglm(y ~ 1, poisson.points(ostat, 3, idensi = 1), data = pdata,
trace = TRUE, crit = "coef")
head(fitted(fit))
with(pdata, mean(y))
diff --git a/man/poisson.pointsUC.Rd b/man/poisson.pointsUC.Rd
index d847f5c..ce6254f 100644
--- a/man/poisson.pointsUC.Rd
+++ b/man/poisson.pointsUC.Rd
@@ -7,11 +7,13 @@
\title{Poisson Points Distribution}
\description{
Density
-% distribution function, quantile function
-% and random generation
for the
PoissonPoints distribution.
+
+% distribution function, quantile function
+% and random generation
+
}
\usage{
dpois.points(x, lambda, ostatistic, dimension = 2, log = FALSE)
@@ -50,6 +52,8 @@ dpois.points(x, lambda, ostatistic, dimension = 2, log = FALSE)
}
\value{
\code{dpois.points} gives the density. % and
+
+
% \code{ppois.points} gives the distribution function,
% \code{qpois.points} gives the quantile function, and
% \code{rpois.points} generates random deviates.
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index dab71c8..bb58ecd 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -154,6 +154,7 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
\code{\link{ordpoisson}},
\code{\link{amlpoisson}},
\code{\link{invbinomial}},
+ \code{\link{simulate.vlm}},
\code{\link{loge}},
\code{\link{polf}},
\code{\link{rrvglm}},
@@ -175,8 +176,8 @@ set.seed(123)
pdata <- data.frame(x2 = rnorm(nn <- 100))
pdata <- transform(pdata, y1 = rpois(nn, exp(1 + x2)),
y2 = rpois(nn, exp(1 + x2)))
-(fit1 <- vglm(cbind(y1, y2) ~ x2, family = poissonff, pdata))
-(fit2 <- vglm(y1 ~ x2, family = poissonff(bred = TRUE), pdata))
+(fit1 <- vglm(cbind(y1, y2) ~ x2, poissonff, data = pdata))
+(fit2 <- vglm(y1 ~ x2, poissonff(bred = TRUE), data = pdata))
coef(fit1, matrix = TRUE)
coef(fit2, matrix = TRUE)
@@ -194,6 +195,6 @@ cdata <- transform(cdata, y1 = rpois(nn, lambda1),
\keyword{models}
\keyword{regression}
-%# vvv p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, cdata,
-%# vvv EqualTol = FALSE, ITol = FALSE)
+%# vvv p1 <- cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, data = cdata,
+%# vvv eq.tol = FALSE, I.tol = FALSE)
%# vvv summary(p1) # # Three dispersion parameters are all unity
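
Several \seealso lists in this commit (perks, poissonff, posbinomial,
posnegbinomial, pospoisson, rayleigh, riceff, ...) now cross-reference
the new simulate.vlm() method documented near the end of this diff. A
minimal sketch of what it enables, assuming the method mirrors
stats::simulate():

set.seed(123)
pdata <- data.frame(x2 = rnorm(100))
pdata <- transform(pdata, y1 = rpois(100, lambda = exp(1 + x2)))
fit <- vglm(y1 ~ x2, poissonff, data = pdata)
sims <- simulate(fit, nsim = 3, seed = 123)  # One column per simulation
head(sims)
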
diff --git a/man/posbernUC.Rd b/man/posbernUC.Rd
index 6e757e2..c409518 100644
--- a/man/posbernUC.Rd
+++ b/man/posbernUC.Rd
@@ -12,7 +12,8 @@
}
\usage{
rposbern(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2),
- cap.effect = 1, is.popn = FALSE, link = "logit", earg.link = FALSE)
+ Xmatrix = NULL, cap.effect = 1, is.popn = FALSE,
+ link = "logit", earg.link = FALSE)
dposbern(x, prob, prob0 = prob, log = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -50,6 +51,12 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
}
+ \item{Xmatrix}{
+ Optional \bold{X} matrix.
+ If given, the \bold{X} matrix is not generated internally.
+
+
+ }
\item{cap.effect}{
Numeric, the capture effect.
Added to the linear predictor if captured previously.
@@ -157,12 +164,14 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
}
\seealso{
-% \code{\link{huggins91}},
\code{\link{posbernoulli.tb}},
\code{\link{posbernoulli.b}},
\code{\link{posbernoulli.t}}.
+% \code{\link{huggins91}},
+
+
}
\examples{
rposbern(n = 10)
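
The new Xmatrix argument to rposbern() lets the caller supply the design
matrix instead of having one generated internally. A hedged sketch,
assuming Xmatrix needs n rows and pvars columns (including the intercept
column):

set.seed(1)
X <- cbind(1, runif(20), runif(20))  # Assumed: intercept plus pvars-1 covariates
pdat <- rposbern(n = 20, nTimePts = 5, pvars = 3, Xmatrix = X)
head(pdat)
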
diff --git a/man/posbernoulli.b.Rd b/man/posbernoulli.b.Rd
index e3fc2d7..ddd1437 100644
--- a/man/posbernoulli.b.Rd
+++ b/man/posbernoulli.b.Rd
@@ -169,6 +169,8 @@ posbernoulli.b(link = "logit", drop.b = FALSE ~ 1,
\code{\link{posbinomial}},
\code{\link{aux.posbernoulli.t}},
\code{\link{prinia}}.
+
+
% \code{\link{huggins91}}.
% \code{\link{vglm.control}} for \code{xij},
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
index 2e6aef9..5605cf1 100644
--- a/man/posbernoulli.t.Rd
+++ b/man/posbernoulli.t.Rd
@@ -223,6 +223,7 @@ capture--recapture experiments.
\seealso{
\code{\link{posbernoulli.b}},
\code{\link{posbernoulli.tb}},
+ \code{\link{Select}},
\code{\link{deermice}},
\code{\link{Huggins89table1}},
\code{\link{Huggins89.t1}},
@@ -232,6 +233,8 @@ capture--recapture experiments.
\code{\link{AICvlm}},
\code{\link{BICvlm}},
\code{\link{prinia}}.
+
+
% \code{\link{aux.posbernoulli.t}},
% \code{\link{vglm.control}} for \code{xij},
% \code{\link{huggins91}}.
@@ -246,7 +249,7 @@ coef(M.t, matrix = TRUE)
constraints(M.t, matrix = TRUE)
summary(M.t, presid = FALSE)
-M.h.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
+M.h.1 <- vglm(Select(deermice, "y") ~ sex + weight, trace = TRUE,
posbernoulli.t(parallel.t = FALSE ~ -1), data = deermice)
coef(M.h.1, matrix = TRUE)
constraints(M.h.1)
@@ -270,7 +273,7 @@ round(M.th.2@extra$N.hat + c(-1, 1) * 1.96 * M.th.2@extra$SE.N.hat, 1)
# Fit a M_h model, effectively the parallel M_t model, using posbinomial()
deermice <- transform(deermice, ysum = y1 + y2 + y3 + y4 + y5 + y6,
- tau = 6)
+ tau = 6)
M.h.3 <- vglm(cbind(ysum, tau - ysum) ~ sex + weight,
posbinomial(omit.constant = TRUE), data = deermice, trace = TRUE)
max(abs(coef(M.h.1) - coef(M.h.3))) # Should be zero
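
The M.h.1 fit above now builds its multivariate response with the new
Select() function instead of an explicit cbind(y1, ..., y6). A sketch of
the intended equivalence, assuming Select(data, prefix) simply
column-binds all variables whose names begin with the prefix:

r1 <- with(deermice, cbind(y1, y2, y3, y4, y5, y6))
r2 <- Select(deermice, "y")        # All columns of deermice beginning with "y"
all.equal(unname(r1), unname(r2))  # Expected TRUE
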
diff --git a/man/posbernoulli.tb.Rd b/man/posbernoulli.tb.Rd
index 39099e5..08c7bc1 100644
--- a/man/posbernoulli.tb.Rd
+++ b/man/posbernoulli.tb.Rd
@@ -195,9 +195,10 @@ posbernoulli.tb(link = "logit",
\code{\link{posbernoulli.b}} (including \code{N.hat}),
\code{\link{posbernoulli.t}},
\code{\link{posbinomial}},
+ \code{\link{Select}},
\code{\link{Huggins89table1}},
\code{\link{Huggins89.t1}},
- \code{\link{deermice}}.
+ \code{\link{deermice}},
\code{\link{prinia}}.
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 4465632..343613e 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -74,6 +74,8 @@ rposbinom(n, size, prob)
\code{pposbinom} gives the distribution function,
\code{qposbinom} gives the quantile function, and
\code{rposbinom} generates random deviates.
+
+
}
%\references{
%None.
@@ -141,7 +143,7 @@ with(pdata, table(y1))
with(pdata, table(y2))
# Multivariate response
fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(mv = TRUE),
- trace = TRUE, pdata, weight = cbind(sizev1, sizev2))
+ trace = TRUE, data = pdata, weight = cbind(sizev1, sizev2))
coef(fit2, matrix = TRUE)
}
\keyword{distribution}
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index 3e7eacd..1a7a9f6 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -130,7 +130,8 @@ Drapers Company Research Memoirs.
\code{\link{posbernoulli.t}},
\code{\link{posbernoulli.tb}},
\code{\link{binomialff}},
- \code{\link{AICvlm}}, \code{\link{BICvlm}}.
+ \code{\link{AICvlm}}, \code{\link{BICvlm}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/posgeomUC.Rd b/man/posgeomUC.Rd
index 8cc9948..b794903 100644
--- a/man/posgeomUC.Rd
+++ b/man/posgeomUC.Rd
@@ -77,15 +77,17 @@ rposgeom(n, prob)
%}
\seealso{
-% \code{posgeometric},
\code{\link{zageometric}},
\code{\link{zigeometric}},
\code{\link[stats:Geometric]{rgeom}}.
+% \code{posgeometric},
+
+
}
\examples{
-prob <- 0.75; y = rposgeom(n = 1000, prob)
+prob <- 0.75; y <- rposgeom(n = 1000, prob)
table(y)
mean(y) # Sample mean
1 / prob # Population mean
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 7eeb2df..4aadb12 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -69,8 +69,10 @@ rposnegbin(n, size, prob = NULL, munb = NULL)
\code{qposnegbin} gives the quantile function, and
\code{rposnegbin} generates \eqn{n} random deviates.
+
}
\references{
+
Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
D. B. (1996)
Modelling the abundances of rare species: statistical models
@@ -80,6 +82,7 @@ for counts with extra zeros.
297--308.
+
}
\author{ T. W. Yee }
%\note{
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index f0f4ee3..bea6cef 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -128,11 +128,14 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
\code{\link{pospoisson}},
\code{\link{negbinomial}},
\code{\link{zanegbinomial}},
-% \code{\link[MASS]{rnegbin}}.
\code{\link[stats:NegBinomial]{rnbinom}},
\code{\link{CommonVGAMffArguments}},
\code{\link{corbet}},
- \code{\link{logff}}.
+ \code{\link{logff}},
+ \code{\link{simulate.vlm}}.
+
+
+% \code{\link[MASS]{rnegbin}}.
}
@@ -142,7 +145,7 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
pdata <- data.frame(x2 = runif(nn <- 1000))
pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x2), size = exp(1)),
y2 = rposnegbin(nn, munb = exp(1+2*x2), size = exp(3)))
-fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, pdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)
dim(depvar(fit)) # dim(fit at y) is not as good
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index 0d20986..02c62d4 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -34,6 +34,7 @@ rposnorm(n, mean = 0, sd = 1)
\code{qposnorm} gives the quantile function, and
\code{rposnorm} generates random deviates.
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/posnormal.Rd b/man/posnormal.Rd
index 86f3ddf..62df13f 100644
--- a/man/posnormal.Rd
+++ b/man/posnormal.Rd
@@ -6,7 +6,7 @@
Fits a positive (univariate) normal distribution.
}
\usage{
-posnormal(lmean = "identity", lsd = "loge",
+posnormal(lmean = "identitylink", lsd = "loge",
imean = NULL, isd = NULL, nsimEIM = 100, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -119,7 +119,7 @@ pdata <- transform(pdata, y = rposnorm(n <- 1000, m = m, sd = SD))
\dontrun{with(pdata, hist(y, prob = TRUE, border = "blue",
main = paste("posnorm(m =", m[1], ", sd =", round(SD[1], 2),")"))) }
-fit <- vglm(y ~ 1, fam = posnormal, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, posnormal, data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
mygrid <- with(pdata, seq(min(y), max(y), len = 200)) # Add the fit to the histogram
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index bcb3776..497f8c6 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -80,14 +80,15 @@ contains further information and examples.
\code{\link{Pospois}},
\code{\link{posnegbinomial}},
\code{\link{poissonff}},
- \code{\link{zipoisson}}.
+ \code{\link{zipoisson}},
+ \code{\link{simulate.vlm}}.
}
\examples{
# Data from Coleman and James (1961)
cjdata <- data.frame(y = 1:6, freq = c(1486, 694, 195, 37, 10, 1))
-fit <- vglm(y ~ 1, pospoisson, cjdata, weights = freq)
+fit <- vglm(y ~ 1, pospoisson, data = cjdata, weights = freq)
Coef(fit)
summary(fit)
fitted(fit)
@@ -96,7 +97,7 @@ pdata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data
pdata <- transform(pdata, lambda = exp(1 - 2 * x2))
pdata <- transform(pdata, y1 = rpospois(nn, lambda))
with(pdata, table(y1))
-fit <- vglm(y1 ~ x2, pospoisson, pdata, trace = TRUE, crit = "coef")
+fit <- vglm(y1 ~ x2, pospoisson, data = pdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/predictqrrvglm.Rd b/man/predictqrrvglm.Rd
index 3ce3e16..e462fb5 100644
--- a/man/predictqrrvglm.Rd
+++ b/man/predictqrrvglm.Rd
@@ -76,19 +76,20 @@ canonical Gaussian ordination.
}
\examples{
-hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental vars
set.seed(1234)
-# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
-# vvv Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
-# vvv WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-# vvv fam=poissonff, data=hspider, Crow1positive=FALSE, ITol=TRUE)
+# vvv p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute,
+# vvv Arctperi, Auloalbi, Pardlugu, Pardmont,
+# vvv Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+# vvv WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+# vvv poissonff, data = hspider, Crow1positive = FALSE, I.tol = TRUE)
# vvv sort(p1@misc$deviance.Bestof) # A history of all the iterations
# vvv head(predict(p1))
# The following should be all zeros
-# vvv max(abs(predict(p1, new=head(hspider)) - head(predict(p1))))
-# vvv max(abs(predict(p1, new=head(hspider), type="res") - head(fitted(p1))))
+# vvv max(abs(predict(p1, new = head(hspider)) - head(predict(p1))))
+# vvv max(abs(predict(p1, new = head(hspider), type = "res")-head(fitted(p1))))
}
\keyword{models}
\keyword{regression}
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index 0af4793..fb45585 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -8,7 +8,7 @@
}
\usage{
-prentice74(llocation = "identity", lscale = "loge", lshape = "identity",
+prentice74(llocation = "identitylink", lscale = "loge", lshape = "identitylink",
ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
}
%- maybe also 'usage' for other objects documented here.
@@ -115,7 +115,7 @@ else \eqn{q < 0} is right skew.
pdata <- data.frame(x2 = runif(nn <- 1000))
pdata <- transform(pdata, loc = -1 + 2*x2, Scale = exp(1))
pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, k = 1))
-fit <- vglm(y ~ x2, prentice74(zero = 2:3), pdata, trace = TRUE)
+fit <- vglm(y ~ x2, prentice74(zero = 2:3), data = pdata, trace = TRUE)
coef(fit, matrix = TRUE) # Note the coefficients for location
}
\keyword{models}
diff --git a/man/prinia.Rd b/man/prinia.Rd
index 8e68b89..ff3f5f1 100644
--- a/man/prinia.Rd
+++ b/man/prinia.Rd
@@ -38,12 +38,12 @@ data(prinia)
}
- \item{y1, y2, y3, y4, y5, y6}{
+ \item{y01, y02, y03, y04, y05, y06}{
a numeric vector of 0s and 1s, for noncapture and capture respectively.
}
- \item{y7, y8, y9, y10, y11, y12}{
+ \item{y07, y08, y09, y10, y11, y12}{
same as above.
@@ -119,8 +119,8 @@ fit1 <- vglm(cbind(cap, noncap) ~ length + fat, posbinomial, data = prinia)
# Fit another positive-binomial distribution (M.h) to the data:
# The response input is suitable for posbernoulli.*-type functions.
-fit2 <- vglm(cbind( y1, y2, y3, y4, y5, y6, y7, y8, y9,
- y10, y11, y12, y13, y14, y15, y16, y17, y18, y19) ~
+fit2 <- vglm(cbind(y01, y02, y03, y04, y05, y06, y07, y08, y09, y10,
+ y11, y12, y13, y14, y15, y16, y17, y18, y19) ~
length + fat, posbernoulli.b(drop.b = FALSE ~ 0), data = prinia)
}
\keyword{datasets}
diff --git a/man/propodds.Rd b/man/propodds.Rd
index 34c060d..0901c3c 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -84,7 +84,7 @@ contains further information and examples.
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
depvar(fit) # Sample proportions
weights(fit, type = "prior") # Number of observations
coef(fit, matrix = TRUE)
@@ -92,12 +92,12 @@ constraints(fit) # Constraint matrices
summary(fit)
# Check that the model is linear in let ----------------------
-fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, pneumo)
+fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, data = pneumo)
\dontrun{ plot(fit2, se = TRUE, lcol = 2, scol = 2) }
# Check the proportional odds assumption with a LRT ----------
(fit3 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+ cumulative(parallel = FALSE, reverse = TRUE), data = pneumo))
pchisq(deviance(fit) - deviance(fit3),
df = df.residual(fit) - df.residual(fit3), lower.tail = FALSE)
lrtest(fit3, fit) # Easier
diff --git a/man/prplot.Rd b/man/prplot.Rd
index 1d843ac..b981a8b 100644
--- a/man/prplot.Rd
+++ b/man/prplot.Rd
@@ -84,7 +84,7 @@ prplot.control(xlab = NULL, ylab = "Probability", main = NULL, xlim = NULL,
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
+fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
M <- npred(fit)  # Or fit@misc$M
\dontrun{ prplot(fit)
prplot(fit, lty = 1:M, col = (1:M)+2, rug = TRUE, las = 1,
diff --git a/man/put.smart.Rd b/man/put.smart.Rd
index e356778..361b10a 100644
--- a/man/put.smart.Rd
+++ b/man/put.smart.Rd
@@ -4,9 +4,7 @@
\description{
Adds a list to the end of the list \code{.smart.prediction}
in
-\code{smartpredenv} (\R)
-or
-frame 1 (S-PLUS).
+\code{smartpredenv}.
}
\usage{
@@ -25,22 +23,16 @@ Nothing is returned.
}
\section{Side Effects}{
The variable \code{.smart.prediction.counter} in
- \code{smartpredenv} (\R)
- or
- frame 1 (S-PLUS)
+ \code{smartpredenv}
is incremented beforehand,
and \code{.smart.prediction[[.smart.prediction.counter]]} is
assigned the list \code{smart}.
If the list \code{.smart.prediction} in
- \code{smartpredenv} (\R)
- or
- frame 1 (S-PLUS)
+ \code{smartpredenv}
is not long enough
to hold \code{smart}, then it is made larger, and the variable
\code{.max.smart} in
- \code{smartpredenv} (\R)
- or
- frame 1 (S-PLUS)
+ \code{smartpredenv}
is adjusted accordingly.
@@ -60,15 +52,15 @@ Nothing is returned.
}
\examples{
-"my1" <- function(x, minx=min(x)) { # Here is a smart function
- x <- x # Needed for nested calls, e.g., bs(scale(x))
- if(smart.mode.is("read")) {
- smart <- get.smart()
- minx <- smart$minx # Overwrite its value
- } else
- if(smart.mode.is("write"))
- put.smart(list(minx=minx))
- sqrt(x-minx)
+"my1" <- function(x, minx = min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if (smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
+ if (smart.mode.is("write"))
+ put.smart(list(minx = minx))
+ sqrt(x - minx)
}
attr(my1, "smart") <- TRUE
}
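
The rewritten my1() above is a smart function: in "write" mode it stores
minx via put.smart(), and in "read" mode (prediction) it retrieves that
stored value instead of recomputing min(x) on the new data. A hedged
usage sketch, assuming my1() is defined as above with its smart
attribute set:

set.seed(1)
sdata <- data.frame(x = runif(100, min = 2, max = 5))
sdata <- transform(sdata, y = sqrt(x - 2) + rnorm(100, sd = 0.1))
fit <- vglm(y ~ my1(x), uninormal, data = sdata)
# The training-data minx is reused here, not min(c(2.5, 3.0)):
predict(fit, newdata = data.frame(x = c(2.5, 3.0)))
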
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 3364fa8..4181198 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -11,12 +11,13 @@
}
\usage{
qrrvglm.control(Rank = 1,
- Bestof = if(length(Cinit)) 1 else 10,
+ Bestof = if (length(Cinit)) 1 else 10,
checkwz = TRUE,
Cinit = NULL,
Crow1positive = TRUE,
epsilon = 1.0e-06,
- EqualTolerances = TRUE,
+ EqualTolerances = NULL,
+ eq.tolerances = TRUE,
Etamat.colmax = 10,
FastAlgorithm = TRUE,
GradientFunction = TRUE,
@@ -24,14 +25,15 @@ qrrvglm.control(Rank = 1,
isd.latvar = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
iKvector = 0.1,
iShape = 0.1,
- ITolerances = FALSE,
+ ITolerances = NULL,
+ I.tolerances = FALSE,
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
MUXfactor = rep(7, length = Rank),
noRRR = ~ 1, Norrr = NA,
optim.maxit = 20,
- Parscale = if(ITolerances) 0.001 else 1.0,
+ Parscale = if (I.tolerances) 0.001 else 1.0,
sd.Cinit = 0.02,
SmallNo = 5.0e-13,
trace = TRUE,
@@ -85,30 +87,46 @@ qrrvglm.control(Rank = 1,
coefficients of the latent variables are unique up to a sign.
}
- \item{epsilon}{
- Positive numeric. Used to test for convergence for GLMs fitted in C.
- Larger values mean a loosening of the convergence criterion.
- If an error code of 3 is reported, try increasing this value.
+ \item{epsilon}{
+ Positive numeric. Used to test for convergence for GLMs fitted in C.
+ Larger values mean a loosening of the convergence criterion.
+ If an error code of 3 is reported, try increasing this value.
+
+ }
+ \item{eq.tolerances}{
+ Logical indicating whether each (quadratic) predictor will
+ have equal tolerances. Having \code{eq.tolerances = TRUE}
+ can help avoid numerical problems, especially with binary data.
+ Note that the estimated (common) tolerance matrix may or may
+ not be positive-definite. If it is then it can be scaled to
+ the \eqn{R} by \eqn{R} identity matrix, i.e., made equivalent
+ to \code{I.tolerances = TRUE}. Setting \code{I.tolerances = TRUE}
+ will \emph{force} a common \eqn{R} by \eqn{R} identity matrix as
+ the tolerance matrix to the data even if it is not appropriate.
+ In general, setting \code{I.tolerances = TRUE} is
+ preferred over \code{eq.tolerances = TRUE} because,
+ if it works, it is much faster and uses less memory.
+ However, \code{I.tolerances = TRUE} requires the
+ environmental variables to be scaled appropriately.
+ See \bold{Details} for more details.
+
+ }
+ \item{EqualTolerances}{
+ Defunct argument.
+ Use \code{eq.tolerances} instead.
+
+ }
+
+
+
+
+
+
+
+
+
- }
- \item{EqualTolerances}{
- Logical indicating whether each (quadratic) predictor will
- have equal tolerances. Having \code{EqualTolerances = TRUE}
- can help avoid numerical problems, especially with binary data.
- Note that the estimated (common) tolerance matrix may or may
- not be positive-definite. If it is then it can be scaled to
- the \eqn{R} by \eqn{R} identity matrix, i.e., made equivalent
- to \code{ITolerances = TRUE}. Setting \code{ITolerances = TRUE}
- will \emph{force} a common \eqn{R} by \eqn{R} identity matrix as
- the tolerance matrix to the data even if it is not appropriate.
- In general, setting \code{ITolerances = TRUE} is
- preferred over \code{EqualTolerances = TRUE} because,
- if it works, it is much faster and uses less memory.
- However, \code{ITolerances = TRUE} requires the
- environmental variables to be scaled appropriately.
- See \bold{Details} for more details.
- }
% \item{Eta.range}{ Numerical vector of length 2 or \code{NULL}.
% Gives the lower and upper bounds on the values that can be taken
% by the quadratic predictor (i.e., on the eta-scale).
@@ -147,7 +165,7 @@ qrrvglm.control(Rank = 1,
\item{isd.latvar}{
Initial standard deviations for the latent variables (site scores).
Numeric, positive and of length \eqn{R} (recycled if necessary).
- This argument is used only if \code{ITolerances = TRUE}. Used by
+ This argument is used only if \code{I.tolerances = TRUE}. Used by
\code{.Init.Poisson.QO()} to obtain initial values for the constrained
coefficients \eqn{C} adjusted to a reasonable value. It adjusts the
spread of the site scores relative to a common species tolerance of 1
@@ -169,20 +187,32 @@ qrrvglm.control(Rank = 1,
arguments in \code{\link{negbinomial}} and \code{\link{gamma2}}.
}
- \item{ITolerances}{
+
+ \item{I.tolerances}{
Logical. If \code{TRUE} then the (common) tolerance matrix is the
\eqn{R} by \eqn{R} identity matrix by definition. Note that having
- \code{ITolerances = TRUE} implies \code{EqualTolerances = TRUE}, but
+ \code{I.tolerances = TRUE} implies \code{eq.tolerances = TRUE}, but
not vice versa. Internally, the quadratic terms will be treated as
offsets (in GLM jargon) and so the models can potentially be fitted
very efficiently. \emph{However, it is a very good idea to center
and scale all numerical variables in the \eqn{x_2} vector}.
See \bold{Details} for more details.
- The success of \code{ITolerances = TRUE} often
+ The success of \code{I.tolerances = TRUE} often
depends on suitable values for \code{isd.latvar} and/or
\code{MUXfactor}.
+
+ }
+ \item{ITolerances}{
+ Defunct argument.
+ Use \code{I.tolerances} instead.
+
+
}
+
+
+
+
\item{maxitl}{
Maximum number of times the optimizer is called or restarted.
Most users should ignore this argument.
@@ -205,7 +235,7 @@ qrrvglm.control(Rank = 1,
\item{MUXfactor}{
Multiplication factor for detecting large offset values. Numeric,
positive and of length \eqn{R} (recycled if necessary). This argument
- is used only if \code{ITolerances = TRUE}. Offsets are \eqn{-0.5}
+ is used only if \code{I.tolerances = TRUE}. Offsets are \eqn{-0.5}
multiplied by the sum of the squares of all \eqn{R} latent variable
values. If the latent variable values are too large then this will
result in numerical problems. By too large, it is meant that the
@@ -246,7 +276,7 @@ qrrvglm.control(Rank = 1,
(recycled if necessary).
Passed into \code{optim(..., control = list(parscale = Parscale))};
the elements of \eqn{C} become \eqn{C} / \code{Parscale}.
- Setting \code{ITolerances = TRUE} results in line searches that
+ Setting \code{I.tolerances = TRUE} results in line searches that
are very large, therefore \eqn{C} has to be scaled accordingly
to avoid large step sizes.
See \bold{Details} for more information.
@@ -322,7 +352,7 @@ qrrvglm.control(Rank = 1,
% general, diagonal under such a constraint.
- Having \code{ITolerances = TRUE} means all the tolerance matrices
+ Having \code{I.tolerances = TRUE} means all the tolerance matrices
are the order-\eqn{R} identity matrix, i.e., it \emph{forces}
bell-shaped curves/surfaces on all species. This results in a
more difficult optimization problem (especially for 2-parameter
@@ -332,12 +362,12 @@ qrrvglm.control(Rank = 1,
can help enormously. Even better, scaling \eqn{C} by specifying
\code{isd.latvar} is more understandable to humans. If failure to
converge occurs, try adjusting \code{Parscale}, or better, setting
- \code{EqualTolerances = TRUE} (and hope that the estimated tolerance
+ \code{eq.tolerances = TRUE} (and hope that the estimated tolerance
matrix is positive-definite). To fit an equal-tolerances model, it
- is firstly best to try setting \code{ITolerances = TRUE} and varying
+ is firstly best to try setting \code{I.tolerances = TRUE} and varying
\code{isd.latvar} and/or \code{MUXfactor} if it fails to converge.
If it still fails to converge after many attempts, try setting
- \code{EqualTolerances = TRUE}, however this will usually be a lot slower
+ \code{eq.tolerances = TRUE}; however, this will usually be a lot slower
because it requires a lot more memory.
@@ -346,18 +376,18 @@ qrrvglm.control(Rank = 1,
matrix.
- If setting \code{EqualTolerances = TRUE} is used and the common
+ If setting \code{eq.tolerances = TRUE} is used and the common
estimated tolerance matrix is positive-definite then that model is
- effectively the same as the \code{ITolerances = TRUE} model (the two are
- transformations of each other). In general, \code{ITolerances = TRUE}
+ effectively the same as the \code{I.tolerances = TRUE} model (the two are
+ transformations of each other). In general, \code{I.tolerances = TRUE}
is numerically more unstable and presents a more difficult problem
to optimize; the arguments \code{isd.latvar} and/or \code{MUXfactor} often
must be assigned some good value(s) (possibly found by trial and error)
- in order for convergence to occur. Setting \code{ITolerances = TRUE}
+ in order for convergence to occur. Setting \code{I.tolerances = TRUE}
\emph{forces} a bell-shaped curve or surface onto all the species data,
therefore this option should be used with deliberation. If unsuitable,
the resulting fit may be very misleading. Usually it is a good idea
- for the user to set \code{EqualTolerances = FALSE} to see which species
+ for the user to set \code{eq.tolerances = FALSE} to see which species
appear to have a bell-shaped curve or surface. Improvements to the
fit can often be achieved using transformations, e.g., nitrogen
concentration to log nitrogen concentration.
@@ -385,6 +415,8 @@ qrrvglm.control(Rank = 1,
}
\value{
A list with components matching the input names.
+
+
}
\references{
Yee, T. W. (2004)
@@ -393,15 +425,17 @@ canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology},
\bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
\note{
- When \code{ITolerances = TRUE} it is a good idea to apply
+ When \code{I.tolerances = TRUE} it is a good idea to apply
\code{\link[base]{scale}} to all the numerical variables that make up
the latent variable, i.e., those of \eqn{x_2}{x_2}. This is to make
them have mean 0, and hence avoid large offset values which cause
@@ -413,11 +447,11 @@ Constrained additive ordination.
It is usually a good idea to try fitting a model with
- \code{ITolerances = TRUE} first, and if convergence is unsuccessful,
- then try \code{EqualTolerances = TRUE} and \code{ITolerances = FALSE}.
+ \code{I.tolerances = TRUE} first, and if convergence is unsuccessful,
+ then try \code{eq.tolerances = TRUE} and \code{I.tolerances = FALSE}.
Ordination diagrams with
- \code{EqualTolerances = TRUE} have a natural interpretation, but
- with \code{EqualTolerances = FALSE} they are more complicated and
+ \code{eq.tolerances = TRUE} have a natural interpretation, but
+ with \code{eq.tolerances = FALSE} they are more complicated and
require, e.g., contours to be overlaid on the ordination diagram
(see \code{\link{lvplot.qrrvglm}}).
@@ -426,7 +460,7 @@ Constrained additive ordination.
In the example below, an equal-tolerances CQO model is fitted to the
- hunting spiders data. Because \code{ITolerances = TRUE}, it is a good idea
+ hunting spiders data. Because \code{I.tolerances = TRUE}, it is a good idea
to center all the \eqn{x_2} variables first. Upon fitting the model,
the actual standard deviations of the site scores are computed. Ideally,
the \code{isd.latvar} argument should have had this value for the best
@@ -456,25 +490,29 @@ Constrained additive ordination.
\code{\link{rcqo}},
\code{\link{Coef.qrrvglm}},
\code{\link{Coef.qrrvglm-class}},
-% \code{\link{rrvglm}},
-% \code{\link{rrvglm.control}},
-% \code{\link{rrvglm.optim.control}},
\code{\link[stats]{optim}},
\code{\link{binomialff}},
\code{\link{poissonff}},
\code{\link{negbinomial}},
\code{\link{gamma2}},
\code{\link{gaussianff}}.
+
+
+% \code{\link{rrvglm}},
+% \code{\link{rrvglm.control}},
+% \code{\link{rrvglm.optim.control}},
+
+
}
\examples{
\dontrun{ # Poisson CQO with equal tolerances
set.seed(111) # This leads to the global solution
-hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when I.tolerances = TRUE
p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- quasipoissonff, data = hspider, EqualTolerances = TRUE)
+ quasipoissonff, data = hspider, eq.tolerances = TRUE)
sort(p1@misc$deviance.Bestof)  # A history of all the iterations
(isd.latvar <- apply(latvar(p1), 2, sd)) # Should be approx isd.latvar
@@ -484,7 +522,7 @@ set.seed(111) # This leads to the global solution
p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- ITolerances = TRUE, quasipoissonff, data = hspider,
+ I.tolerances = TRUE, quasipoissonff, data = hspider,
isd.latvar = isd.latvar) # Note the use of isd.latvar here
sort(p1@misc$deviance.Bestof)  # A history of all the iterations
}
@@ -501,7 +539,7 @@ sort(p1@misc$deviance.Bestof) # A history of all the iterations
%nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
% Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
% WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-% ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
+% I.tol = FALSE, eq.tol = TRUE, # A good idea for negbinomial
% fam = negbinomial, data = hspider)
%sort(nb1@misc$deviance.Bestof) # A history of all the iterations
%summary(nb1)
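
This control file carries the main CQO argument renamings: ITolerances
becomes I.tolerances and EqualTolerances becomes eq.tolerances, with the
old spellings kept only as defunct placeholders (the abbreviations such
as I.tol and eq.tol seen in commented examples rely on partial
matching). A minimal sketch in the new spelling:

set.seed(111)
hspider[, 1:6] <- scale(hspider[, 1:6])  # Advisable when I.tolerances = TRUE
p1 <- cqo(cbind(Pardmont, Pardnigr, Pardpull, Trocterr) ~
            WaterCon + FallTwig + CoveHerb + ReflLux,
          poissonff, data = hspider, I.tolerances = TRUE)
sort(p1@misc$deviance.Bestof)  # A history of the fits
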
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 814f242..25c2751 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -57,15 +57,19 @@ contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
- \code{\link{plotqtplot.lmscreg}} does the actual plotting.
+ \code{\link{plotqtplot.lmscreg}} does the actual plotting.
+
+
}
\seealso{
-\code{\link{plotqtplot.lmscreg}},
-\code{\link{deplot.lmscreg}},
-\code{\link{lms.bcn}},
-\code{\link{lms.bcg}},
-\code{\link{lms.yjn}}.
+ \code{\link{plotqtplot.lmscreg}},
+ \code{\link{deplot.lmscreg}},
+ \code{\link{lms.bcn}},
+ \code{\link{lms.bcg}},
+ \code{\link{lms.yjn}}.
+
+
}
\examples{\dontrun{
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 6ebd635..75e915d 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -123,7 +123,7 @@ quasipoissonff()
\dontrun{n <- 200; p <- 5; S <- 5
mydata <- rcqo(n, p, S, fam = "poisson", eq.tol = FALSE)
myform <- attr(mydata, "formula")
-p1 <- cqo(myform, fam = quasipoissonff, EqualTol = FALSE, data = mydata)
+p1 <- cqo(myform, fam = quasipoissonff, eq.tol = FALSE, data = mydata)
sort(p1@misc$deviance.Bestof)  # A history of all the iterations
lvplot(p1, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S)
summary(p1) # The dispersion parameters are estimated
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index 8675854..fd12669 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -62,6 +62,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
for \eqn{y > 0} and \eqn{b > 0}.
The mean of \eqn{Y} is
\eqn{b \sqrt{\pi / 2}}{b * sqrt(pi / 2)}
+ (returned as the fitted values)
and its variance is
\eqn{b^2 (4-\pi)/2}{b^2 (4-pi)/2}.
@@ -116,14 +117,15 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\code{\link{genrayleigh}},
\code{\link{riceff}},
\code{\link{maxwell}},
- \code{\link{poisson.points}}.
+ \code{\link{poisson.points}},
+ \code{\link{simulate.vlm}}.
}
\examples{
nn <- 1000; Scale <- exp(2)
rdata <- data.frame(ystar = rrayleigh(nn, scale = Scale))
-fit <- vglm(ystar ~ 1, rayleigh, rdata, trace = TRUE, crit = "c")
+fit <- vglm(ystar ~ 1, rayleigh, data = rdata, trace = TRUE, crit = "coef")
head(fitted(fit))
with(rdata, mean(ystar))
coef(fit, matrix = TRUE)
@@ -132,9 +134,10 @@ Coef(fit)
# Censored data
rdata <- transform(rdata, U = runif(nn, 5, 15))
rdata <- transform(rdata, y = pmin(U, ystar))
-\dontrun{ par(mfrow = c(1,2)); hist(with(rdata, ystar)); hist(with(rdata, y)) }
+\dontrun{ par(mfrow = c(1, 2))
+hist(with(rdata, ystar)); hist(with(rdata, y)) }
extra <- with(rdata, list(rightcensored = ystar > U))
-fit <- vglm(y ~ 1, cenrayleigh, rdata, trace = TRUE, extra = extra)
+fit <- vglm(y ~ 1, cenrayleigh, data = rdata, trace = TRUE, extra = extra)
table(fit@extra$rightcen)
coef(fit, matrix = TRUE)
head(fitted(fit))
diff --git a/man/rayleighUC.Rd b/man/rayleighUC.Rd
index 2ab4882..ce47da8 100644
--- a/man/rayleighUC.Rd
+++ b/man/rayleighUC.Rd
@@ -23,12 +23,15 @@ rrayleigh(n, scale = 1)
\item{p}{vector of probabilities.}
\item{n}{number of observations.
Fed into \code{\link[stats]{runif}}.
+
+
}
\item{scale}{the scale parameter \eqn{b}.}
\item{log}{
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
+
}
}
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index 09ec3b2..95c3a60 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -388,7 +388,7 @@ c(apply(attr(mydata, "latvar"), 2, sd),
n <- 200; p <- 5; S <- 5
mydata <- rcqo(n, p, S, fam = "negbin", sqrt = TRUE)
myform <- attr(mydata, "formula")
-fit <- cqo(myform, fam = poissonff, dat = mydata) # ITol = TRUE,
+fit <- cqo(myform, fam = poissonff, dat = mydata) # I.tol = TRUE,
lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)
# Compare the fitted model with the 'truth'
concoef(fit) # The fitted model
@@ -399,7 +399,7 @@ attr(mydata, "concoefficients") # The 'truth'
n <- 200; p <- 5; S <- 3
mydata <- rcqo(n, p, S, fam = "gamma2", Log = TRUE)
fit <- cqo(attr(mydata, "formula"),
- fam = gaussianff, data = mydata) # ITol = TRUE,
+ fam = gaussianff, data = mydata) # I.tol = TRUE,
matplot(attr(mydata, "latvar"),
exp(mydata[, -(1:(p-1))]), col = 1:S) # 'raw' data
# Fitted model to transformed data:
diff --git a/man/rdiric.Rd b/man/rdiric.Rd
index a3b93af..5d782ac 100644
--- a/man/rdiric.Rd
+++ b/man/rdiric.Rd
@@ -7,21 +7,40 @@
}
\usage{
-rdiric(n, shape, dimension = NULL)
+rdiric(n, shape, dimension = NULL, is.matrix.shape = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{n}{ number of observations. }
+ \item{n}{ number of observations.
+ Note it has two meanings; see \code{is.matrix.shape} below.
+
+
+
+ }
\item{shape}{
the shape parameters. These must be positive.
If \code{dimension} is specified, values
are recycled if necessary to length \code{dimension}.
+
}
\item{dimension}{
the dimension of the distribution.
If \code{dimension} is not numeric then it is taken to be
- \code{length(shape)}.
+ \code{length(shape)}
+ (or \code{ncol(shape)} if \code{is.matrix.shape == TRUE}).
+
+
+ }
+ \item{is.matrix.shape}{
+ Logical.
+ If \code{TRUE} then \code{shape} must be a matrix, and then
+ \code{n} is no longer the number of rows of the answer but the
+ answer has \code{n * nrow(shape)} rows.
+ If \code{FALSE} (the default) then \code{shape} is a vector and each
+ of the \code{n} rows of the answer have \code{shape} as
+ its shape parameters.
+
}
}
@@ -30,30 +49,37 @@ rdiric(n, shape, dimension = NULL)
Dirichlet distribution. Random gamma variates are generated, and
then Dirichlet random variates are formed from these.
+
}
\value{
An \code{n} by \code{dimension} matrix of Dirichlet random variates.
Each element is positive, and each row will sum to unity.
+ If \code{shape} has names then these will become the column names
+ of the answer.
+
}
\references{
+
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed.
New York: Springer-Verlag.
+
}
\author{ Thomas W. Yee }
\seealso{
\code{\link{dirichlet}} is a \pkg{VGAM} family function for
fitting a Dirichlet distribution to data.
+
}
\examples{
-y <- rdiric(n = 1000, shape = c(3, 1, 4))
-fit <- vglm(y ~ 1, dirichlet, trace = TRUE, crit = "c")
+ddata <- data.frame(rdiric(n = 1000, shape = c(y1 = 3, y2 = 1, y3 = 4)))
+fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, data = ddata, trace = TRUE)
Coef(fit)
coef(fit, matrix = TRUE)
}
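
With the new is.matrix.shape = TRUE, shape is a matrix of per-row shape
vectors and the result has n * nrow(shape) rows. A minimal sketch:

set.seed(1)
shape.mat <- rbind(c(3, 1, 4), c(1, 1, 1))  # Two different shape vectors
y <- rdiric(n = 5, shape = shape.mat, is.matrix.shape = TRUE)
dim(y)               # Expected: 10 rows (n * nrow(shape)), 3 columns
summary(rowSums(y))  # Each row sums to unity
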
diff --git a/man/recexp1.Rd b/man/recexp1.Rd
index fdacdf3..66e525f 100644
--- a/man/recexp1.Rd
+++ b/man/recexp1.Rd
@@ -34,17 +34,21 @@ recexp1(lrate = "loge", irate = NULL, imethod = 1)
The response must be a vector or one-column matrix with strictly
increasing values.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
+
}
\references{
Arnold, B. C. and Balakrishnan, N. and Nagaraja, H. N. (1998)
\emph{Records},
New York: John Wiley & Sons.
+
}
\author{ T. W. Yee }
\note{
@@ -52,10 +56,13 @@ recexp1(lrate = "loge", irate = NULL, imethod = 1)
initial value, therefore convergence may only take one iteration.
Fisher scoring is used.
+
}
\seealso{
- \code{\link{exponential}}.
+ \code{\link{exponential}}.
+
+
}
\examples{
rawy <- rexp(n <- 10000, rate = exp(1))
diff --git a/man/recnormal.Rd b/man/recnormal.Rd
index 82c02dd..a593b77 100644
--- a/man/recnormal.Rd
+++ b/man/recnormal.Rd
@@ -9,7 +9,7 @@
}
\usage{
-recnormal(lmean = "identity", lsd = "loge",
+recnormal(lmean = "identitylink", lsd = "loge",
imean = NULL, isd = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -91,7 +91,7 @@ Rdata <- data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3))))
# Keep only observations that are records:
rdata <- data.frame(y = unique(cummax(with(Rdata, rawy))))
-fit <- vglm(y ~ 1, recnormal, rdata, trace = TRUE, maxit = 200)
+fit <- vglm(y ~ 1, recnormal, data = rdata, trace = TRUE, maxit = 200)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index a38d6eb..47bb250 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -7,10 +7,12 @@
\title{The Rice Distribution}
\description{
Density
-% distribution function, quantile function
and random generation for the
Rician distribution.
+
+% distribution function, quantile function
+
}
\usage{
@@ -37,9 +39,11 @@ rrice(n, vee, sigma)
}
\value{
\code{drice} gives the density,
+ \code{rrice} generates random deviates.
+
+
% \code{price} gives the distribution function,
% \code{qrice} gives the quantile function, and
- \code{rrice} generates random deviates.
}
diff --git a/man/riceff.Rd b/man/riceff.Rd
index 59df95b..c3b2778 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -87,7 +87,8 @@ Mathematical Analysis of Random Noise.
\seealso{
\code{\link{drice}},
\code{\link{rayleigh}},
- \code{\link[base:Bessel]{besselI}}.
+ \code{\link[base:Bessel]{besselI}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/rigff.Rd b/man/rigff.Rd
index 31439b0..0c10d83 100644
--- a/man/rigff.Rd
+++ b/man/rigff.Rd
@@ -8,7 +8,7 @@
}
\usage{
-rigff(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
+rigff(lmu = "identitylink", llambda = "loge", imu = NULL, ilambda = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -58,8 +58,8 @@ London: Chapman & Hall
}
\examples{
rdata <- data.frame(y = rchisq(n = 100, df = 14)) # Not 'proper' data!!
-fit <- vglm(y ~ 1, rigff, rdata, trace = TRUE)
-fit <- vglm(y ~ 1, rigff, rdata, trace = TRUE, eps = 1e-9, crit = "coef")
+fit <- vglm(y ~ 1, rigff, data = rdata, trace = TRUE)
+fit <- vglm(y ~ 1, rigff, data = rdata, trace = TRUE, eps = 1e-9, crit = "coef")
summary(fit)
}
\keyword{models}
diff --git a/man/rlplot.egev.Rd b/man/rlplot.egev.Rd
index e73640a..25e8542 100644
--- a/man/rlplot.egev.Rd
+++ b/man/rlplot.egev.Rd
@@ -136,11 +136,11 @@ London: Springer-Verlag.
\examples{
gdata <- data.frame(y = rgev(n <- 100, scale = 2, shape = -0.1))
-fit <- vglm(y ~ 1, egev, gdata, trace = TRUE)
+fit <- vglm(y ~ 1, egev, data = gdata, trace = TRUE)
# Identity link for all parameters:
-fit2 <- vglm(y ~ 1, egev(lshape = identity, lscale = identity,
- iscale = 10), gdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, egev(lshape = identitylink, lscale = identitylink,
+ iscale = 10), data = gdata, trace = TRUE)
coef(fit2, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 2))
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index 64121a8..3aa897d 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -262,7 +262,7 @@ Vector generalized additive models.
pneumo <- transform(pneumo, let = log(exposure.time),
x3 = runif(nrow(pneumo))) # x3 is unrelated
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
- multinomial, pneumo, Rank = 1)
+ multinomial, data = pneumo, Rank = 1)
Coef(fit)
}
}
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 4ee419a..77f46df 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -123,6 +123,7 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
same internally. The slots of \code{"vglm"} objects are
described in \code{\link{vglm-class}}.
+
}
\references{
@@ -236,17 +237,19 @@ Regression and ordered categorical variables.
\examples{
\dontrun{
-# Example 1: RR negative binomial (RR-NB) with Var(Y) = mu + delta1 * mu^delta2
+# Example 1: RR negative binomial with Var(Y) = mu + delta1 * mu^delta2
nn <- 1000 # Number of observations
delta1 <- 3.0 # Specify this
delta2 <- 1.5 # Specify this; should be greater than unity
a21 <- 2 - delta2
mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
-mydata <- transform(mydata, y2 = rnbinom(nn, mu=mu, size=(1/delta1)*mu^a21))
+mydata <- transform(mydata,
+ y2 = rnbinom(nn, mu = mu, size = (1/delta1)*mu^a21))
plot(y2 ~ x2, data = mydata, pch = "+", col = 'blue', las = 1,
main = paste("Var(Y) = mu + ", delta1, " * mu^", delta2, sep = ""))
-rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL), mydata, trace = TRUE)
+rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL),
+ data = mydata, trace = TRUE)
a21.hat <- (Coef(rrnb2)@A)["log(size)", 1]
beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
@@ -267,7 +270,7 @@ Confint.rrnb(rrnb2) # Quick way to get it
plot(y2 ~ latvar(rrnb2), data = mydata, col = "blue",
xlab = "Latent variable", las = 1)
ooo <- order(latvar(rrnb2))
-lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "red")
+lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "orange")
# Example 2: stereotype model (reduced-rank multinomial logit model)
data(car.all)
@@ -275,14 +278,15 @@ index <- with(car.all, Country == "Germany" | Country == "USA" |
Country == "Japan" | Country == "Korea")
scar <- car.all[index, ] # standardized car data
fcols <- c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
-scar[,-fcols] = scale(scar[, -fcols]) # Standardize all numerical vars
+scar[, -fcols] <- scale(scar[, -fcols]) # Standardize all numerical vars
ones <- matrix(1, 3, 1)
clist <- list("(Intercept)" = diag(3), Width = ones, Weight = ones,
Disp. = diag(3), Tank = diag(3), Price = diag(3),
Frt.Leg.Room = diag(3))
set.seed(111)
-fit <- rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
- multinomial, data = scar, Rank = 2, trace = TRUE,
+fit <- rrvglm(Country ~ Width + Weight + Disp. + Tank +
+ Price + Frt.Leg.Room,
+ multinomial, data = scar, Rank = 2, trace = TRUE,
constraints = clist, noRRR = ~ 1 + Width + Weight,
Uncor = TRUE, Corner = FALSE, Bestof = 2)
fit@misc$deviance  # A history of the fits
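
A reasoning step behind Example 1 above: with size = (1/delta1) * mu^a21
and a21 = 2 - delta2, the negative binomial variance mu + mu^2/size
equals mu + delta1 * mu^(2 - a21) = mu + delta1 * mu^delta2, which is
the variance function quoted in the comment. A quick numerical check:

delta1 <- 3.0; delta2 <- 1.5; a21 <- 2 - delta2; mu <- 4
size <- (1/delta1) * mu^a21
c(mu + mu^2 / size, mu + delta1 * mu^delta2)  # The two entries should agree
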
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index 9c3bcc3..b598db9 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -213,8 +213,8 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
% likelihood solution to constrained quadratic ordination (CQO;
% formerly called canonical Gaussian ordination) models.
-% For QRR-VGLMs, if \code{EqualTolerances=TRUE} and
-% \code{ITolerances=FALSE} then the default is that the \bold{C}
+% For QRR-VGLMs, if \code{eq.tolerances=TRUE} and
+% \code{I.tolerances=FALSE} then the default is that the \bold{C}
% matrix is constrained by forcing the latent variables to have sample
% variance-covariance matrix equalling \code{diag(Rank)}, i.e., unit
% variance and uncorrelated.
@@ -227,6 +227,8 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
\value{
A list with components matching the input names.
Some error checking is done, but not much.
+
+
}
\references{
Yee, T. W. and Hastie, T. J. (2003)
@@ -269,7 +271,7 @@ set.seed(111)
pneumo <- transform(pneumo, let = log(exposure.time),
x3 = runif(nrow(pneumo))) # x3 is random noise
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
- multinomial, pneumo, Rank = 1, Index.corner = 2)
+ multinomial, data = pneumo, Rank = 1, Index.corner = 2)
constraints(fit)
vcov(fit)
summary(fit)
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 771f8b3..4f15b4b 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -96,7 +96,8 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
}
\seealso{
- \code{\link{binomialff}}.
+ \code{\link{binomialff}},
+ \code{\link{cfibrosis}}.
}
diff --git a/man/setup.smart.Rd b/man/setup.smart.Rd
index 60faab4..ea386c0 100644
--- a/man/setup.smart.Rd
+++ b/man/setup.smart.Rd
@@ -18,6 +18,7 @@ setup.smart(mode.arg, smart.prediction = NULL, max.smart = 30)
\code{object$smart.prediction} where
\code{object} is the name of the fitted object.
+
}
\item{smart.prediction}{
@@ -34,26 +35,28 @@ setup.smart(mode.arg, smart.prediction = NULL, max.smart = 30)
\code{.smart.prediction}. It is not important because
\code{.smart.prediction} is made larger if needed.
+
}}
\value{
Nothing is returned.
+
+
}
\section{Side Effects}{
In \code{"write"} mode
\code{.smart.prediction} in
- \code{smartpredenv} (\R) or frame 1 (S-PLUS)
+ \code{smartpredenv}
is assigned an empty list with \code{max.smart} components.
In \code{"read"} mode
\code{.smart.prediction} in
- \code{smartpredenv} (\R) or frame 1 (S-PLUS)
+ \code{smartpredenv}
is assigned \code{smart.prediction}.
- In both cases,
+ Then
\code{.smart.prediction.counter} in
- \code{smartpredenv} (\R) or
- frame 1 (S-PLUS)
+ \code{smartpredenv}
is assigned the value 0, and
\code{.smart.prediction.mode} and \code{.max.smart} are written to
- \code{smartpredenv} (\R) or frame 1 (S-PLUS) too.
+ \code{smartpredenv} too.
}
@@ -75,12 +78,12 @@ setup.smart(mode.arg, smart.prediction = NULL, max.smart = 30)
}
\examples{
-\dontrun{# Put at the beginning of lm
-setup.smart("write")
+\dontrun{
+setup.smart("write") # Put at the beginning of lm
}
\dontrun{# Put at the beginning of predict.lm
-setup.smart("read", smart.prediction=object$smart.prediction)
+setup.smart("read", smart.prediction = object$smart.prediction)
}
}
%\keyword{smart}
diff --git a/man/simplex.Rd b/man/simplex.Rd
index 8ff504c..3cee9b4 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -109,7 +109,7 @@ sdata <- transform(sdata, eta1 = 1 + 2 * x2,
eta2 = 1 - 2 * x2)
sdata <- transform(sdata, y = rsimplex(nn, mu = logit(eta1, inverse = TRUE),
dispersion = exp(eta2)))
-(fit <- vglm(y ~ x2, simplex(zero = NULL), sdata, trace = TRUE))
+(fit <- vglm(y ~ x2, simplex(zero = NULL), data = sdata, trace = TRUE))
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/simulate.vlm.Rd b/man/simulate.vlm.Rd
new file mode 100644
index 0000000..ade8362
--- /dev/null
+++ b/man/simulate.vlm.Rd
@@ -0,0 +1,208 @@
+% 20131230; adapted from simulate.Rd from R 3.0.2
+
+
+\newcommand{\CRANpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}}
+
+
+\name{simulate.vlm}
+\title{Simulate Responses for VGLMs and VGAMs}
+\description{
+ Simulate one or more responses from the distribution
+ corresponding to a fitted model object.
+
+}
+\usage{
+\method{simulate}{vlm}(object, nsim = 1, seed = NULL, \dots)
+}
+\alias{simulate.vlm}
+\arguments{
+ \item{object}{an object representing a fitted model.
+ Usually an object of class
+ \code{\link{vglm-class}}
+ or
+ \code{\link{vgam-class}}.
+
+
+
+ }
+ \item{nsim, seed}{
+ Same as \code{\link[stats]{simulate}}.
+
+
+
+ }
+% \item{seed}{an object specifying if and how the random number
+% generator should be initialized (\sQuote{seeded}).\cr
+% For the "lm" method, either \code{NULL} or an integer that will be
+% used in a call to \code{set.seed} before simulating the response
+% vectors. If set, the value is saved as the \code{"seed"} attribute
+% of the returned value. The default, \code{NULL} will not change the
+% random generator state, and return \code{\link{.Random.seed}} as the
+% \code{"seed"} attribute, see \sQuote{Value}.
+% }
+ \item{\dots}{additional optional arguments.}
+
+
+
+}
+\value{
+ Similar to \code{\link[stats]{simulate}}.
+ Note that many \pkg{VGAM} family functions can handle
+ multiple responses. This can result in a longer data frame
+ with more rows
+ (\code{nsim} multiplied by \code{n} rather than the
+ ordinary \code{n}).
+ In the future an argument may be available so that there
+  are always \code{n} rows no matter how many responses were
+  input.
+
+
+
+% Typically, a list of length \code{nsim} of simulated responses. Where
+% appropriate the result can be a data frame (which is a special type of
+% list).
+% %% a *matrix* seems very natural and is more efficient
+% %% for large-scale simulation, already for stats:::simulate.lm (in ../R/lm.R )
+
+
+
+% For the \code{"lm"} method, the result is a data frame with an
+% attribute \code{"seed"}. If argument \code{seed} is \code{NULL}, the
+% attribute is the value of \code{\link{.Random.seed}} before the
+% simulation was started; otherwise it is the value of the argument with
+% a \code{"kind"} attribute with value \code{as.list(\link{RNGkind}())}.
+
+
+
+}
+\details{
+  This is a method function for \code{\link[stats]{simulate}}
+  and should behave in a very similar manner.
+ Only \pkg{VGAM} family functions with a \code{simslot} slot
+ have been implemented for \code{\link[stats]{simulate}}.
+
+
+
+
+}
+\seealso{
+ Currently the \pkg{VGAM} family functions with a
+ \code{simslot} slot are:
+ \code{\link{alaplace1}},
+ \code{\link{alaplace2}},
+ \code{\link{amh}},
+ \code{\link{betabinomial}},
+ \code{\link{betabinomial.ab}},
+ \code{\link{beta.ab}},
+ \code{\link{betaff}},
+ \code{\link{bifrankcop}},
+ \code{\link{bilogistic4}},
+ \code{\link{binomialff}},
+ \code{\link{binormal}},
+ \code{\link{binormalcop}},
+ \code{\link{biclaytoncop}},
+ \code{\link{cauchy}},
+ \code{\link{cauchy1}},
+ \code{\link{chisq}},
+ \code{\link{dirichlet}},
+ \code{\link{dagum}},
+ \code{\link{erlang}},
+ \code{\link{exponential}},
+ \code{\link{fgm}},
+ \code{\link{fisk}},
+ \code{\link{gamma1}},
+ \code{\link{gamma2}},
+ \code{\link{gamma2.ab}},
+ \code{\link{gengamma}},
+ \code{\link{geometric}},
+ \code{\link{gompertz}},
+ \code{\link{gumbelII}},
+ \code{\link{hzeta}},
+ \code{\link{invlomax}},
+ \code{\link{invparalogistic}},
+ \code{\link{kumar}},
+ \code{\link{lgammaff}},
+ \code{\link{lgamma3ff}},
+ \code{\link{lindley}},
+ \code{\link{lino}},
+ \code{\link{logff}},
+ \code{\link{logistic1}},
+ \code{\link{logistic2}},
+ \code{\link{lognormal}},
+ \code{\link{lognormal3}},
+ \code{\link{lomax}},
+ \code{\link{makeham}},
+ \code{\link{negbinomial}},
+ \code{\link{negbinomial.size}},
+ \code{\link{paralogistic}},
+ \code{\link{perks}},
+ \code{\link{poissonff}},
+ \code{\link{posnegbinomial}},
+ \code{\link{posnormal}},
+ \code{\link{pospoisson}},
+ \code{\link{polya}},
+ \code{\link{posbinomial}},
+ \code{\link{rayleigh}},
+ \code{\link{riceff}},
+ \code{\link{simplex}},
+ \code{\link{sinmad}},
+ \code{\link{slash}},
+ \code{\link{studentt}},
+ \code{\link{studentt2}},
+ \code{\link{studentt3}},
+ \code{\link{triangle}},
+ \code{\link{uninormal}},
+ \code{\link{yulesimon}},
+ \code{\link{zageometric}},
+ \code{\link{zageometricff}},
+ \code{\link{zanegbinomial}},
+ \code{\link{zanegbinomialff}},
+ \code{\link{zapoisson}},
+ \code{\link{zapoissonff}},
+ \code{\link{zigeometric}},
+ \code{\link{zigeometricff}},
+ \code{\link{zinegbinomial}},
+ \code{\link{zipf}},
+ \code{\link{zipoisson}},
+ \code{\link{zipoissonff}}.
+
+
+
+% \code{\link{logF}},
+% \code{\link{tobit}},
+
+
+ See also
+ \code{\link{RNG}} about random number generation in \R,
+ \code{\link{vglm}}, \code{\link{vgam}} for model fitting.
+
+
+
+
+}
+\examples{
+nn <- 10; mysize <- 20
+set.seed(123)
+bdata <- data.frame(x2 = rnorm(nn))
+bdata <- transform(bdata,
+ y1 = rbinom(nn, size = mysize, p = logit(1+x2, inverse = TRUE)),
+ y2 = rbinom(nn, size = mysize, p = logit(1+x2, inverse = TRUE)),
+ f1 = factor(as.numeric(rbinom(nn, size = 1,
+ p = logit(1+x2, inverse = TRUE)))))
+(fit1 <- vglm(cbind(y1, aaa = mysize - y1) ~ x2, # Matrix response (2 columns)
+ binomialff, data = bdata))
+(fit2 <- vglm(f1 ~ x2, # Factor response
+ binomialff, model = TRUE, data = bdata))
+
+set.seed(123)
+simulate(fit1, nsim = 8)
+
+set.seed(123)
+c(simulate(fit2, nsim = 3)) # Use c() when model = TRUE
+}
+\keyword{models}
+\keyword{datagen}
+
+
+
+
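A hedged follow-up to the examples in this new file: as with
\code{\link[stats]{simulate}}, resetting the seed reproduces the draws,
which makes simulation studies repeatable.

set.seed(123); s1 <- simulate(fit1, nsim = 2)
set.seed(123); s2 <- simulate(fit1, nsim = 2)
identical(s1, s2)  # TRUE: same seed, same draws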
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 1c867a5..0b83cf6 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -93,15 +93,16 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{invlomax}},
\code{\link{lomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{simulate.vlm}}.
}
\examples{
sdata <- data.frame(y = rsinmad(n = 1000, exp(1), exp(2), exp(0)))
-fit <- vglm(y ~ 1, sinmad, sdata, trace = TRUE)
-fit <- vglm(y ~ 1, sinmad(ishape1.a = exp(1)), sdata, trace = TRUE)
+fit <- vglm(y ~ 1, sinmad, data = sdata, trace = TRUE)
+fit <- vglm(y ~ 1, sinmad(ishape1.a = exp(1)), data = sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/sinmadUC.Rd b/man/sinmadUC.Rd
index dbe8437..049475c 100644
--- a/man/sinmadUC.Rd
+++ b/man/sinmadUC.Rd
@@ -70,7 +70,8 @@ Hoboken, NJ: Wiley-Interscience.
}
\examples{
sdata <- data.frame(y = rsinmad(n = 3000, exp(1), exp(2), exp(1)))
-fit <- vglm(y ~ 1, sinmad(ishape1.a = 2.1), sdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, sinmad(ishape1.a = 2.1), data = sdata,
+ trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/skellam.Rd b/man/skellam.Rd
index 20b2829..a4bceaa 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -107,14 +107,14 @@ two Poisson variates belonging to different populations.
sdata <- data.frame(x2 = runif(nn <- 1000))
sdata <- transform(sdata, mu1 = exp(1 + x2), mu2 = exp(1 + x2))
sdata <- transform(sdata, y = rskellam(nn, mu1, mu2))
-fit1 <- vglm(y ~ x2, skellam, sdata, trace = TRUE, crit = "coef")
-fit2 <- vglm(y ~ x2, skellam(parallel = TRUE), sdata, trace = TRUE)
+fit1 <- vglm(y ~ x2, skellam, data = sdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y ~ x2, skellam(parallel = TRUE), data = sdata, trace = TRUE)
coef(fit1, matrix = TRUE)
coef(fit2, matrix = TRUE)
summary(fit1)
# Likelihood ratio test for equal means:
pchisq(2 * (logLik(fit1) - logLik(fit2)),
- df = fit2@df.residual - fit1@df.residual, lower.tail = FALSE)
+ df = df.residual(fit2) - df.residual(fit1), lower.tail = FALSE)
lrtest(fit1, fit2) # Alternative
}
}
diff --git a/man/skellamUC.Rd b/man/skellamUC.Rd
index 8030416..efe4fcc 100644
--- a/man/skellamUC.Rd
+++ b/man/skellamUC.Rd
@@ -7,10 +7,13 @@
\title{The Skellam Distribution}
\description{
Density
-% distribution function, quantile function
and random generation for the
Skellam distribution.
+
+% distribution function, quantile function
+
+
}
\usage{
dskellam(x, mu1, mu2, log = FALSE)
@@ -39,9 +42,11 @@ rskellam(n, mu1, mu2)
}
\value{
\code{dskellam} gives the density, and
+ \code{rskellam} generates random deviates.
+
+
% \code{pskellam} gives the distribution function,
% \code{qskellam} gives the quantile function, and
- \code{rskellam} generates random deviates.
}
diff --git a/man/skewnormUC.Rd b/man/skewnormUC.Rd
index 88f9fd0..e68eb34 100644
--- a/man/skewnormUC.Rd
+++ b/man/skewnormUC.Rd
@@ -8,11 +8,13 @@
\title{ Skew-Normal Distribution }
\description{
Density and
-% , distribution function, quantile function and
random generation
for the univariate skew-normal distribution.
+% , distribution function, quantile function and
+
+
}
\usage{
dskewnorm(x, location = 0, scale = 1, shape = 0, log = FALSE)
@@ -67,9 +69,11 @@ rskewnorm(n, location = 0, scale = 1, shape = 0)
}
\value{
\code{dskewnorm} gives the density,
+ and \code{rskewnorm} generates random deviates.
+
+
% \code{pskewnorm} gives the distribution function,
% \code{qskewnorm} gives the quantile function, and
- \code{rskewnorm} generates random deviates.
}
diff --git a/man/skewnormal.Rd b/man/skewnormal.Rd
index a3fcb02..86c340e 100644
--- a/man/skewnormal.Rd
+++ b/man/skewnormal.Rd
@@ -9,7 +9,7 @@
}
\usage{
-skewnormal(lshape = "identity", ishape = NULL, nsimEIM = NULL)
+skewnormal(lshape = "identitylink", ishape = NULL, nsimEIM = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -99,7 +99,7 @@ skewnormal(lshape = "identity", ishape = NULL, nsimEIM = NULL)
\examples{
sdata <- data.frame(y1 = rskewnorm(nn <- 1000, shape = 5))
-fit1 <- vglm(y1 ~ 1, skewnormal, sdata, trace = TRUE)
+fit1 <- vglm(y1 ~ 1, skewnormal, data = sdata, trace = TRUE)
coef(fit1, matrix = TRUE)
head(fitted(fit1), 1)
with(sdata, mean(y1))
@@ -109,7 +109,7 @@ with(sdata, lines(x, dskewnorm(x, shape = Coef(fit1)), col = "blue")) }
sdata <- data.frame(x2 = runif(nn))
sdata <- transform(sdata, y2 = rskewnorm(nn, shape = 1 + 2*x2))
-fit2 <- vglm(y2 ~ x2, skewnormal, sdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y2 ~ x2, skewnormal, data = sdata, trace = TRUE, crit = "coef")
summary(fit2)
}
\keyword{models}
diff --git a/man/slash.Rd b/man/slash.Rd
index dbbee72..1350328 100644
--- a/man/slash.Rd
+++ b/man/slash.Rd
@@ -7,7 +7,7 @@
slash distribution by maximum likelihood estimation.
}
\usage{
-slash(lmu = "identity", lsigma = "loge",
+slash(lmu = "identitylink", lsigma = "loge",
imu = NULL, isigma = NULL, iprobs = c(0.1, 0.9), nsimEIM = 250,
zero = NULL, smallno = .Machine$double.eps*1000)
}
@@ -77,6 +77,8 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
@@ -103,14 +105,15 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
\seealso{
- \code{\link{rslash}}.
+ \code{\link{rslash}},
+ \code{\link{simulate.vlm}}.
}
\examples{
\dontrun{
sdata <- data.frame(y = rslash(n = 1000, mu = 4, sigma = exp(2)))
-fit <- vglm(y ~ 1, slash, sdata, trace = TRUE)
+fit <- vglm(y ~ 1, slash, data = sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/slashUC.Rd b/man/slashUC.Rd
index 49cb586..1160b50 100644
--- a/man/slashUC.Rd
+++ b/man/slashUC.Rd
@@ -12,25 +12,46 @@
}
\usage{
dslash(x, mu = 0, sigma = 1, log = FALSE, smallno = .Machine$double.eps*1000)
-pslash(q, mu = 0, sigma = 1)
+pslash(q, mu = 0, sigma = 1, very.negative = -10000)
rslash(n, mu = 0, sigma = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q}{vector of quantiles.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{
+ Same as \code{\link[stats]{runif}}.
+% number of observations. Must be a single positive integer.
+
+
+ }
\item{mu, sigma}{the mean and standard deviation of
the univariate normal distribution.
+
}
\item{log}{
Logical.
If \code{TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{very.negative}{
+ Numeric, of length 1.
+ A large negative value.
+ For \code{(q-mu)/sigma} values less than this,
+ the value 0 is returned because
+ \code{\link[stats]{integrate}} tends to fail.
+ A warning is issued.
+ Similarly, if \code{(q-mu)/sigma} is greater than
+ \code{abs(very.negative)} then 1 is returned
+ with a warning.
+
+
}
\item{smallno}{
See \code{\link{slash}}.
+
}
}
\details{
@@ -38,12 +59,22 @@ rslash(n, mu = 0, sigma = 1)
for estimating the two parameters by maximum likelihood estimation,
for the formula of the probability density function and other details.
+
+ Function \code{\link{pslash}} uses a \code{for ()} loop and
+ \code{\link[stats]{integrate}}, hence it is very slow.
+ It may also be inaccurate for extreme values of \code{q},
+ and returns 0 or 1 when \code{q} is too extreme relative
+ to \code{very.negative}.
+
+
+
}
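A hedged sketch (not \pkg{VGAM}'s actual code) of the computation just
described: integrate the standard slash density up to
\code{z = (q - mu)/sigma}, with the \code{very.negative} cutoffs applied
first. The name \code{pslash.sketch} is made up.

pslash.sketch <- function(q, mu = 0, sigma = 1, very.negative = -10000) {
  ans <- numeric(length(q))
  for (i in seq_along(q)) {          # The for () loop, hence slow
    z <- (q[i] - mu) / sigma
    ans[i] <- if (z < very.negative) 0 else
              if (z > abs(very.negative)) 1 else
              integrate(dslash, lower = very.negative, upper = z)$value
  }
  ans
}
pslash.sketch(0)  # 0.5, by symmetry of the standard slash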
\value{
\code{dslash} gives the density, and
\code{pslash} gives the distribution function,
\code{rslash} generates random deviates.
+
}
%\references{ }
\author{ Thomas W. Yee and C. S. Chee}
@@ -53,19 +84,21 @@ rslash(n, mu = 0, sigma = 1)
}
\seealso{
- \code{\link{slash}}.
+ \code{\link{slash}}.
+
}
\examples{
\dontrun{
-curve(dslash, col = "blue", ylab = "f(x)", -5, 5, ylim = c(0, 0.4),
+curve(dslash, col = "blue", ylab = "f(x)", -5, 5, ylim = c(0, 0.4), las = 1,
main = "Standard slash, normal and Cauchy densities", lwd = 2)
curve(dnorm, col = "black", lty = 2, lwd = 2, add = TRUE)
curve(dcauchy, col = "orange", lty = 3, lwd = 2, add = TRUE)
-legend(x = 2, y = 0.3, c("slash", "normal", "Cauchy"), lty = 1:3,
+legend("topleft", c("slash", "normal", "Cauchy"), lty = 1:3,
col = c("blue","black","orange"), lwd = 2)
curve(pslash, col = "blue", -5, 5, ylim = 0:1)
+pslash(c(-Inf, -20000, 20000, Inf)) # Gives a warning
}
}
\keyword{distribution}
diff --git a/man/smart.expression.Rd b/man/smart.expression.Rd
index c3cfa46..06f2f12 100644
--- a/man/smart.expression.Rd
+++ b/man/smart.expression.Rd
@@ -21,14 +21,14 @@
}
\examples{
-"my2" <- function(x, minx=min(x)) { # Here is a smart function
- x <- x # Needed for nested calls, e.g., bs(scale(x))
- if(smart.mode.is("read")) {
- return(eval(smart.expression))
- } else
- if(smart.mode.is("write"))
- put.smart(list(minx=minx, match.call=match.call()))
- (x-minx)^2
+"my2" <- function(x, minx = min(x)) { # Here is a smart function
+ x <- x # Needed for nested calls, e.g., sm.bs(sm.scale(x))
+ if (smart.mode.is("read")) {
+ return(eval(smart.expression))
+ } else
+ if (smart.mode.is("write"))
+ put.smart(list(minx = minx, match.call = match.call()))
+ (x - minx)^2
}
attr(my2, "smart") <- TRUE
}
diff --git a/man/smart.mode.is.Rd b/man/smart.mode.is.Rd
index 3585310..3bda619 100644
--- a/man/smart.mode.is.Rd
+++ b/man/smart.mode.is.Rd
@@ -9,10 +9,14 @@ smart.mode.is(mode.arg=NULL)
}
\arguments{
\item{mode.arg}{
- a character string, either \code{"read"}, \code{"write"} or \code{"neutral"}.
+ a character string, either \code{"read"}, \code{"write"}
+ or \code{"neutral"}.
+
+
}}
\value{
- If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE} is returned.
+ If \code{mode.arg} is given, then either \code{TRUE} or \code{FALSE}
+ is returned.
If \code{mode.arg} is not given, then the mode (\code{"neutral"},
\code{"read"} or \code{"write"})
is returned. Usually, the mode is \code{"neutral"}.
@@ -37,20 +41,23 @@ smart.mode.is(mode.arg=NULL)
If in \code{"neutral"} mode then the smart function behaves like an
ordinary function.
+
+
}
\examples{
my1 <- function(x, minx = min(x)) { # Here is a smart function
- x <- x # Needed for nested calls, e.g., bs(scale(x))
- if(smart.mode.is("read")) {
- smart <- get.smart()
- minx <- smart$minx # Overwrite its value
- } else if(smart.mode.is("write"))
- put.smart(list(minx = minx))
- sqrt(x - minx)
+ x <- x # Needed for nested calls, e.g., bs(scale(x))
+ if (smart.mode.is("read")) {
+ smart <- get.smart()
+ minx <- smart$minx # Overwrite its value
+ } else
+ if (smart.mode.is("write"))
+ put.smart(list(minx = minx))
+ sqrt(x - minx)
}
attr(my1, "smart") <- TRUE
-smart.mode.is() # Returns "neutral"
+smart.mode.is() # Returns "neutral"
smart.mode.is(smart.mode.is()) # Returns TRUE
}
%\keyword{smart}
diff --git a/man/smartpred.Rd b/man/smartpred.Rd
index 4c5979f..d17a399 100644
--- a/man/smartpred.Rd
+++ b/man/smartpred.Rd
@@ -1,16 +1,36 @@
\name{smartpred}
\alias{smartpred}
+\alias{sm.bs}
+\alias{sm.ns}
+\alias{sm.scale}
+\alias{sm.scale.default}
+\alias{sm.poly}
\title{ Smart Prediction }
\description{
Data-dependent parameters in formula terms
can cause problems when predicting.
- The \pkg{smartpred} package for \R and S-PLUS saves
+ The \pkg{smartpred} package
+ saves
data-dependent parameters on the object so that the problem is avoided.
The \code{\link[stats]{lm}} and \code{\link[stats]{glm}} functions have
been fixed properly. Note that the \pkg{VGAM} package by T. W. Yee
automatically comes with smart prediction.
+
+
+
+}
+\usage{
+sm.bs(x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
+ Boundary.knots = range(x))
+sm.ns(x, df = NULL, knots = NULL, intercept = FALSE,
+ Boundary.knots = range(x))
+sm.poly(x, ..., degree = 1, coefs = NULL, raw = FALSE)
+sm.scale(x, center = TRUE, scale = TRUE)
}
+
+
+
%\usage{
%lm()
%glm()
@@ -25,9 +45,47 @@
%cqo()
%uqo()
%}
+
+
+
+\arguments{
+ \item{x}{
+ The \code{x} argument is common to all of these functions.
+
+
+ }
+
+ \item{df, knots, intercept, Boundary.knots}{
+ See \code{\link[splines]{bs}} and/or
+ \code{\link[splines]{ns}}.
+
+
+ }
+ \item{degree, \dots, coefs, raw}{
+ See \code{\link[stats]{poly}}.
+
+
+ }
+ \item{center, scale}{
+ See \code{\link[base]{scale}}.
+
+
+ }
+
+}
+
+
+
+
\value{
- Returns the usual object, but with one list/slot component called
- \code{smart.prediction} containing any data-dependent parameters.
+ The usual value returned by
+ \code{\link[splines]{bs}},
+ \code{\link[splines]{ns}},
+ \code{\link[stats]{poly}} and
+ \code{\link[base]{scale}}.
+ When used with functions such as \code{\link[VGAM]{vglm}},
+ the data-dependent parameters are saved in a slot component called
+ \code{smart.prediction}.
}
@@ -37,16 +95,19 @@
\code{.smart.prediction} and
\code{.smart.prediction.counter}
are created while the model is being fitted.
- In \R they are created in a new environment called \code{smartpredenv}.
- In S-PLUS they are created in frame 1.
+ They are created in a new environment called \code{smartpredenv}.
These variables are deleted after the model has been fitted.
- However, in \R,
+ However,
if there is an error in the model fitting function or the fitting
model is killed (e.g., by typing control-C) then these variables will
be left in \code{smartpredenv}. At the beginning of model fitting,
these variables are deleted if present in \code{smartpredenv}.
+% In S-PLUS they are created in frame 1.
+
+
+
During prediction, the variables
\code{.smart.prediction} and
\code{.smart.prediction.counter}
@@ -57,8 +118,7 @@
If the modelling function is used with argument \code{smart = FALSE}
(e.g., \code{vglm(..., smart = FALSE)}) then smart prediction will not
- be used, and the results should match with the original \R or S-PLUS
- functions.
+ be used, and the results should match with the original \R functions.
}
@@ -73,25 +133,35 @@
See the examples below.
Smart prediction, however, will always work.
+
+
% albeit, not so elegantly.
+
The basic idea is that the functions in the formula are now smart, and the
modelling functions make use of these smart functions. Smart prediction
works in two ways: using \code{\link{smart.expression}}, or using a
combination of \code{\link{put.smart}} and \code{\link{get.smart}}.
+
+
}
\author{T. W. Yee and T. J. Hastie}
-\note{
- In S-PLUS you will need to load in the \pkg{smartpred} library with
- the argument \code{first = T}, e.g.,
- \code{library(smartpred, lib = "./mys8libs", first = T)}.
- Here, \code{mys8libs} is the name of a directory of installed packages.
- To install the smartpred package in Linux/Unix, type something like
- \code{Splus8 INSTALL -l ./mys8libs ./smartpred_0.8-2.tar.gz}.
-}
+
+%\note{
+% In S-PLUS you will need to load in the \pkg{smartpred} library with
+% the argument \code{first = T}, e.g.,
+% \code{library(smartpred, lib = "./mys8libs", first = T)}.
+% Here, \code{mys8libs} is the name of a directory of installed packages.
+% To install the smartpred package in Linux/Unix, type something like
+% \code{Splus8 INSTALL -l ./mys8libs ./smartpred_0.8-2.tar.gz}.
+
+%}
+
+
+
%\note{
% In \R and
@@ -122,10 +192,12 @@
loaded in because it contains compiled code that
\code{\link[splines]{bs}} and \code{\link[splines]{ns}} call.
+
The website \url{http://www.stat.auckland.ac.nz/~yee}
contains more information such as how to write a
smart function, and other technical details.
+
The functions \code{\link[VGAM]{vglm}},
\code{\link[VGAM]{vgam}},
\code{\link[VGAM]{rrvglm}}
@@ -134,63 +206,88 @@
in T. W. Yee's \pkg{VGAM}
package are examples of modelling functions that employ smart prediction.
+
+
}
\section{WARNING }{
- In S-PLUS,
- if the \code{"bigdata"} library is loaded then it is
- \code{detach()}'ed. This is done because
- \code{scale} cannot be made smart if \code{"bigdata"} is loaded
- (it is loaded by default in the Windows version of
- Splus 8.0, but not in Linux/Unix).
- The function \code{\link[base]{search}} tells what is
- currently attached.
-
- In \R and S-PLUS the functions
+% In S-PLUS,
+% if the \code{"bigdata"} library is loaded then it is
+% \code{detach()}'ed. This is done because
+% \code{scale} cannot be made smart if \code{"bigdata"} is loaded
+% (it is loaded by default in the Windows version of
+% Splus 8.0, but not in Linux/Unix).
+% The function \code{\link[base]{search}} tells what is
+% currently attached.
+
+
+% In \R and S-PLUS
+
+
+ The functions
+ \code{\link[splines]{bs}},
+ \code{\link[splines]{ns}},
+ \code{\link[stats]{poly}} and
+ \code{\link[base]{scale}}
+ are now left alone (from 2014-05 onwards) and no longer smart.
+ They work via safe prediction.
+ The smart versions of these functions have been renamed and
+ they begin with \code{"sm."}.
+
+
+
+
+ The functions
\code{\link[splines]{predict.bs}} and
\code{predict.ns}
are not smart.
That is because they operate on objects that contain attributes only
and do not have list components or slots.
- In \R the function
+ The function
\code{\link[stats:poly]{predict.poly}} is not smart.
+
+
+
+
+
}
\examples{
# Create some data first
n <- 20
-set.seed(86) # For reproducibility of the random numbers
-x <- sort(runif(n))
-y <- sort(runif(n))
-\dontrun{if(is.R()) library(splines) # To get ns() in R
+set.seed(86) # For reproducibility of the random numbers
+ldata <- data.frame(x2 = sort(runif(n)), y = sort(runif(n)))
+library("splines") # To get ns() in R
+
+# This will work for R 1.6.0 and later
+fit <- lm(y ~ ns(x2, df = 5), data = ldata)
+\dontrun{
+plot(y ~ x2, data = ldata)
+lines(fitted(fit) ~ x2, data = ldata)
+new.ldata <- data.frame(x2 = seq(0, 1, len = n))
+points(predict(fit, new.ldata) ~ x2, new.ldata, type = "b", col = 2, err = -1)
}
-# This will work for R 1.6.0 and later, but fail for S-PLUS
-fit <- lm(y ~ ns(x, df = 5))
-\dontrun{ plot(x, y)
-lines(x, fitted(fit))
-newx <- seq(0, 1, len = n)
-points(newx, predict(fit, data.frame(x = newx)), type = "b",
- col = 2, err = -1) }
-
-# The following fails for R 1.6.x and later but works with smart prediction
-fit <- lm(y ~ ns(scale(x), df = 5))
-\dontrun{ fit$smart.prediction
-plot(x, y)
-lines(x, fitted(fit))
-newx <- seq(0, 1, len = n)
-points(newx, predict(fit, data.frame(x = newx)), type = "b",
- col = 2, err = -1) }
-
-# The following requires the VGAM package to be loaded
-\dontrun{ library(VGAM)
-fit <- vlm(y ~ ns(scale(x), df = 5))
-fit@smart.prediction
-plot(x, y)
-lines(x, fitted(fit))
-newx <- seq(0, 1, len = n)
-points(newx, predict(fit, data.frame(x = newx)), type = "b",
- col = 2, err = -1) }
+# The following fails for R 1.6.x and later. It can be
+# made to work with smart prediction provided
+# ns is changed to sm.ns and scale is changed to sm.scale:
+fit1 <- lm(y ~ ns(scale(x2), df = 5), data = ldata)
+\dontrun{
+plot(y ~ x2, data = ldata, main = "Safe prediction fails")
+lines(fitted(fit1) ~ x2, data = ldata)
+points(predict(fit1, new.ldata) ~ x2, new.ldata, type = "b", col = 2, err = -1)
+}
+
+# Fit the above using smart prediction
+\dontrun{
+library("VGAM") # The following requires the VGAM package to be loaded
+fit2 <- vglm(y ~ sm.ns(sm.scale(x2), df = 5), uninormal, data = ldata)
+fit2@smart.prediction
+plot(y ~ x2, data = ldata, main = "Smart prediction")
+lines(fitted(fit2) ~ x2, data = ldata)
+points(predict(fit2, new.ldata, type = "response") ~ x2, data = new.ldata,
+ type = "b", col = 2, err = -1)
+}
}
%\keyword{smart}
\keyword{models}
diff --git a/man/sratio.Rd b/man/sratio.Rd
index 8e7c378..4b77baf 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -68,6 +68,7 @@ sratio(link = "logit", parallel = FALSE, reverse = FALSE,
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Agresti, A. (2002)
diff --git a/man/studentt.Rd b/man/studentt.Rd
index 1a9074c..42c744c 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -10,10 +10,10 @@
}
\usage{
- studentt(ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
-studentt2(df = Inf, llocation = "identity", lscale = "loge",
+studentt (ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
+studentt2(df = Inf, llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
-studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
+studentt3(llocation = "identitylink", lscale = "loge", ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
imethod = 1, zero = -(2:3))
}
@@ -149,7 +149,8 @@ application to financial econometrics.
\code{\link{logistic}},
\code{\link{huber2}},
\code{\link{koenker}},
- \code{\link[stats]{TDist}}.
+ \code{\link[stats]{TDist}},
+ \code{\link{simulate.vlm}}.
}
@@ -157,10 +158,10 @@ application to financial econometrics.
tdata <- data.frame(x2 = runif(nn <- 1000))
tdata <- transform(tdata, y1 = rt(nn, df = exp(exp(0.5 - x2))),
y2 = rt(nn, df = exp(exp(0.5 - x2))))
-fit1 <- vglm(y1 ~ x2, studentt, tdata, trace = TRUE)
+fit1 <- vglm(y1 ~ x2, studentt, data = tdata, trace = TRUE)
coef(fit1, matrix = TRUE)
-fit2 <- vglm(cbind(y1, y2) ~ x2, studentt3, tdata, trace = TRUE)
+fit2 <- vglm(cbind(y1, y2) ~ x2, studentt3, data = tdata, trace = TRUE)
coef(fit2, matrix = TRUE)
}
\keyword{models}
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index bd9e9e1..44946ad 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -7,7 +7,7 @@
}
\usage{
-tikuv(d, lmean = "identity", lsigma = "loge",
+tikuv(d, lmean = "identitylink", lsigma = "loge",
isigma = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -132,7 +132,7 @@ coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
with(tdata, mean(y))
\dontrun{ with(tdata, hist(y, prob = TRUE))
-lines(dtikuv(sy, d = 1, m = Cfit[1], s = Cfit[2]) ~ sy, tdata, col = "orange") }
+lines(dtikuv(sy, d = 1, m = Cfit[1], s = Cfit[2]) ~ sy, data = tdata, col = "orange") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/tobit.Rd b/man/tobit.Rd
index fb64dc8..dda5560 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -7,7 +7,7 @@
}
\usage{
-tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
+tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
nsimEIM = 250, imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
imethod = 1, zero = -2)
@@ -191,13 +191,13 @@ with(tdata, table(y2 == Lower | y2 == Upper)) # How many censored values?
with(tdata, table(attr(y2, "cenL")))
with(tdata, table(attr(y2, "cenU")))
-fit1 <- vglm(y1 ~ x2, tobit, tdata, trace = TRUE,
+fit1 <- vglm(y1 ~ x2, tobit, data = tdata, trace = TRUE,
crit = "coeff") # crit = "coeff" is recommended
coef(fit1, matrix = TRUE)
summary(fit1)
fit2 <- vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"),
- tdata, crit = "coeff", trace = TRUE) # ditto
+ data = tdata, crit = "coeff", trace = TRUE) # ditto
table(fit2@extra$censoredL)
table(fit2@extra$censoredU)
coef(fit2, matrix = TRUE)
@@ -205,7 +205,7 @@ coef(fit2, matrix = TRUE)
fit3 <- vglm(y3 ~ x2,
tobit(Lower = with(tdata, Lower.vec),
Upper = with(tdata, Upper.vec), type.f = "cens"),
- tdata, crit = "coeff", trace = TRUE) # ditto
+ data = tdata, crit = "coeff", trace = TRUE) # ditto
table(fit3@extra$censoredL)
table(fit3@extra$censoredU)
coef(fit3, matrix = TRUE)
@@ -214,7 +214,7 @@ coef(fit3, matrix = TRUE)
fit4 <- vglm(cbind(y3, y4) ~ x2,
tobit(Lower = rep(with(tdata, Lower.vec), each = 2),
Upper = rep(with(tdata, Upper.vec), each = 2)),
- tdata, crit = "coeff", trace = TRUE) # ditto
+ data = tdata, crit = "coeff", trace = TRUE) # ditto
head(fit4@extra$censoredL) # A matrix
head(fit4@extra$censoredU) # A matrix
head(fit4@misc$Lower) # A matrix
@@ -238,7 +238,7 @@ lines(fitted(lm(y1 ~ x2, tdata)) ~ x2, tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
# Plot fit2
-plot(y2 ~ x2, tdata, las = 1, main = "Tobit model",
+plot(y2 ~ x2, data = tdata, las = 1, main = "Tobit model",
col = as.numeric(attr(y2, "cenL")) + 3 +
as.numeric(attr(y2, "cenU")),
pch = as.numeric(attr(y2, "cenL")) + 1 +
@@ -253,7 +253,7 @@ lines(fitted(lm(y2 ~ x2, tdata)) ~ x2, tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
# Plot fit3
-plot(y3 ~ x2, tdata, las = 1,
+plot(y3 ~ x2, data = tdata, las = 1,
main = "Tobit model with nonconstant censor levels",
col = as.numeric(attr(y3, "cenL")) + 3 +
as.numeric(attr(y3, "cenU")),
@@ -269,7 +269,7 @@ lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
# Plot fit4
-plot(y3 ~ x2, tdata, las = 1,
+plot(y3 ~ x2, data = tdata, las = 1,
main = "Tobit model with nonconstant censor levels",
col = as.numeric(attr(y3, "cenL")) + 3 +
as.numeric(attr(y3, "cenU")),
@@ -279,9 +279,9 @@ legend(x = "topleft", leg = c("censored", "uncensored"),
pch = c(2, 1), col = c("blue", "green"))
legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2))
-lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2)
+lines(meanfun3(x2) ~ x2, data = tdata, col = "purple", lwd = 2)
lines(fitted(fit4)[, 1] ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
-lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
+lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, data = tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
}
}
diff --git a/man/tobitUC.Rd b/man/tobitUC.Rd
index 16fc305..ba4c3a8 100644
--- a/man/tobitUC.Rd
+++ b/man/tobitUC.Rd
@@ -40,6 +40,7 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
\code{qtobit} gives the quantile function, and
\code{rtobit} generates random deviates.
+
}
\author{ T. W. Yee }
\details{
@@ -58,6 +59,7 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
\seealso{
\code{\link{tobit}}.
+
}
\examples{
\dontrun{ m <- 0.5; x <- seq(-2, 4, len = 501)
diff --git a/man/toxop.Rd b/man/toxop.Rd
index 1ed56fb..017aa91 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -35,7 +35,8 @@
}
\seealso{
- \code{\link[VGAM]{double.expbinomial}}.
+ \code{\link[VGAM]{double.expbinomial}}.
+
}
diff --git a/man/triangle.Rd b/man/triangle.Rd
index fd82608..a9317f1 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -101,14 +101,15 @@ World Scientific: Singapore.
}
\seealso{
- \code{\link{Triangle}}.
+ \code{\link{Triangle}},
+ \code{\link{simulate.vlm}}.
}
\examples{
# Example 1
tdata <- data.frame(y = rtriangle(n <- 3000, theta = 3/4))
-fit <- vglm(y ~ 1, triangle(link = "identity"), tdata, trace = TRUE)
+fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = tdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fit@extra$lower)
@@ -117,10 +118,10 @@ with(tdata, mean(y))
# Example 2; Kotz and van Dorp (2004), p.14
rdata <- data.frame(y = c(0.1, 0.25, 0.3, 0.4, 0.45, 0.6, 0.75, 0.8))
-fit <- vglm(y ~ 1, triangle(link = "identity"), rdata, trace = TRUE,
+fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = rdata, trace = TRUE,
crit = "coef", maxit = 1000)
Coef(fit) # The MLE is the 3rd order statistic, which is 0.3.
-fit <- vglm(y ~ 1, triangle(link = "identity"), rdata, trace = TRUE,
+fit <- vglm(y ~ 1, triangle(link = "identitylink"), data = rdata, trace = TRUE,
crit = "coef", maxit = 1001)
Coef(fit) # The MLE is the 3rd order statistic, which is 0.3.
}
diff --git a/man/trplot.Rd b/man/trplot.Rd
index aa5dbe3..41b988e 100644
--- a/man/trplot.Rd
+++ b/man/trplot.Rd
@@ -73,7 +73,7 @@ p1cqo <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
quasipoissonff, data = hspider, Crow1positive = FALSE)
nos <- ncol(depvar(p1cqo))
-clr <- 1:nos # OR (1:(nos+1))[-7] to omit yellow
+clr <- 1:nos # OR (1:(nos+1))[-7] to omit yellow
trplot(p1cqo, which.species = 1:3, log = "xy",
col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
index 775247c..af7b3e3 100644
--- a/man/truncweibull.Rd
+++ b/man/truncweibull.Rd
@@ -140,7 +140,7 @@ lower.limit2 <- with(wdata, quantile(y2, prob = prop.lost)) # Proportion lost
wdata <- subset(wdata, y2 > lower.limit2) # Smaller due to truncation
fit1 <- vglm(y2 ~ x2, maxit = 100, trace = TRUE,
- truncweibull(lower.limit = lower.limit2), data = wdata)
+ truncweibull(lower.limit = lower.limit2), data = wdata)
coef(fit1, matrix = TRUE)
summary(fit1)
vcov(fit1)
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index c692548..9d1ee8f 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,6 +4,11 @@
%\alias{ccoef-method}
%
%
+%
+% 201312
+\alias{simulate,ANY-method}
+\alias{simulate,vlm-method}
+%
% 20131104
\alias{family.name,ANY-method}
\alias{family.name,vlm-method}
@@ -115,7 +120,7 @@
%
\alias{deviance,qrrvglm-method}
\alias{deviance,vlm-method}
-\alias{deviance,vglm-method}
+%\alias{deviance,vglm-method}
%\alias{deviance,uqo-method}
\alias{df.residual,vlm-method}
\alias{effects,vlm-method}
diff --git a/man/uninormal.Rd b/man/uninormal.Rd
index ec1b7a1..be99c38 100644
--- a/man/uninormal.Rd
+++ b/man/uninormal.Rd
@@ -10,7 +10,7 @@
}
\usage{
-uninormal(lmean = "identity", lsd = "loge", lvar = "loge",
+uninormal(lmean = "identitylink", lsd = "loge", lvar = "loge",
var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
smallno = 1e-05, zero = -2)
}
@@ -117,30 +117,31 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\code{\link{huber2}},
\code{\link{studentt}},
\code{\link{binormal}},
- \code{\link[stats:Normal]{dnorm}}.
+ \code{\link[stats:Normal]{dnorm}},
+ \code{\link{simulate.vlm}}.
}
\examples{
-ndata <- data.frame(x2 = rnorm(nn <- 200))
-ndata <- transform(ndata,
+udata <- data.frame(x2 = rnorm(nn <- 200))
+udata <- transform(udata,
y1 = rnorm(nn, m = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
y2a = rnorm(nn, m = 1 + 2*x2, sd = exp(1 + 2.0*x2)^0.5),
y2b = rnorm(nn, m = 1 + 2*x2, sd = exp(1 + 2.0*x2)^0.5))
-fit1 <- vglm(y1 ~ x2, uninormal(zero = NULL), ndata, trace = TRUE)
+fit1 <- vglm(y1 ~ x2, uninormal(zero = NULL), data = udata, trace = TRUE)
coef(fit1, matrix = TRUE)
-fit2 <- vglm(cbind(y2a, y2b) ~ x2, data = ndata, trace = TRUE,
+fit2 <- vglm(cbind(y2a, y2b) ~ x2, data = udata, trace = TRUE,
uninormal(var = TRUE, parallel = TRUE ~ x2,
zero = NULL))
coef(fit2, matrix = TRUE)
# Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
theta <- 10
-ndata <- data.frame(y3 = rnorm(100, m = theta, sd = theta))
-fit3a <- vglm(y3 ~ 1, uninormal(lsd = "identity"), ndata,
+udata <- data.frame(y3 = rnorm(100, m = theta, sd = theta))
+fit3a <- vglm(y3 ~ 1, uninormal(lsd = "identitylink"), data = udata,
constraints = list("(Intercept)" = rbind(1, 1)))
-fit3b <- vglm(y3 ~ 1, uninormal(lsd = "identity", parallel = TRUE ~ 1,
- zero = NULL), ndata)
+fit3b <- vglm(y3 ~ 1, uninormal(lsd = "identitylink", parallel = TRUE ~ 1,
+ zero = NULL), data = udata)
coef(fit3a, matrix = TRUE)
coef(fit3b, matrix = TRUE) # Same as fit3a
}
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 02d0ea3..3664de0 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -45,6 +45,11 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{weights, subset, na.action}{
Same as for \code{\link{vglm}}.
+ Note that \code{subset} may be unreliable; to get around
+ this problem it is best to use \code{\link[base]{subset}} to create
+ a new smaller data frame and feed in the smaller data frame.
+ See below for an example.
+ This is a bug that needs fixing.
}
@@ -234,7 +239,7 @@ The \code{VGAM} Package.
\examples{ # Nonparametric proportional odds model
pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
- cumulative(parallel = TRUE), data = pneumo)
+ cumulative(parallel = TRUE), data = pneumo, trace = TRUE)
# Nonparametric logistic regression
fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
@@ -257,6 +262,18 @@ with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, 0.8),
xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
main = "Two plant species' response curves", type = "l", lwd = 2))
with(hunua, rug(altitude)) }
+
+# The subset argument does not work here. Use subset() instead.
+set.seed(1)
+zdata <- data.frame(x2 = runif(nn <- 100))
+zdata <- transform(zdata, y = rbinom(nn, 1, 0.5))
+zdata <- transform(zdata, subS = runif(nn) < 0.7)
+sub.zdata <- subset(zdata, subS) # Use this instead
+if (FALSE)
+ fit4a <- vgam(cbind(y, y) ~ s(x2, df = 2), binomialff(mv = TRUE),
+ data = zdata, subset = subS) # This fails!!!
+fit4b <- vgam(cbind(y, y) ~ s(x2, df = 2), binomialff(mv = TRUE),
+ data = sub.zdata) # This succeeds!!!
}
\keyword{models}
\keyword{regression}
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
index b50d27f..97ae6fc 100644
--- a/man/vglm-class.Rd
+++ b/man/vglm-class.Rd
@@ -231,6 +231,6 @@ Vector generalized additive models.
\examples{
# Multinomial logit model
pneumo <- transform(pneumo, let = log(exposure.time))
-vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
}
\keyword{classes}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index fb4dedd..b4f2881 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -34,6 +34,13 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
``\pkg{VGAM} family function''. See \code{\link{CommonVGAMffArguments}}
for general information about many types of arguments found in this
type of function.
+ The argument name \code{"family"} is used loosely, for
+ the convenience of existing \code{\link[stats]{glm}} users;
+ there is no concept of a formal ``error distribution'' for VGLMs.
+ Possibly a better name for the argument would be \code{"model"},
+ but unfortunately
+ that name has already been taken.
+
}
@@ -48,19 +55,34 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{weights}{
- an optional vector or matrix of (prior) weights to be used
- in the fitting process.
+ an optional vector or matrix of (prior fixed and known) weights
+ to be used in the fitting process.
If the \pkg{VGAM} family function handles multiple responses
- (\eqn{q > 1} of them, say) then
- \code{weights} can be a matrix with \eqn{q} columns.
+ (\eqn{Q > 1} of them, say) then
+ \code{weights} can be a matrix with \eqn{Q} columns.
Each column matches the respective response.
If it is a vector (the usual case) then it is recycled into a
- matrix with \eqn{q} columns.
+ matrix with \eqn{Q} columns.
The values of \code{weights} must be positive; try setting
a very small value such as \code{1.0e-8} to effectively
delete an observation.
+
+
+% 20140507:
+
+ Currently the \code{weights} argument does not support sampling
+ weights from complex sampling designs.
+ Nor are sandwich estimators computed in any shape or form.
+ The prior weights simply multiply the corresponding
+ log-likelihood contributions.
+
+
+
+
+
+
% If \code{weights} is a matrix,
% then it should be must be in \emph{matrix-band} form, whereby the
% first \eqn{M} columns of the matrix are the diagonals,
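A minimal sketch of the point above that the prior weights simply
multiply the log-likelihood contributions: doubling every weight leaves
the coefficient estimates unchanged but doubles \code{logLik()}.

pneumo <- transform(pneumo, let = log(exposure.time))
fitw1 <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
fitw2 <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo,
              weights = rep(2, nrow(pneumo)))
max(abs(coef(fitw1) - coef(fitw2)))  # Essentially zero
c(2 * logLik(fitw1), logLik(fitw2))  # These two agree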
@@ -388,6 +410,8 @@ The \code{VGAM} Package.
%~Make other sections like WARNING with \section{WARNING }{....} ~
\section{WARNING}{
See warnings in \code{\link{vglm.control}}.
+ Also, see warnings under \code{weights} above regarding
+ sampling weights from complex sampling designs.
}
@@ -428,11 +452,11 @@ summary(vglm.D93)
# Example 2. Multinomial logit model
pneumo <- transform(pneumo, let = log(exposure.time))
-vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
# Example 3. Proportional odds model
-fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
+fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
coef(fit3, matrix = TRUE)
constraints(fit3)
model.matrix(fit3, type = "lm") # LM model matrix
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index b0831b3..0e69b43 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -136,6 +136,10 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE,
to give \emph{every} term used by the model.
+ The function \code{\link{Select}} can be used to
+ select variables beginning with the same character string.
+
+
}
% \item{jix}{
% A formula or a list of formulas specifying
@@ -224,7 +228,8 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE,
\code{\link{vglm}},
\code{\link{fill}}.
The author's homepage has further documentation about
- the \code{xij} argument.
+ the \code{xij} argument;
+ see also \code{\link{Select}}.
}
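A hedged sketch of \code{Select()} (see the new \code{man/Select.Rd} in
this release; the argument name \code{prefix} is assumed from that
page), using \code{coalminers} as augmented in the example below:

head(Select(coalminers, prefix = "dum"), 2)  # Columns dum1, dum2, dum3, dumm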
@@ -270,12 +275,14 @@ coalminers <- transform(coalminers,
dum2 = round(runif(nrow(coalminers)), digits = 2),
dum3 = round(runif(nrow(coalminers)), digits = 2),
dumm = round(runif(nrow(coalminers)), digits = 2))
-BS <- function(x, ..., df = 3) bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
-NS <- function(x, ..., df = 3) ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
+BS <- function(x, ..., df = 3) sm.bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
+NS <- function(x, ..., df = 3) sm.ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
# Equivalently...
-BS <- function(x, ..., df = 3) head(bs(c(x,...), df = df), length(x), drop = FALSE)
-NS <- function(x, ..., df = 3) head(ns(c(x,...), df = df), length(x), drop = FALSE)
+BS <- function(x, ..., df = 3)
+ head(sm.bs(c(x,...), df = df), length(x), drop = FALSE)
+NS <- function(x, ..., df = 3)
+ head(sm.ns(c(x,...), df = df), length(x), drop = FALSE)
fit3 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1, dum2),
fam = binom2.or(exchangeable = TRUE, zero = 3),
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index 1767568..6006ecd 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -49,10 +49,10 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
}
\item{\code{infos}:}{
Object of class \code{"function"} which
- returns a list with components such as \code{Musual}.
+ returns a list with components such as \code{M1}.
At present only a very few \pkg{VGAM} family functions have this
feature implemented.
- Those that do do not require specifying the \code{Musual}
+ Those that do, do not require specifying the \code{M1}
argument when used with \code{\link{rcim}}.
@@ -117,6 +117,13 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
}
+ \item{\code{simslot}:}{
+ Object of class \code{"function"} to allow
+ \code{\link[stats]{simulate}} to work.
+
+
+
+ }
\item{\code{summary.dispersion}:}{
Object of class \code{"logical"}
indicating whether the general VGLM formula (based on a residual
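For orientation, a hypothetical sketch of the shape a \code{simslot}
function takes for a Poisson-like family: given the fitted object and
\code{nsim}, it returns random variates from the fitted distribution.
Actual slot bodies in \pkg{VGAM} family functions may differ.

sim.sketch <- function(object, nsim) {
  ftd <- fitted(object)                    # Fitted means
  rpois(nsim * length(ftd), lambda = ftd)  # One long vector of draws
}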
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index 54dcc04..2d207c5 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -126,7 +126,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\examples{
vdata <- data.frame(x2 = runif(nn <- 1000))
vdata <- transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!!
-fit <- vglm(y ~ x2, vonmises(zero = 2), vdata, trace = TRUE)
+fit <- vglm(y ~ x2, vonmises(zero = 2), data = vdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
with(vdata, range(y)) # Original data
diff --git a/man/waldff.Rd b/man/waldff.Rd
index d07f8b8..cc9aa1e 100644
--- a/man/waldff.Rd
+++ b/man/waldff.Rd
@@ -77,7 +77,7 @@ New York: Wiley.
}
\examples{
wdata <- data.frame(y = rinv.gaussian(n = 1000, mu = 1, lambda = exp(1)))
-fit <- vglm(y ~ 1, waldff(init = 0.2), wdata, trace = TRUE)
+fit <- vglm(y ~ 1, waldff(init = 0.2), data = wdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/weibull.Rd b/man/weibull.Rd
index 9a60082..d733131 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -218,7 +218,7 @@ wdata <- data.frame(x2 = runif(nn <- 1000)) # Complete data
wdata <- transform(wdata,
y1 = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)),
y2 = rweibull(nn, shape = exp(2 - x2), scale = exp( 1)))
-fit <- vglm(cbind(y1, y2) ~ x2, weibull, wdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, weibull, data = wdata, trace = TRUE)
coef(fit, matrix = TRUE)
vcov(fit)
summary(fit)
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index 0542b1a..2100529 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -120,7 +120,7 @@ weightsvglm(object, type = c("prior", "working"),
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
(fit <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+ cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
depvar(fit) # These are sample proportions
weights(fit, type = "prior", matrix = FALSE) # Number of observations
diff --git a/man/wine.Rd b/man/wine.Rd
new file mode 100644
index 0000000..11e973a
--- /dev/null
+++ b/man/wine.Rd
@@ -0,0 +1,91 @@
+\name{wine}
+\alias{wine}
+\docType{data}
+\title{ Bitterness in Wine Data
+%% ~~ data name/kind ... ~~
+
+}
+\description{
+  This oenological data frame concerns the amount of bitterness
+  in white wine; there were 72 bitterness ratings in total.
+
+
+}
+\usage{
+data(wine)
+}
+
+\format{
+ A data frame with 4 rows on the following 7 variables.
+
+\describe{
+ \item{temp}{temperature, with levels cold and warm.
+
+
+ }
+ \item{contact}{whether contact of the juice with the
+ skin was allowed or avoided, for a specified period.
+ Two levels: no or yes.
+
+ }
+ \item{bitter1, bitter2, bitter3, bitter4, bitter5}{
+ numeric vectors, the counts.
+ The order is none to most intense.
+
+
+ }
+}
+
+}
+\details{
+
+The data set comes from Randall (1989) and concerns a factorial
+experiment for investigating factors that affect the bitterness of white
+wines. There are two factors in the experiment: temperature at the time
+of crushing the grapes and contact of the juice with the skin.
+Two bottles of wine were fermented for each of the treatment combinations.
+A panel of 9 judges was selected and trained to
+detect bitterness.
+Thus there were 72 bitterness ratings in total.
+Originally, the bitterness of the wine was recorded on a continuous
+scale in the interval from 0 (none) to 100 (intense),
+but the scores were later grouped, using intervals of equal length,
+into five ordered categories 1, 2, 3, 4 and 5.
+
+
+
+%% ~~ If necessary, more details than the __description__ above ~~
+}
+\source{
+
+% Further information is at:
+
+
+% September 30, 2013
+Christensen, R. H. B. (2013)
+Analysis of ordinal data with cumulative link models---estimation
+with the R-package \pkg{ordinal}.
+R Package Version 2013.9-30.
+\url{http://www.cran.r-project.org/package=ordinal}.
+
+
+ Randall, J. H. (1989)
+ The analysis of sensory data by generalized linear model.
+ \emph{Biometrical Journal} \bold{31}(7), 781--793.
+
+
+ Kosmidis, I. (2014)
+ Improved estimation in cumulative link models.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{76}, in press.
+
+
+}
+\examples{
+wine
+summary(wine)
+}
+\keyword{datasets}
+
+
+
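A hedged sketch of a typical analysis of these data: a proportional
odds (parallel cumulative logit) model with temperature and contact
effects.

fitw <- vglm(cbind(bitter1, bitter2, bitter3, bitter4, bitter5) ~
             temp + contact, cumulative(parallel = TRUE), data = wine)
coef(fitw, matrix = TRUE)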
diff --git a/man/wrapup.smart.Rd b/man/wrapup.smart.Rd
index a2d4c48..89086e8 100644
--- a/man/wrapup.smart.Rd
+++ b/man/wrapup.smart.Rd
@@ -13,11 +13,14 @@ wrapup.smart()
\details{
The variables to be deleted are \code{.smart.prediction},
\code{.smart.prediction.counter}, and \code{.smart.prediction.mode}.
- The function \code{wrapup.smart} is useful in \R because
+ The function \code{wrapup.smart} is useful in \R because
these variables are held in \code{smartpredenv}.
- In S-PLUS,
- \code{wrapup.smart} is not really necessary because the variables are
- placed in frame 1, which disappears when finished anyway.
+
+
+% In S-PLUS,
+% \code{wrapup.smart} is not really necessary because the variables are
+% placed in frame 1, which disappears when finished anyway.
+
}
@@ -25,13 +28,17 @@ wrapup.smart()
See the technical help file at \url{http://www.stat.auckland.ac.nz/~yee}
for details.
+
+
}
\seealso{
\code{\link{setup.smart}}.
+
+
}
\examples{
\dontrun{# Place this inside modelling functions such as lm, glm, vglm.
-wrapup.smart() # Put at the end of lm
+wrapup.smart() # Put at the end of lm
}
}
\keyword{models}
diff --git a/man/yip88.Rd b/man/yip88.Rd
index e012d94..dca6dcf 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -145,7 +145,29 @@ coef(fit3, matrix = TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520)
head(fitted(fit3))
mean(yy) # Compare this with fitted(fit3)
+
+
+
}
\keyword{models}
\keyword{regression}
+
+
+
+% 20140101; try to put into a data frame but it gives a numerical
+% problem:
+%# Another example (Angers and Biswas, 2003) ---------------------
+%abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
+%abdata <- subset(abdata, w > 0)
+%abdata <- subset(abdata, y > 0)
+%Abdata <- data.frame(yy = with(abdata, rep(y, w)))
+%fit3 <- vglm(yy ~ 1, yip88(n = nrow(Abdata)), data = Abdata)
%fit3@misc$pstr0 # Estimate of phi (they get 0.5154 with SE 0.0707)
+%coef(fit3, matrix = TRUE)
+%Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520)
+%head(fitted(fit3))
+%with(Abdata, mean(yy)) # Compare this with fitted(fit3)
+
+
+
diff --git a/man/yulesimon.Rd b/man/yulesimon.Rd
index e91124e..d6703ee 100644
--- a/man/yulesimon.Rd
+++ b/man/yulesimon.Rd
@@ -77,14 +77,17 @@ yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
%}
\seealso{
- \code{\link{ryules}}.
+ \code{\link{ryules}},
+ \code{\link{simulate.vlm}}.
+
+
}
\examples{
ydata <- data.frame(x2 = runif(nn <- 1000))
ydata <- transform(ydata, y = ryules(nn, rho = exp(1.5 - x2)))
with(ydata, table(y))
-fit <- vglm(y ~ x2, yulesimon, ydata, trace = TRUE)
+fit <- vglm(y ~ x2, yulesimon, data = ydata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/yulesimonUC.Rd b/man/yulesimonUC.Rd
index 54c495a..1f7cce0 100644
--- a/man/yulesimonUC.Rd
+++ b/man/yulesimonUC.Rd
@@ -13,9 +13,9 @@
\usage{
dyules(x, rho, log = FALSE)
pyules(q, rho)
-%qyules(p, rho)
ryules(n, rho)
}
+%qyules(p, rho)
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x, q}{
@@ -44,9 +44,12 @@ ryules(n, rho)
\value{
\code{dyules} gives the density,
\code{pyules} gives the distribution function, and
-% \code{qyules} gives the quantile function, and
\code{ryules} generates random deviates.
+
+% \code{qyules} gives the quantile function, and
+
+
}
%\references{
%
diff --git a/man/zabinomUC.Rd b/man/zabinomUC.Rd
index 7b9e571..abbee13 100644
--- a/man/zabinomUC.Rd
+++ b/man/zabinomUC.Rd
@@ -42,6 +42,7 @@ rzabinom(n, size, prob, pobs0 = 0)
binomial(size, prob)
distribution.
+
}
\value{
\code{dzabinom} gives the density and
@@ -49,6 +50,7 @@ rzabinom(n, size, prob, pobs0 = 0)
\code{qzabinom} gives the quantile function, and
\code{rzabinom} generates random deviates.
+
}
%\references{ }
\author{ T. W. Yee }
@@ -56,13 +58,17 @@ rzabinom(n, size, prob, pobs0 = 0)
The argument \code{pobs0} is recycled to the required length, and
must have values which lie in the interval \eqn{[0,1]}.
+
}
\seealso{
-% \code{\link{zabinomial}},
\code{\link{zibinomial}},
\code{\link{rposbinom}}.
+
+% \code{\link{zabinomial}},
+
+
}
\examples{
size <- 10; prob <- 0.15; pobs0 <- 0.05; x <- (-1):7
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index 1eb190c..0f59f78 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -161,7 +161,7 @@ zdata <- transform(zdata,
y1 = rzabinom(nn, size = size, prob = prob, pobs0 = pobs0))
with(zdata, table(y1))
-fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL), zdata, trace = TRUE)
+fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL), data = zdata, trace = TRUE)
coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
diff --git a/man/zageomUC.Rd b/man/zageomUC.Rd
index b47bf98..e21e100 100644
--- a/man/zageomUC.Rd
+++ b/man/zageomUC.Rd
@@ -63,6 +63,7 @@ rzageom(n, prob, pobs0 = 0)
\code{\link{zigeometric}},
\code{\link{rposgeom}}.
+
}
\examples{
prob <- 0.35; pobs0 <- 0.05; x <- (-1):7
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index 497961f..e9955f4 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -163,7 +163,8 @@ zageometricff(lprob = "logit", lonempobs0 = "logit",
\code{\link{geometric}},
\code{\link{zigeometric}},
\code{\link[stats:Geometric]{dgeom}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index ef24c66..8ce6b38 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -42,6 +42,7 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
negative binomial(\eqn{\mu_{nb}}{munb}, size)
distribution.
+
}
\value{
\code{dzanegbin} gives the density and
@@ -49,6 +50,7 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
\code{qzanegbin} gives the quantile function, and
\code{rzanegbin} generates random deviates.
+
}
%\references{ }
\author{ T. W. Yee }
@@ -56,12 +58,15 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
The argument \code{pobs0} is recycled to the required length, and
must have values which lie in the interval \eqn{[0,1]}.
+
}
\seealso{
\code{\link{zanegbinomial}},
\code{\link{rposnegbin}}.
+
+
}
\examples{
munb <- 3; size <- 4; pobs0 <- 0.3; x <- (-1):7
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 37d78dd..7a03aee 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -219,7 +219,10 @@ for counts with extra zeros.
\code{\link{zinegbinomial}},
\code{\link{zipoisson}},
\code{\link[stats:NegBinomial]{dnbinom}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
+
+
}
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index 54b26bf..b128098 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -36,12 +36,16 @@ rzapois(n, lambda, pobs0 = 0)
The probability function of \eqn{Y} is 0 with probability
\code{pobs0}, else a positive \eqn{Poisson(\lambda)}{Poisson(lambda)}.
+
+
}
\value{
\code{dzapois} gives the density,
\code{pzapois} gives the distribution function,
\code{qzapois} gives the quantile function, and
\code{rzapois} generates random deviates.
+
+
}
%\references{ }
\author{ T. W. Yee }
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index 3253f1d..fe0c925 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -194,7 +194,9 @@ contains further information and examples.
\code{\link{posnegbinomial}},
\code{\link{binomialff}},
\code{\link{rpospois}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{simulate.vlm}}.
+
}
@@ -213,17 +215,16 @@ head(predict(fit, untransform = TRUE))
coef(fit, matrix = TRUE)
summary(fit)
-
# Another example ------------------------------
# Data from Angers and Biswas (2003)
abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
abdata <- subset(abdata, w > 0)
-yy <- with(abdata, rep(y, w))
-fit3 <- vglm(yy ~ 1, zapoisson, trace = TRUE, crit = "coef")
+Abdata <- data.frame(yy = with(abdata, rep(y, w)))
+fit3 <- vglm(yy ~ 1, zapoisson, data = Abdata, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
Coef(fit3) # Estimate lambda (they get 0.6997 with SE 0.1520)
head(fitted(fit3), 1)
-mean(yy) # Compare this with fitted(fit3)
+with(Abdata, mean(yy)) # Compare this with fitted(fit3)
}
\keyword{models}
\keyword{regression}
diff --git a/man/zero.Rd b/man/zero.Rd
index 7b1ae01..4dc149a 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -15,6 +15,8 @@
It is simply a convenient argument for constraining
certain linear/additive predictors to be an intercept only.
+
+
}
\section{Warning }{
The use of other arguments may conflict with the \code{zero}
@@ -92,7 +94,9 @@ Reduced-rank vector generalized linear models.
}
\seealso{
- \code{\link{constraints}}.
+ \code{\link{constraints}}.
+
+
}
\examples{
@@ -101,8 +105,8 @@ args(binom2.or)
args(gpd)
#LMS quantile regression example
-fit <- vglm(BMI ~ bs(age, df = 4), lms.bcg(zero = c(1,3)),
- bmi.nz, trace = TRUE)
+fit <- vglm(BMI ~ sm.bs(age, df = 4), lms.bcg(zero = c(1,3)),
+ data = bmi.nz, trace = TRUE)
coef(fit, matrix = TRUE)
}
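A hedged illustration of what \code{zero} does: with
\code{uninormal(zero = 2)} the second linear/additive predictor (for
the standard deviation) is intercept-only, so the \code{x2} row of the
coefficient matrix has a structural 0 in that column.

udata <- data.frame(x2 = rnorm(100))
udata <- transform(udata, y = rnorm(100, mean = 1 + 2 * x2, sd = exp(1)))
fit0 <- vglm(y ~ x2, uninormal(zero = 2), data = udata)
coef(fit0, matrix = TRUE)  # The x2 row is (slope, 0)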
\keyword{models}
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index de5d05e..8e609b2 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -93,7 +93,7 @@ Boca Raton: Chapman & Hall/CRC Press.
}
\examples{
zdata <- data.frame(y = 1:5, w = c(63, 14, 5, 1, 2)) # Knight, p.304
-fit <- vglm(y ~ 1, zetaff, zdata, trace = TRUE, weight = w, crit = "coef")
+fit <- vglm(y ~ 1, zetaff, data = zdata, trace = TRUE, weight = w, crit = "coef")
(phat <- Coef(fit)) # 1.682557
with(zdata, cbind(round(dzeta(y, phat) * sum(w), 1), w))
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index 5de6b76..42fb198 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -54,6 +54,8 @@ rzibinom(n, size, prob, pstr0 = 0)
\code{qzibinom} gives the quantile function, and
\code{rzibinom} generates random deviates.
+
+
}
%\references{ }
\author{ T. W. Yee }
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 673b417..3ffd17f 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -168,7 +168,7 @@ Fitting and interpreting occupancy models.
The zero-\emph{deflated} binomial distribution might
- be fitted by setting \code{lpstr0 = identity}, albeit,
+ be fitted by setting \code{lpstr0 = identitylink}, albeit
not entirely reliably. See \code{\link{zipoisson}}
for information that can be applied here. Else
try the zero-altered binomial distribution (see
@@ -204,8 +204,8 @@ zdata <- data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
zdata <- transform(zdata,
y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zdata, table(y))
-fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, zdata, trace = TRUE)
-fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, zdata, trace = TRUE, stepsize = 0.5)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE, stepsize = 0.5)
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
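
The identity link was renamed identitylink in this release, hence the substitutions in these Rd files. The point of an identity-type link for pstr0 is that zero-deflation corresponds to slightly negative pstr0, which a logit link cannot represent. A minimal hedged sketch (intercept-only, as the surrounding text advises; the simulated values are illustrative):

library(VGAM)
set.seed(1)
nn <- 1000; sv <- 10
zdata <- data.frame(y = rzibinom(nn, size = sv, prob = 0.4, pstr0 = 0.1))
fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial(lpstr0 = "identitylink"),
            data = zdata, trace = TRUE)
Coef(fit)  # pstr0 is now estimated on the identity scale
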
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 5cbf1c1..217c966 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -127,7 +127,7 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
The zero-\emph{deflated} geometric distribution might
- be fitted by setting \code{lpstr0 = identity}, albeit,
+ be fitted by setting \code{lpstr0 = identitylink}, albeit
not entirely reliably. See \code{\link{zipoisson}}
for information that can be applied here. Else
try the zero-altered geometric distribution (see
@@ -146,7 +146,8 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
\code{\link{rzigeom}},
\code{\link{geometric}},
\code{\link{zageometric}},
- \code{\link[stats]{rgeom}}.
+ \code{\link[stats]{rgeom}},
+ \code{\link{simulate.vlm}}.
}
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 05df6ff..5b6ee6b 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -153,7 +153,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
The zero-\emph{deflated} negative binomial distribution
- might be fitted by setting \code{lpstr0 = identity},
+ might be fitted by setting \code{lpstr0 = identitylink},
albeit not entirely reliably. See \code{\link{zipoisson}}
for information that can be applied here. Else try
the zero-altered negative binomial distribution (see
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index ad9dc7b..5c22b6a 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -238,7 +238,7 @@ with(zdata, matplot(x2, tmat2, col = 1:4, type = "l", ylim = 0:1,
ylab = "Probability", main = "Joint probabilities")) }
# Now fit the model to the data.
-fit <- vglm(cbind(ybin1, ybin2) ~ x2, zipebcom, dat = zdata, trace = TRUE)
+fit <- vglm(cbind(ybin1, ybin2) ~ x2, zipebcom, data = zdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
vcov(fit)
diff --git a/man/zipf.Rd b/man/zipf.Rd
index 35bb7f5..698b5af 100644
--- a/man/zipf.Rd
+++ b/man/zipf.Rd
@@ -84,14 +84,15 @@ zipf(N = NULL, link = "loge", init.s = NULL)
\seealso{
\code{\link{dzipf}},
- \code{\link{zetaff}}.
+ \code{\link{zetaff}},
+ \code{\link{simulate.vlm}}.
}
\examples{
zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
-fit <- vglm(y ~ 1, zipf, zdata, trace = TRUE, weight = ofreq, crit = "coef")
-fit <- vglm(y ~ 1, zipf(link = identity, init = 3.4), zdata,
+fit <- vglm(y ~ 1, zipf, data = zdata, trace = TRUE, weight = ofreq, crit = "coef")
+fit <- vglm(y ~ 1, zipf(link = identitylink, init = 3.4), data = zdata,
trace = TRUE, weight = ofreq)
fit@misc$N
(shat <- Coef(fit))
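
For zipf(), supplying N fixes the support 1, ..., N, and the single parameter s is estimated. A short sketch checking the fit against the observed frequencies, in the spirit of the zetaff example above (dzipf() is documented in zipfUC.Rd below; output values are not quoted here):

library(VGAM)
zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
fit <- vglm(y ~ 1, zipf(N = 5), data = zdata, weight = ofreq, trace = TRUE)
shat <- Coef(fit)
with(zdata, cbind(expected = round(dzipf(y, N = 5, s = shat) * sum(ofreq), 1),
                  observed = ofreq))
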
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index a985feb..2045fae 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -29,20 +29,26 @@ pzipf(q, N, s)
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
+
}
}
\value{
\code{dzipf} gives the density, and
\code{pzipf} gives the cumulative distribution function.
+
+
% \code{qzipf} gives the quantile function, and
% \code{rzipf} generates random deviates.
+
+
}
\author{ T. W. Yee }
\details{
This is a finite version of the zeta distribution.
See \code{\link{zipf}} for more details.
+
}
%\note{
%
@@ -50,6 +56,8 @@ pzipf(q, N, s)
\seealso{
\code{\link{zipf}}.
+
+
}
\examples{
N <- 10; s <- 0.5; y <- 1:N
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 8bf3df0..c6218a5 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -176,7 +176,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
Although the functions in \code{\link{Zipois}}
can handle the zero-\emph{deflated} Poisson distribution,
this family function cannot estimate this very well in general.
- One sets \code{lpstr0 = identity}, however, the iterations
+ One can set \code{lpstr0 = identitylink}; however, the iterations
might fall outside the parameter space.
Practically, it is restricted to intercept-only models
(see example below).
@@ -184,7 +184,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
or using a simpler model to obtain initial values.
% If there is a covariate then it is best to
% constrain \code{pstr0} to be intercept-only, e.g.,
-% by \code{zipoisson(lpstr0 = identity, zero = -1)}.
+% by \code{zipoisson(lpstr0 = identitylink, zero = -1)}.
A (somewhat) similar and more reliable method for
zero-deflation is to try the zero-altered Poisson model
(see \code{\link{zapoisson}}).
@@ -226,7 +226,8 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
\code{\link{yip88}},
\code{\link{rrvglm}},
\code{\link{zipebcom}},
- \code{\link[stats:Poisson]{rpois}}.
+ \code{\link[stats:Poisson]{rpois}},
+ \code{\link{simulate.vlm}}.
}
@@ -272,7 +273,7 @@ with(cholera, cbind(actual = wfreq,
abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
abdata <- subset(abdata, w > 0)
fit <- vglm(y ~ 1, zipoisson(lpstr0 = probit, ipstr0 = 0.8),
- abdata, weight = w, trace = TRUE)
+ data = abdata, weight = w, trace = TRUE)
fitted(fit, type = "pobs0") # Estimate of P(Y = 0)
coef(fit, matrix = TRUE)
Coef(fit) # Estimate of pstr0 and lambda
@@ -288,7 +289,7 @@ zdata <- transform(zdata, usepstr0 = deflat.limit / 1.5)
zdata <- transform(zdata, y3 = rzipois(nn, lambda3, pstr0 = usepstr0))
head(zdata)
with(zdata, table(y3)) # A lot of deflation
-fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identity),
+fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identitylink),
data = zdata, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
# Check how accurate it was:
@@ -298,8 +299,8 @@ Coef(fit3)
# Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
set.seed(123)
-rrzip <- rrvglm(Alopacce ~ bs(WaterCon, df = 3), zipoisson(zero = NULL),
- hspider, trace = TRUE, Index.corner = 2)
+rrzip <- rrvglm(Alopacce ~ sm.bs(WaterCon, df = 3), zipoisson(zero = NULL),
+ data = hspider, trace = TRUE, Index.corner = 2)
coef(rrzip, matrix = TRUE)
Coef(rrzip)
summary(rrzip)
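
Example 4 in this file depends on deflat.limit, the most negative admissible pstr0 for a zero-inflated Poisson: P(Y = 0) = pstr0 + (1 - pstr0) * exp(-lambda) must stay nonnegative, giving pstr0 >= -1/(exp(lambda) - 1). A small check in plain R (the lambda value is illustrative):

lambda3 <- 2
deflat.limit <- -1 / expm1(lambda3)   # most negative pstr0 allowed
pstr0 <- deflat.limit
pstr0 + (1 - pstr0) * exp(-lambda3)   # P(Y = 0) at the limit: ~ 0
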
diff --git a/src/muxr.c b/src/muxr.c
deleted file mode 100644
index 4ad6d97..0000000
--- a/src/muxr.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
-This code is
-Copyright (C) 1998-2012 T. W. Yee, University of Auckland. All rights reserved.
-*/
-
-
-#include<math.h>
-#include<stdio.h>
-#include<stdlib.h>
-#include<R.h>
-#include<Rmath.h>
-
-
-void vdec(int *row_index, int *col_index, int *dimm);
-void m2a(double *m, double *a, int *dimm, int *row_index,
- int *col_index, int *n, int *M, int *upper);
-void a2m(double *a, double *m, int *dimm, int *row_index,
- int *col_index, int *n, int *M);
-void mux2(double *cc, double *ymat,
- double *ans, int *p, int *n, int *M);
-void mux22(double *cc, double *ymat, double *ans, int *dimm,
- int *row_index, int *col_index,
- int *n, int *M, double *wk, int *upper);
-void mux5(double *cc, double *x,
- double *ans, int *M, int *n, int *r,
- int *dimm,
- int *dimr,
- int *matrix,
- double *wk, double *wk2,
- int *row_index_M, int *col_index_M,
- int *row_index_r, int *col_index_r);
-void mux55(double *evects, double *evals, double *ans, double *wk,
- double *wk2, int *row_index, int *col_index,
- int *M, int *n);
-void mux7(double *cc, double *x,
- double *ans, int *M, int *q, int *n, int *r);
-void mux111(double *cc, double *txmat, int *M, int *R, int *n,
- double *wk, double *wk2, int *row_index, int *col_index,
- int *dimm, int *upper);
-void mux15(double *cc, double *x,
- double *ans, int *M, int *n);
-void vchol(double *cc, int *M, int *n, int *ok, double *wk,
- int *row_index, int *col_index, int *dimm);
-void vforsub(double *cc, double *b, int *M, int *n,
- double *wk, int *row_index,
- int *col_index, int *dimm);
-void vbacksub(double *cc, double *b, int *M, int *n,
- double *wk, int *row_index,
- int *col_index, int *dimm);
-void tapplymat1(double *mat, int *nr, int *nc, int *type);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-void vdec(int *row_index, int *col_index, int *dimm)
-{
- int i;
-
- for(i = 0; i < *dimm; i++) {
- row_index[i] -= 1;
- col_index[i] -= 1;
- }
-}
-
-
-void m2a(double *m, double *a, int *dimm, int *row_index,
- int *col_index, int *n, int *M, int *upper)
-{
- int i, k, MM = *M * *M, MMn = *M * *M * *n;
-
- if(*upper == 1 || *dimm != *M * (*M + 1) / 2)
- for(k = 0; k < MMn; k++)
- a[k] = 0.0;
-
- for(k = 0; k < *n; k++)
- {
- for(i = 0; i < *dimm; i++)
- {
- a[row_index[i] + col_index[i] * *M] = m[i];
- if(*upper == 0)
- a[col_index[i] + row_index[i] * *M] = m[i];
- }
- a += MM;
- m += *dimm;
- }
-}
-
-
-void a2m(double *a, double *m, int *dimm, int *row_index,
- int *col_index, int *n, int *M)
-{
- int i, k, MM= *M * *M;
-
- for(k = 0; k < *n; k++)
- {
- for(i = 0; i < *dimm; i++)
- m[i] = a[row_index[i] + col_index[i] * *M];
- a += MM;
- m += *dimm;
- }
-}
-
-
-
-
-void mux2(double *cc, double *ymat,
- double *ans, int *p, int *n, int *M)
-{
- double s;
- int i, j, t, Mp = *M * *p;
-
- for(i = 0; i < *n; i++)
- {
- for(j = 0; j < *M; j++)
- {
- s = 0.0;
- for(t = 0; t < *p; t++)
- s += cc[j + t * *M] * ymat[t];
- *ans++ = s;
- }
- ymat += *p;
- cc += Mp;
- }
-}
-
-
-
-void mux22(double *cc, double *ymat, double *ans, int *dimm,
- int *row_index, int *col_index,
- int *n, int *M, double *wk, int *upper)
-{
- double s;
- int j, t, k, one = 1, lower;
-
- vdec(row_index, col_index, dimm);
- for(k = 0; k < *n; k++)
- {
- m2a(cc, wk, dimm, row_index, col_index, &one, M, upper);
-
- for(j = 0; j < *M; j++)
- {
- s = 0.0;
- lower = *upper == 0 ? 0 : j;
- for(t = lower; t < *M; t++)
- s += wk[j + t * *M] * ymat[t];
- *ans++ = s;
- }
- ymat += *M;
- cc += *dimm;
- }
-}
-
-
-void mux5(double *cc, double *x,
- double *ans, int *M, int *n, int *r,
- int *dimm,
- int *dimr,
- int *matrix,
- double *wk, double *wk2,
- int *row_index_M, int *col_index_M,
- int *row_index_r, int *col_index_r)
-{
- double s, *pd, *pd2;
- int i, j, k, t, Mr = *M * *r, rr = *r * *r, MM = *M * *M, u,
- jM, jr, kM, kr, one=1, upper=0;
-
- if(*matrix == 1)
- {
- vdec(row_index_M, col_index_M, dimm);
- vdec(row_index_r, col_index_r, dimr);
- pd = wk;
- pd2 = wk2;
- } else {
-/* Commented out on 2/5/06. Need to fix this up more cleanly.
- Rprintf("Error: can only handle matrix.arg == 1\n");
- exit(-1);
-*/
-
-/*
-26/9/07:
-The following line was added only to avoid a warning message from the compiler
-*/
- pd = pd2 = wk;
-
- }
-
- for(i = 0; i < *n; i++)
- {
- if(*matrix == 1)
- m2a(cc, pd, dimm, row_index_M, col_index_M, &one, M, &upper);
- else
- {
- pd = cc;
- pd2 = ans;
- }
-
- for(j = 0; j < *r; j++)
- {
- jM = j * *M;
- jr = j * *r;
- for(k = j; k < *r; k++)
- {
- kM = k * *M;
- kr = k * *r;
- s = 0.0;
- for(t = 0; t < *M; t++)
- for(u = 0; u < *M; u++)
- s += x[t + jM] * pd[t + u * *M] * x[u + kM];
- pd2[j + kr] =
- pd2[k + jr] = s;
- }
-
- }
-
- if(*matrix == 1)
- a2m(pd2, ans, dimr, row_index_r, col_index_r, &one, r);
-
- cc += (*matrix == 1 ? *dimm : MM);
- x += Mr;
- ans += (*matrix == 1 ? *dimr : rr);
- }
-}
-
-
-
-void mux55(double *evects, double *evals, double *ans, double *wk,
- double *wk2, int *row_index, int *col_index,
- int *M, int *n)
-{
- double *pd, *pd2, t;
- int i, j, k, s, MM = *M * *M, one=1,
- MM12 = *M * (*M + 1)/2;
-
- vdec(row_index, col_index, &MM12);
-
- for(i = 0; i < *n; i++)
- {
- pd = evects;
- pd2 = wk2;
- for(j = 0; j < *M; j++)
- for(k = 0; k < *M; k++)
- *pd2++ = *pd++ * evals[j];
-
- for(j = 0; j < *M; j++)
- for(k = j; k < *M; k++)
- {
- t = 0.0;
- for(s = 0; s < *M; s++)
- t += wk2[j + s * *M] * evects[k + s * *M];
- wk[j + k * *M] =
- wk[k + j * *M] = t;
- }
-
- a2m(wk, ans, &MM12, row_index, col_index, &one, M);
-
- ans += MM12;
- evals += *M;
- evects += MM;
- }
-}
-
-
-
-
-
-void mux7(double *cc, double *x,
- double *ans, int *M, int *q, int *n, int *r)
-{
- double s;
- int i, j, k, t, Mq = *M * *q, qr = *q * *r, Mr = *M * *r,
- kq, kM;
-
- for(i = 0; i < *n; i++)
- {
- for(j = 0; j < *M; j++)
- {
- for(k = 0; k < *r; k++)
- {
- kq = k * *q;
- kM = k * *M;
- s = 0.0;
- for(t = 0; t < *q; t++)
- s += cc[j + t * *M] * x[t + kq];
- ans[j + kM] = s;
- }
- }
- cc += Mq;
- ans += Mr;
- x += qr;
- }
-}
-
-
-
-void mux111(double *cc, double *txmat, int *M, int *R, int *n,
- double *wk, double *wk2, int *row_index, int *col_index,
- int *dimm, int *upper)
-{
- double s, *pd2;
- int i, j, k, t, MM = *M * *M, MR = *M * *R, lower;
-
- vdec(row_index, col_index, dimm);
-
- for(i = 0; i < MM; i++)
- wk[i] = 0.0;
-
- for(t = 0; t < *n; t++)
- {
- for(i = 0; i < *dimm; i++)
- {
- if(*upper == 0)
- wk[row_index[i] + col_index[i] * *M] =
- wk[col_index[i] + row_index[i] * *M] = *cc++;
- else
- wk[row_index[i] + col_index[i] * *M] = *cc++;
- }
-
- pd2 = txmat;
- for(i = 0; i < *M; i++)
- for(j = 0; j < *R; j++)
- wk2[i + j * *M] = *pd2++;
-
- for(i = 0; i < *M; i++)
- for(j = 0; j < *R; j++)
- {
- s = 0.0;
- lower = *upper == 0 ? 0 : i;
- for(k = lower; k < *M; k++)
- s += wk[i + k * *M] * wk2[k + j * *M];
- txmat[j + i * *R] = s;
- }
- txmat += MR;
- }
-}
-
-
-
-
-void mux15(double *cc, double *x,
- double *ans, int *M, int *n)
-{
- double *pd, *pd2;
- int i, j, k, MM = *M * *M;
-
- for(i = 0; i < *n; i++)
- {
- pd = cc;
- pd2 = ans;
- for(j = 0; j < *M; j++)
- for(k = 0; k < *M; k++)
- *pd2++ = *pd++ * x[j];
-
- pd2 = ans;
- for(j = 0; j < *M; j++)
- for(k = 0; k < *M; k++)
- {
- *pd2 *= x[k];
- pd2++;
- }
-
- ans += MM;
- x += *M;
- }
-}
-
-
-
-
-void vchol(double *cc, int *M, int *n, int *ok, double *wk,
- int *row_index, int *col_index, int *dimm)
-
-{
- double s, *pd;
- int t, i, j, k, iM, iiM, upper = 0, one = 1;
-
- vdec(row_index, col_index, dimm);
- pd = wk;
-
- for(t = 0; t < *n; t++)
- {
- *ok = 1;
-
- m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
-
- for(i = 0; i < *M; i++)
- {
- s = 0.0;
- iM = i * *M;
- iiM = i + iM;
- for(k = 0; k < i; k++)
- s += pd[k + iM] * pd[k + iM];
-
- pd[iiM] -= s;
- if(pd[iiM] < 0.0)
- {
- *ok = 0;
- break;
- }
- pd[iiM] = sqrt(pd[iiM]);
-
- for(j = i+1; j < *M; j++)
- {
- s = 0.0;
- for(k = 0; k < i; k++)
- s += pd[k + iM] * pd[k + j * *M];
- pd[i + j * *M] = (pd[i + j * *M] - s) / pd[iiM];
- }
-
- }
-
- a2m(wk, cc, dimm, row_index, col_index, &one, M);
-
- cc += *dimm;
- ok++;
- }
-}
-
-
-
-void vforsub(double *cc, double *b, int *M, int *n,
- double *wk, int *row_index,
- int *col_index, int *dimm)
-{
- double s, *pd;
- int j, k, t, upper = 1, one = 1;
-
- pd = wk;
- vdec(row_index, col_index, dimm);
-
- for(t = 0; t < *n; t++)
- {
- m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
-
- for(j = 0; j < *M; j++)
- {
- s = b[j];
- for(k = 0; k < j; k++)
- s -= pd[k + j * *M] * b[k];
- b[j] = s / pd[j + j * *M];
- }
- cc += *dimm;
- b += *M;
- }
-}
-
-
-
-
-void vbacksub(double *cc, double *b, int *M, int *n,
- double *wk, int *row_index,
- int *col_index, int *dimm)
-{
- double s, *pd;
- int j, k, t, upper = 1, one = 1;
-
- pd = wk;
- vdec(row_index, col_index, dimm);
-
- for(t = 0; t < *n; t++)
- {
- m2a(cc, wk, dimm, row_index, col_index, &one, M, &upper);
-
- for(j = *M - 1; j >= 0; j--)
- {
- s = b[j];
- for(k = j + 1; k < *M; k++)
- s -= pd[j + k * *M] * b[k];
- b[j] = s / pd[j + j * *M];
- }
- cc += *dimm;
- b += *M;
- }
-}
-
-
-
-void tapplymat1(double *mat, int *nr, int *nc, int *type)
-{
- double *pd = mat, *pd2 = mat + *nr;
- int i, j;
-
- if(*type == 1)
- for(j = 2; j <= *nc; j++)
- for(i = 0; i < *nr; i++, pd2++)
- *pd2 += *pd++;
-
- if(*type == 2)
- {
- pd2 = mat + *nr * *nc - 1;
- pd = pd2 - *nr;
- for(j = *nc; j >= 2; j--)
- for(i = 0; i < *nr; i++, pd2--)
- *pd2 -= *pd--;
- }
-
- if(*type == 3)
- for(j = 2; j <= *nc; j++)
- for(i = 0; i < *nr; i++, pd2++)
- *pd2 *= *pd++;
-
- if(*type < 1 || *type > 3)
- Rprintf("Error: *type not matched\n");
-}
-
diff --git a/src/muxr3.c b/src/muxr3.c
new file mode 100644
index 0000000..d8034b3
--- /dev/null
+++ b/src/muxr3.c
@@ -0,0 +1,465 @@
+
+
+#include<math.h>
+#include<stdio.h>
+#include<stdlib.h>
+#include<R.h>
+#include<Rmath.h>
+
+
+void vdec(int *hqipzx3n, int *exz2jrhq, int *dimm);
+void m2a(double *m, double *a, int *dimm, int *hqipzx3n,
+ int *exz2jrhq, int *n, int *M, int *rb1onzwu);
+void a2m(double *a, double *m, int *dimm, int *hqipzx3n,
+ int *exz2jrhq, int *n, int *M);
+void mux2(double *cc, double *tlgduey8,
+ double *bzmd6ftv, int *p, int *n, int *M);
+void mux22(double *cc, double *tlgduey8, double *bzmd6ftv, int *dimm,
+ int *hqipzx3n, int *exz2jrhq,
+ int *n, int *M, double *wk, int *rb1onzwu);
+void mux5(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *n, int *r,
+ int *dimm,
+ int *dimr,
+ int *matrix,
+ double *wk, double *wk2,
+ int *hqipzx3n_M, int *exz2jrhq_M,
+ int *hqipzx3n_r, int *exz2jrhq_r);
+void mux55(double *evects, double *evals, double *bzmd6ftv, double *wk,
+ double *wk2, int *hqipzx3n, int *exz2jrhq,
+ int *M, int *n);
+void mux7(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *q, int *n, int *r);
+void mux111(double *cc, double *the7mqnvy, int *M, int *R, int *n,
+ double *wk, double *wk2, int *hqipzx3n, int *exz2jrhq,
+ int *dimm, int *rb1onzwu);
+void mux15(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *n);
+void vchol(double *cc, int *M, int *n, int *ok, double *wk,
+ int *hqipzx3n, int *exz2jrhq, int *dimm);
+void vforsub(double *cc, double *b, int *M, int *n,
+ double *wk, int *hqipzx3n,
+ int *exz2jrhq, int *dimm);
+void vbacksub(double *cc, double *b, int *M, int *n,
+ double *wk, int *hqipzx3n,
+ int *exz2jrhq, int *dimm);
+void tapply_mat1(double *mat, int *nr, int *nc, int *type);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+void vdec(int *hqipzx3n, int *exz2jrhq, int *dimm) {
+ int ayfnwr1v;
+
+ for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) {
+ hqipzx3n[ayfnwr1v] -= 1;
+ exz2jrhq[ayfnwr1v] -= 1;
+ }
+}
+
+
+void m2a(double *m, double *a, int *dimm, int *hqipzx3n,
+ int *exz2jrhq, int *n, int *M, int *rb1onzwu) {
+ int ayfnwr1v, gp1jxzuh, MM = *M * *M, MMn = *M * *M * *n;
+
+ if(*rb1onzwu == 1 || *dimm != *M * (*M + 1) / 2)
+ for(gp1jxzuh = 0; gp1jxzuh < MMn; gp1jxzuh++)
+ a[gp1jxzuh] = 0.0;
+
+ for(gp1jxzuh = 0; gp1jxzuh < *n; gp1jxzuh++) {
+ for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) {
+ a[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] = m[ayfnwr1v];
+ if(*rb1onzwu == 0)
+ a[exz2jrhq[ayfnwr1v] + hqipzx3n[ayfnwr1v] * *M] = m[ayfnwr1v];
+ }
+ a += MM;
+ m += *dimm;
+ }
+}
+
+
+void a2m(double *a, double *m, int *dimm, int *hqipzx3n,
+ int *exz2jrhq, int *n, int *M) {
+ int ayfnwr1v, gp1jxzuh, MM= *M * *M;
+
+ for(gp1jxzuh = 0; gp1jxzuh < *n; gp1jxzuh++) {
+ for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++)
+ m[ayfnwr1v] = a[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M];
+ a += MM;
+ m += *dimm;
+ }
+}
+
+
+
+
+void mux2(double *cc, double *tlgduey8,
+ double *bzmd6ftv, int *p, int *n, int *M) {
+ double urohxe6t;
+ int ayfnwr1v, yq6lorbx, bpvaqm5z, Mp = *M * *p;
+
+ for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) {
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) {
+ urohxe6t = 0.0;
+ for(bpvaqm5z = 0; bpvaqm5z < *p; bpvaqm5z++)
+ urohxe6t += cc[yq6lorbx + bpvaqm5z * *M] * tlgduey8[bpvaqm5z];
+ *bzmd6ftv++ = urohxe6t;
+ }
+ tlgduey8 += *p;
+ cc += Mp;
+ }
+}
+
+
+
+void mux22(double *cc, double *tlgduey8, double *bzmd6ftv, int *dimm,
+ int *hqipzx3n, int *exz2jrhq,
+ int *n, int *M, double *wk, int *rb1onzwu) {
+ double urohxe6t;
+ int yq6lorbx, bpvaqm5z, gp1jxzuh, one = 1, nzqklc9x;
+
+ vdec(hqipzx3n, exz2jrhq, dimm);
+ for(gp1jxzuh = 0; gp1jxzuh < *n; gp1jxzuh++) {
+ m2a(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, rb1onzwu);
+
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) {
+ urohxe6t = 0.0;
+ nzqklc9x = *rb1onzwu == 0 ? 0 : yq6lorbx;
+ for(bpvaqm5z = nzqklc9x; bpvaqm5z < *M; bpvaqm5z++)
+ urohxe6t += wk[yq6lorbx + bpvaqm5z * *M] * tlgduey8[bpvaqm5z];
+ *bzmd6ftv++ = urohxe6t;
+ }
+ tlgduey8 += *M;
+ cc += *dimm;
+ }
+}
+
+
+void mux5(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *n, int *r,
+ int *dimm,
+ int *dimr,
+ int *matrix,
+ double *wk, double *wk2,
+ int *hqipzx3n_M, int *exz2jrhq_M,
+ int *hqipzx3n_r, int *exz2jrhq_r) {
+ double urohxe6t, *pd, *pd2;
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, Mr = *M * *r,
+ rr = *r * *r, MM = *M * *M, usvdbx3tk,
+ jM, jr, kM, kr, one=1, rb1onzwu=0;
+
+ if(*matrix == 1) {
+ vdec(hqipzx3n_M, exz2jrhq_M, dimm);
+ vdec(hqipzx3n_r, exz2jrhq_r, dimr);
+ pd = wk;
+ pd2 = wk2;
+ } else {
+
+ pd = pd2 = wk;
+
+ }
+
+ for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) {
+ if(*matrix == 1)
+ m2a(cc, pd, dimm, hqipzx3n_M, exz2jrhq_M, &one, M, &rb1onzwu);
+ else {
+ pd = cc;
+ pd2 = bzmd6ftv;
+ }
+
+ for(yq6lorbx = 0; yq6lorbx < *r; yq6lorbx++) {
+ jM = yq6lorbx * *M;
+ jr = yq6lorbx * *r;
+ for(gp1jxzuh = yq6lorbx; gp1jxzuh < *r; gp1jxzuh++) {
+ kM = gp1jxzuh * *M;
+ kr = gp1jxzuh * *r;
+ urohxe6t = 0.0;
+ for(bpvaqm5z = 0; bpvaqm5z < *M; bpvaqm5z++)
+ for(usvdbx3tk = 0; usvdbx3tk < *M; usvdbx3tk++)
+ urohxe6t += x[bpvaqm5z + jM] * pd[bpvaqm5z + usvdbx3tk * *M] *
+ x[usvdbx3tk + kM];
+ pd2[yq6lorbx + kr] =
+ pd2[gp1jxzuh + jr] = urohxe6t;
+ }
+ }
+
+ if(*matrix == 1)
+ a2m(pd2, bzmd6ftv, dimr, hqipzx3n_r, exz2jrhq_r, &one, r);
+
+ cc += (*matrix == 1 ? *dimm : MM);
+ x += Mr;
+ bzmd6ftv += (*matrix == 1 ? *dimr : rr);
+ }
+}
+
+
+
+void mux55(double *evects, double *evals, double *bzmd6ftv, double *wk,
+ double *wk2, int *hqipzx3n, int *exz2jrhq,
+ int *M, int *n) {
+ double *pd, *pd2, bpvaqm5z;
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, MM = *M * *M, one = 1,
+ imk5wjxg = *M * (*M + 1)/2;
+
+ vdec(hqipzx3n, exz2jrhq, &imk5wjxg);
+
+ for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) {
+ pd = evects;
+ pd2 = wk2;
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++)
+ for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++)
+ *pd2++ = *pd++ * evals[yq6lorbx];
+
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++)
+ for(gp1jxzuh = yq6lorbx; gp1jxzuh < *M; gp1jxzuh++) {
+ bpvaqm5z = 0.0;
+ for(urohxe6t = 0; urohxe6t < *M; urohxe6t++)
+ bpvaqm5z += wk2[yq6lorbx + urohxe6t * *M] *
+ evects[gp1jxzuh + urohxe6t * *M];
+ wk[yq6lorbx + gp1jxzuh * *M] =
+ wk[gp1jxzuh + yq6lorbx * *M] = bpvaqm5z;
+ }
+
+ a2m(wk, bzmd6ftv, &imk5wjxg, hqipzx3n, exz2jrhq, &one, M);
+
+ bzmd6ftv += imk5wjxg;
+ evals += *M;
+ evects += MM;
+ }
+}
+
+
+
+
+
+void mux7(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *q, int *n, int *r) {
+ double urohxe6t;
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z,
+ Mq = *M * *q, qr = *q * *r, Mr = *M * *r,
+ kq, kM;
+
+ for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) {
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) {
+ for(gp1jxzuh = 0; gp1jxzuh < *r; gp1jxzuh++) {
+ kq = gp1jxzuh * *q;
+ kM = gp1jxzuh * *M;
+ urohxe6t = 0.0;
+ for(bpvaqm5z = 0; bpvaqm5z < *q; bpvaqm5z++)
+ urohxe6t += cc[yq6lorbx + bpvaqm5z * *M] * x[bpvaqm5z + kq];
+ bzmd6ftv[yq6lorbx + kM] = urohxe6t;
+ }
+ }
+ cc += Mq;
+ bzmd6ftv += Mr;
+ x += qr;
+ }
+}
+
+
+
+void mux111(double *cc, double *the7mqnvy, int *M, int *R, int *n,
+ double *wk, double *wk2, int *hqipzx3n, int *exz2jrhq,
+ int *dimm, int *rb1onzwu) {
+ double urohxe6t, *pd2;
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z,
+ MM = *M * *M, MR = *M * *R, nzqklc9x;
+
+ vdec(hqipzx3n, exz2jrhq, dimm);
+
+ for(ayfnwr1v = 0; ayfnwr1v < MM; ayfnwr1v++)
+ wk[ayfnwr1v] = 0.0;
+
+ for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) {
+ for(ayfnwr1v = 0; ayfnwr1v < *dimm; ayfnwr1v++) {
+ if(*rb1onzwu == 0)
+ wk[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] =
+ wk[exz2jrhq[ayfnwr1v] + hqipzx3n[ayfnwr1v] * *M] = *cc++;
+ else
+ wk[hqipzx3n[ayfnwr1v] + exz2jrhq[ayfnwr1v] * *M] = *cc++;
+ }
+
+ pd2 = the7mqnvy;
+ for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++)
+ for(yq6lorbx = 0; yq6lorbx < *R; yq6lorbx++)
+ wk2[ayfnwr1v + yq6lorbx * *M] = *pd2++;
+
+ for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++)
+ for(yq6lorbx = 0; yq6lorbx < *R; yq6lorbx++) {
+ urohxe6t = 0.0;
+ nzqklc9x = *rb1onzwu == 0 ? 0 : ayfnwr1v;
+ for(gp1jxzuh = nzqklc9x; gp1jxzuh < *M; gp1jxzuh++)
+ urohxe6t += wk[ayfnwr1v + gp1jxzuh * *M] * wk2[gp1jxzuh + yq6lorbx * *M];
+ the7mqnvy[yq6lorbx + ayfnwr1v * *R] = urohxe6t;
+ }
+ the7mqnvy += MR;
+ }
+}
+
+
+
+
+void mux15(double *cc, double *x,
+ double *bzmd6ftv, int *M, int *n) {
+ double *pd, *pd2;
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, MM = *M * *M;
+
+ for(ayfnwr1v = 0; ayfnwr1v < *n; ayfnwr1v++) {
+ pd = cc;
+ pd2 = bzmd6ftv;
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++)
+ for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++)
+ *pd2++ = *pd++ * x[yq6lorbx];
+
+ pd2 = bzmd6ftv;
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++)
+ for(gp1jxzuh = 0; gp1jxzuh < *M; gp1jxzuh++) {
+ *pd2 *= x[gp1jxzuh];
+ pd2++;
+ }
+
+ bzmd6ftv += MM;
+ x += *M;
+ }
+}
+
+
+
+
+void vchol(double *cc, int *M, int *n, int *ok, double *wk,
+ int *hqipzx3n, int *exz2jrhq, int *dimm) {
+ double urohxe6t, *pd;
+ int bpvaqm5z, ayfnwr1v, yq6lorbx, gp1jxzuh, iM, iiM, rb1onzwu = 0, one = 1;
+
+ vdec(hqipzx3n, exz2jrhq, dimm);
+ pd = wk;
+
+ for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) {
+ *ok = 1;
+
+ m2a(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu);
+
+ for(ayfnwr1v = 0; ayfnwr1v < *M; ayfnwr1v++) {
+ urohxe6t = 0.0;
+ iM = ayfnwr1v * *M;
+ iiM = ayfnwr1v + iM;
+ for(gp1jxzuh = 0; gp1jxzuh < ayfnwr1v; gp1jxzuh++)
+ urohxe6t += pd[gp1jxzuh + iM] * pd[gp1jxzuh + iM];
+
+ pd[iiM] -= urohxe6t;
+ if(pd[iiM] < 0.0) {
+ *ok = 0;
+ break;
+ }
+ pd[iiM] = sqrt(pd[iiM]);
+
+ for(yq6lorbx = ayfnwr1v+1; yq6lorbx < *M; yq6lorbx++) {
+ urohxe6t = 0.0;
+ for(gp1jxzuh = 0; gp1jxzuh < ayfnwr1v; gp1jxzuh++)
+ urohxe6t += pd[gp1jxzuh + iM] * pd[gp1jxzuh + yq6lorbx * *M];
+ pd[ayfnwr1v + yq6lorbx * *M] = (pd[ayfnwr1v + yq6lorbx * *M] -
+ urohxe6t) / pd[iiM];
+ }
+ }
+
+ a2m(wk, cc, dimm, hqipzx3n, exz2jrhq, &one, M);
+
+ cc += *dimm;
+ ok++;
+ }
+}
+
+
+
+void vforsub(double *cc, double *b, int *M, int *n,
+ double *wk, int *hqipzx3n,
+ int *exz2jrhq, int *dimm) {
+ double urohxe6t, *pd;
+ int yq6lorbx, gp1jxzuh, bpvaqm5z, rb1onzwu = 1, one = 1;
+
+ pd = wk;
+ vdec(hqipzx3n, exz2jrhq, dimm);
+
+ for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) {
+ m2a(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu);
+
+ for(yq6lorbx = 0; yq6lorbx < *M; yq6lorbx++) {
+ urohxe6t = b[yq6lorbx];
+ for(gp1jxzuh = 0; gp1jxzuh < yq6lorbx; gp1jxzuh++)
+ urohxe6t -= pd[gp1jxzuh + yq6lorbx * *M] * b[gp1jxzuh];
+ b[yq6lorbx] = urohxe6t / pd[yq6lorbx + yq6lorbx * *M];
+ }
+ cc += *dimm;
+ b += *M;
+ }
+}
+
+
+
+
+void vbacksub(double *cc, double *b, int *M, int *n,
+ double *wk, int *hqipzx3n,
+ int *exz2jrhq, int *dimm) {
+ double urohxe6t, *pd;
+ int yq6lorbx, gp1jxzuh, bpvaqm5z, rb1onzwu = 1, one = 1;
+
+ pd = wk;
+ vdec(hqipzx3n, exz2jrhq, dimm);
+
+ for(bpvaqm5z = 0; bpvaqm5z < *n; bpvaqm5z++) {
+ m2a(cc, wk, dimm, hqipzx3n, exz2jrhq, &one, M, &rb1onzwu);
+
+ for(yq6lorbx = *M - 1; yq6lorbx >= 0; yq6lorbx--) {
+ urohxe6t = b[yq6lorbx];
+ for(gp1jxzuh = yq6lorbx + 1; gp1jxzuh < *M; gp1jxzuh++)
+ urohxe6t -= pd[yq6lorbx + gp1jxzuh * *M] * b[gp1jxzuh];
+ b[yq6lorbx] = urohxe6t / pd[yq6lorbx + yq6lorbx * *M];
+ }
+ cc += *dimm;
+ b += *M;
+ }
+}
+
+
+
+void tapply_mat1(double *mat, int *nr, int *nc, int *type) {
+ double *pd = mat, *pd2 = mat + *nr;
+ int ayfnwr1v, yq6lorbx;
+
+ if(*type == 1)
+ for(yq6lorbx = 2; yq6lorbx <= *nc; yq6lorbx++)
+ for(ayfnwr1v = 0; ayfnwr1v < *nr; ayfnwr1v++, pd2++)
+ *pd2 += *pd++;
+
+ if(*type == 2) {
+ pd2 = mat + *nr * *nc - 1;
+ pd = pd2 - *nr;
+ for(yq6lorbx = *nc; yq6lorbx >= 2; yq6lorbx--)
+ for(ayfnwr1v = 0; ayfnwr1v < *nr; ayfnwr1v++, pd2--)
+ *pd2 -= *pd--;
+ }
+
+ if(*type == 3)
+ for(yq6lorbx = 2; yq6lorbx <= *nc; yq6lorbx++)
+ for(ayfnwr1v = 0; ayfnwr1v < *nr; ayfnwr1v++, pd2++)
+ *pd2 *= *pd++;
+
+ if(*type < 1 || *type > 3)
+    Rprintf("Error: *type not matched\n");
+}
+
+
+
+
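
muxr3.c reimplements muxr.c (deleted above) with systematically obfuscated identifiers: hqipzx3n/exz2jrhq are the former row_index/col_index, bzmd6ftv is ans, rb1onzwu is upper, and tapplymat1 gains an underscore. The m2a/a2m pair converts between VGAM's compact storage for symmetric working-weight matrices (dimm distinct entries per observation) and full M x M form. A pure-R sketch of one observation's round trip (the diagonal-first index ordering is assumed here for illustration):

M <- 3
row.ind <- c(1, 2, 3, 1, 2, 1)        # row indices of the distinct entries
col.ind <- c(1, 2, 3, 2, 3, 3)        # column indices (dimm = 6)
m <- c(4, 5, 6, 0.5, 0.25, 0.1)       # band-stored values
a <- matrix(0, M, M)
a[cbind(row.ind, col.ind)] <- m       # m2a: scatter into the full matrix
a[cbind(col.ind, row.ind)] <- m       # symmetrize (the upper == 0 case)
m.back <- a[cbind(row.ind, col.ind)]  # a2m: gather the entries back
stopifnot(all.equal(m.back, m))
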
diff --git a/src/rgam3.c b/src/rgam3.c
index 1fe0b1d..fa5e38f 100644
--- a/src/rgam3.c
+++ b/src/rgam3.c
@@ -719,11 +719,15 @@ void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
- double g9fvdrbw[4], ms0qypiw[16], wsvdbx3tk, wv2svdbx3tk, qaltf0nz = 0.1e-9;
+
+ double g9fvdrbw[12]; /* 20140522 Effectively g9fvdrbw(4,3), just in case */
+
+ double ms0qypiw[16], wsvdbx3tk, wv2svdbx3tk, qaltf0nz = 0.1e-9;
int ayfnwr1v, yq6lorbx, dqlr5bse, pqzfxw4i, nhnpt1zym1 = *kuzxj1lo + 1,
pqneb2ra = 1, h2dpsbkr = 4;
double *qnwamo0e0, *qnwamo0e1, *qnwamo0e2, *qnwamo0e3, *qnwamo0e4;
+
qnwamo0e0 = zvau2lct; qnwamo0e1 = f6lsuzax; qnwamo0e2 = fvh2rwtc; qnwamo0e3 = dcfir2no; qnwamo0e4 = zyupcmk6;
for (ayfnwr1v = 0; ayfnwr1v < *kuzxj1lo; ayfnwr1v++) {
*qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = *qnwamo0e4++ = 0.0e0;
@@ -752,7 +756,13 @@ void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
yq6lorbx = dqlr5bse - 4 + 1;
wsvdbx3tk = po8rwsmy[ayfnwr1v-1];
wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[0];
+
+
+
+
zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
+
+
zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[0];
f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1];
fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
diff --git a/src/vgam.f b/src/vgam.f
index 964ef3a..4f08370 100644
--- a/src/vgam.f
+++ b/src/vgam.f
@@ -828,8 +828,8 @@ C Output from Public domain Ratfor, version 1.01
info = 1
call x6kanjdh(xin, work3, nef, kgwmz4ip)
call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
- call mxrbkut0f(uwin, work3, kgwmz4ip, xjc4ywlh, nef, wkmm(1,1,1),
- *wkmm(1,1,2), tgiyxdw1, dufozmt7, dimwin, rutyk8mg)
+ call mux17f(uwin, work3, kgwmz4ip, xjc4ywlh, nef, wkmm(1,1,1), wkm
+ *m(1,1,2), tgiyxdw1, dufozmt7, dimwin, rutyk8mg)
do23284 gp1jxzuh=1,xjc4ywlh
ges1xpkr(gp1jxzuh) = gp1jxzuh
23284 continue
@@ -837,8 +837,8 @@ C Output from Public domain Ratfor, version 1.01
call vqrdca(work3,rutyk8mg,rutyk8mg,xjc4ywlh,fasrkub3,ges1xpkr,wor
*k1,qemj9asg,pvofyg8z)
call qpsedg8xf(tgiyxdw1, dufozmt7, kgwmz4ip)
- call nudh6szqf(uwin,sout,r0oydcxb,dimwin,tgiyxdw1,dufozmt7,nef,kgw
- *mz4ip,wkmm)
+ call mux22f(uwin,sout,r0oydcxb,dimwin,tgiyxdw1,dufozmt7,nef,kgwmz4
+ *ip,wkmm)
call vdqrsl(work3,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3,r0oydcxb,wor
*k1(1),effect,beta, work1(1),ub4xioar,job,info)
call vbksf(uwin,ub4xioar,kgwmz4ip,nef,wkmm,tgiyxdw1,dufozmt7,dimwi
@@ -1017,8 +1017,8 @@ C Output from Public domain Ratfor, version 1.01
endif
if(qemj9asg.eq.0)then
call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
- call mxrbkut0f(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, kuzxj1lo, w
- *kmm(1,1,1), wkmm(1,1,2), tgiyxdw1, dufozmt7, dimu, rutyk8mg)
+ call mux17f(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, kuzxj1lo, wkmm
+ *(1,1,1), wkmm(1,1,2), tgiyxdw1, dufozmt7, dimu, rutyk8mg)
do23330 gp1jxzuh=1,xjc4ywlh
ges1xpkr(gp1jxzuh) = gp1jxzuh
23330 continue
@@ -1069,8 +1069,8 @@ C Output from Public domain Ratfor, version 1.01
23350 continue
23351 continue
call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
- call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzx
- *j1lo,wy1vqfzu,wkmm)
+ call mux22f(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzxj1l
+ *o,wy1vqfzu,wkmm)
call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2
*,wk2, beta, wk2,ub4xioar,job,info)
resss=0.0d0
@@ -1180,8 +1180,8 @@ C Output from Public domain Ratfor, version 1.01
23392 continue
23393 continue
call qpsedg8xf(tgiyxdw1,dufozmt7,wy1vqfzu)
- call nudh6szqf(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzx
- *j1lo,wy1vqfzu,wkmm)
+ call mux22f(wpuarq2m,ghz9vuba, twk, dimu,tgiyxdw1,dufozmt7,kuzxj1l
+ *o,wy1vqfzu,wkmm)
call vdqrsl(vc6hatuj,rutyk8mg,rutyk8mg,qemj9asg,fasrkub3, twk, wk2
*,wk2, beta, wk2,ub4xioar,job,info)
call vbksf(wpuarq2m,ub4xioar,wy1vqfzu,kuzxj1lo,wkmm,tgiyxdw1,dufoz
diff --git a/src/vgam3.c b/src/vgam3.c
index 0b44033..926e940 100644
--- a/src/vgam3.c
+++ b/src/vgam3.c
@@ -196,7 +196,7 @@ void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[], int *wy1vqfzu, int *
int *isolve);
extern
-void fvlmz9iyC_nudh6szq(double wpuarq2m[], double tlgduey8[], double lfu2qhid[],
+void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double lfu2qhid[],
int *dimu, int *f8yswcat, int *wy1vqfzu);
extern
@@ -208,7 +208,7 @@ void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[],
int *npjlv3mr, int *wy1vqfzu, int *dvhw1ulq);
extern
-void fvlmz9iyC_mxrbkut0(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int *xjc4ywlh,
+void fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int *xjc4ywlh,
int *f8yswcat, int *dimu, int *rutyk8mg);
@@ -270,11 +270,13 @@ void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk,
double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[]) {
- int ayfnwr1v, yq6lorbx;
+ int ayfnwr1v, yq6lorbx, ayfnwr1vupp;
double *fpdlcqk9wbkq9zyi, *fpdlcqk9xecbg0pf, *fpdlcqk9z4grbpiq, *fpdlcqk9d7glzhbj, *fpdlcqk9v2eydbxs,
*fpdlcqk9osiz4fxy;
+
+
fpdlcqk9osiz4fxy = osiz4fxy + *ldk - 1;
fpdlcqk9xecbg0pf = xecbg0pf;
for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
@@ -286,10 +288,13 @@ void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk,
fpdlcqk9xecbg0pf++;
}
+
fpdlcqk9osiz4fxy = osiz4fxy + *wy1vqfzu * *ldk;
fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy + *ldk - *wy1vqfzu - 1;
fpdlcqk9z4grbpiq = z4grbpiq;
- for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
+ ayfnwr1vupp = *acpios9q - 1;
+ // 20140523; I changed the following line plus 2 other lines:
+ for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) {
fpdlcqk9wbkq9zyi = wbkq9zyi;
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
*fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9z4grbpiq;
@@ -298,10 +303,12 @@ void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk,
fpdlcqk9z4grbpiq++;
}
+
fpdlcqk9osiz4fxy = osiz4fxy + *ldk + 2 * *wy1vqfzu * *ldk;
fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy - 2 * *wy1vqfzu - 1;
fpdlcqk9d7glzhbj = d7glzhbj;
- for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
+ ayfnwr1vupp = *acpios9q - 2;
+ for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) {
fpdlcqk9wbkq9zyi = wbkq9zyi;
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
*fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9d7glzhbj;
@@ -310,10 +317,12 @@ void fapc0tnbtfeswo7c(double osiz4fxy[], int *acpios9q, int *wy1vqfzu, int *ldk,
fpdlcqk9d7glzhbj++;
}
+
fpdlcqk9osiz4fxy = osiz4fxy + *ldk + 3 * *wy1vqfzu * *ldk;
fpdlcqk9osiz4fxy = fpdlcqk9osiz4fxy - 3 * *wy1vqfzu - 1;
fpdlcqk9v2eydbxs = v2eydbxs;
- for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
+ ayfnwr1vupp = *acpios9q - 3;
+ for (ayfnwr1v = 1; ayfnwr1v <= ayfnwr1vupp; ayfnwr1v++) {
fpdlcqk9wbkq9zyi = wbkq9zyi;
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
*fpdlcqk9osiz4fxy += *fpdlcqk9wbkq9zyi++ * *fpdlcqk9v2eydbxs;
@@ -430,7 +439,17 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
wkumc9idosiz4fxy = Calloc(*ldk * (*wy1vqfzu * *acpios9q), double);
wkumc9idenaqpzk9 = Calloc(*ldk * (*acpios9q * *wy1vqfzu), double);
wkumc9idbtwy = Calloc(*wy1vqfzu * *acpios9q , double);
- wkumc9idbk3ymcih = Calloc( *acpios9q , double);
+
+
+
+
+
+ wkumc9idbk3ymcih = Calloc( *lqsahu0r , double);
+
+
+
+
+
wkumc9idwk0 = Calloc(*acpios9q * *wy1vqfzu , double);
@@ -1601,7 +1620,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
rutyk8mg = *lqsahu0r * *kgwmz4ip;
- fvlmz9iyC_mxrbkut0(wkumc9idueshvo2ic, wkumc9idwk4,
+ fvlmz9iyC_mux17(wkumc9idueshvo2ic, wkumc9idwk4,
kgwmz4ip, &xjc4ywlh, lqsahu0r, &npjlv3mreshvo2ic, &rutyk8mg);
for (gp1jxzuh = 1; gp1jxzuh <= xjc4ywlh; gp1jxzuh++) {
@@ -1610,7 +1629,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
F77_CALL(vqrdca)(wkumc9idwk4, &rutyk8mg, &rutyk8mg, &xjc4ywlh, wkumc9idfasrkub3,
wkumc9idges1xpkr, wkumc9idWrk1, &qemj9asg, &pvofyg8z);
- fvlmz9iyC_nudh6szq(wkumc9idueshvo2ic, wkumc9idsout, wkumc9idr0oydcxb,
+ fvlmz9iyC_mux22(wkumc9idueshvo2ic, wkumc9idsout, wkumc9idr0oydcxb,
&npjlv3mreshvo2ic, lqsahu0r, kgwmz4ip);
F77_CALL(vdqrsl)(wkumc9idwk4, &rutyk8mg, &rutyk8mg, &qemj9asg, wkumc9idfasrkub3,
@@ -1822,7 +1841,7 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
}
if (*qemj9asg == 0) {
- fvlmz9iyC_mxrbkut0(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, ftnjamu2, npjlv3mr, rutyk8mg);
+ fvlmz9iyC_mux17(wpuarq2m, vc6hatuj, wy1vqfzu, xjc4ywlh, ftnjamu2, npjlv3mr, rutyk8mg);
for (gp1jxzuh = 1; gp1jxzuh <= *xjc4ywlh; gp1jxzuh++) {
ges1xpkr[gp1jxzuh-1] = gp1jxzuh;
@@ -1890,7 +1909,7 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
}
}
- fvlmz9iyC_nudh6szq(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu);
+ fvlmz9iyC_mux22(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu);
F77_CALL(vdqrsl)(vc6hatuj, rutyk8mg, rutyk8mg, qemj9asg, fasrkub3,
wkumc9idTwk, wkumc9idwk2, wkumc9idwk2, zshtfg8c,
@@ -2024,7 +2043,7 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
}
}
- fvlmz9iyC_nudh6szq(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu);
+ fvlmz9iyC_mux22(wpuarq2m, wkumc9idghz9vuba, wkumc9idTwk, npjlv3mr, ftnjamu2, wy1vqfzu);
F77_CALL(vdqrsl)(vc6hatuj, rutyk8mg, rutyk8mg, qemj9asg, fasrkub3,
wkumc9idTwk, wkumc9idwk2, wkumc9idwk2, zshtfg8c,
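
The three loop-bound changes in vgam3.c above stop the code from writing past the end of the k-th superdiagonal of the banded penalty: an n x n band has only n - k entries on that diagonal, so the loops now run to acpios9q - 1, - 2 and - 3 respectively (and one Calloc buffer is now sized by lqsahu0r instead of acpios9q). The counting rule itself, in R (values illustrative):

n <- 6
for (k in 1:3) cat("superdiagonal", k, "has", n - k, "entries\n")
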
diff --git a/src/vmux.f b/src/vmux.f
index 94c47ed..31373ab 100644
--- a/src/vmux.f
+++ b/src/vmux.f
@@ -43,14 +43,14 @@ C Output from Public domain Ratfor, version 1.01
return
end
subroutine vm2af(mat, a, dimm, tgiyxdw1, dufozmt7, kuzxj1lo, wy1vq
- *fzu, upper)
+ *fzu, rb1onzwu)
implicit logical (a-z)
integer dimm, tgiyxdw1(dimm), dufozmt7(dimm), kuzxj1lo, wy1vqfzu,
- *upper
+ *rb1onzwu
double precision mat(dimm,kuzxj1lo), a(wy1vqfzu,wy1vqfzu,kuzxj1lo)
integer ayfnwr1v, yq6lorbx, gp1jxzuh, imk5wjxg
imk5wjxg = wy1vqfzu * (wy1vqfzu + 1) / 2
- if(upper .eq. 1 .or. dimm .ne. imk5wjxg)then
+ if(rb1onzwu .eq. 1 .or. dimm .ne. imk5wjxg)then
ayfnwr1v = 1
23015 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23017
yq6lorbx = 1
@@ -72,7 +72,7 @@ C Output from Public domain Ratfor, version 1.01
do23026 yq6lorbx=1,dimm
a(tgiyxdw1(yq6lorbx),dufozmt7(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a
*yfnwr1v)
- if(upper .eq. 0)then
+ if(rb1onzwu .eq. 0)then
a(dufozmt7(yq6lorbx),tgiyxdw1(yq6lorbx),ayfnwr1v) = mat(yq6lorbx,a
*yfnwr1v)
endif
@@ -82,20 +82,20 @@ C Output from Public domain Ratfor, version 1.01
23025 continue
return
end
- subroutine nudh6szqf(wpuarq2m, tlgduey8, lfu2qhid, dimu, tgiyxdw1,
- * dufozmt7, kuzxj1lo, wy1vqfzu, wk1200)
+ subroutine mux22f(wpuarq2m, tlgduey8, lfu2qhid, dimu, tgiyxdw1, du
+ *fozmt7, kuzxj1lo, wy1vqfzu, wk1200)
implicit logical (a-z)
integer dimu, tgiyxdw1(*), dufozmt7(*), kuzxj1lo, wy1vqfzu
double precision wpuarq2m(dimu,kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqf
*zu), lfu2qhid(wy1vqfzu,kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
double precision q6zdcwxk
- integer ayfnwr1v, yq6lorbx, bpvaqm5z, one, upper
+ integer ayfnwr1v, yq6lorbx, bpvaqm5z, one, rb1onzwu
one = 1
- upper = 1
+ rb1onzwu = 1
ayfnwr1v = 1
23030 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23032
call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7,
- * one, wy1vqfzu, upper)
+ * one, wy1vqfzu, rb1onzwu)
yq6lorbx = 1
23033 if(.not.(yq6lorbx .le. wy1vqfzu))goto 23035
q6zdcwxk = 0.0d0
@@ -122,13 +122,13 @@ C Output from Public domain Ratfor, version 1.01
double precision wpuarq2m(dimu,kuzxj1lo), bvecto(wy1vqfzu,kuzxj1lo
*), wk1200(wy1vqfzu,wy1vqfzu)
double precision q6zdcwxk
- integer ayfnwr1v, yq6lorbx, gp1jxzuh, upper, one
- upper = 1
+ integer ayfnwr1v, yq6lorbx, gp1jxzuh, rb1onzwu, one
+ rb1onzwu = 1
one = 1
ayfnwr1v = 1
23039 if(.not.(ayfnwr1v .le. kuzxj1lo))goto 23041
call vm2af(wpuarq2m(1,ayfnwr1v), wk1200, dimu, tgiyxdw1, dufozmt7,
- * one, wy1vqfzu, upper)
+ * one, wy1vqfzu, rb1onzwu)
yq6lorbx = wy1vqfzu
23042 if(.not.(yq6lorbx .ge. 1))goto 23044
q6zdcwxk = bvecto(yq6lorbx,ayfnwr1v)
@@ -216,8 +216,8 @@ C Output from Public domain Ratfor, version 1.01
23070 continue
return
end
- subroutine mxrbkut0f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh, kuzxj
- *1lo, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg)
+ subroutine mux17f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh, kuzxj1lo
+ *, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg)
implicit logical (a-z)
integer dimu, wy1vqfzu, xjc4ywlh, kuzxj1lo, tgiyxdw1(*), dufozmt7(
**), rutyk8mg
diff --git a/src/vmux3.c b/src/vmux3.c
index 3179cd1..ef8f9b7 100644
--- a/src/vmux3.c
+++ b/src/vmux3.c
@@ -9,14 +9,14 @@
void fvlmz9iyC_qpsedg8x(int tgiyxdw1[], int dufozmt7[], int *wy1vqfzu);
int fvlmz9iyC_VIAM(int *cz8qdfyj, int *rvy1fpli, int *wy1vqfzu);
void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f8yswcat,
- int *wy1vqfzu, int *iupper, int tgiyxdw1[], int dufozmt7[], int *oey3ckps);
-void fvlmz9iyC_nudh6szq(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[],
+ int *wy1vqfzu, int *irb1onzwu, int tgiyxdw1[], int dufozmt7[], int *oey3ckps);
+void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[],
int *npjlv3mr, int *f8yswcat, int *wy1vqfzu);
void fvlmz9iyC_vbks(double wpuarq2m[], double unvxka0m[],
int *wy1vqfzu, int *f8yswcat, int *dimu);
void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[],
int *wy1vqfzu, int *dvhw1ulq, int *i_solve);
-void fvlmz9iyC_mxrbkut0(double wpuarq2m[], double he7mqnvy[],
+void fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[],
int *wy1vqfzu, int *xjc4ywlh, int *f8yswcat, int *dimu, int *rutyk8mg);
void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[],
int *npjlv3mr, int *wy1vqfzu, int *dvhw1ulq);
@@ -80,7 +80,7 @@ int fvlmz9iyC_VIAM(int *cz8qdfyj, int *rvy1fpli, int *wy1vqfzu) {
void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f8yswcat,
- int *wy1vqfzu, int *iupper, int tgiyxdw1[], int dufozmt7[], int *oey3ckps) {
+ int *wy1vqfzu, int *irb1onzwu, int tgiyxdw1[], int dufozmt7[], int *oey3ckps) {
@@ -90,7 +90,7 @@ void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f
double *qnwamo0e;
if (*oey3ckps == 1) {
- if (*iupper == 1 || *dim1m != imk5wjxg) {
+ if (*irb1onzwu == 1 || *dim1m != imk5wjxg) {
i_size_bzmd6ftvmat = zyojx5hw * *f8yswcat;
qnwamo0e = bzmd6ftvmat;
for (ayfnwr1v = 0; ayfnwr1v < i_size_bzmd6ftvmat; ayfnwr1v++) {
@@ -99,7 +99,7 @@ void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f
}
}
- if (iupper == 0) {
+ if (irb1onzwu == 0) {
for (ayfnwr1v = 1; ayfnwr1v <= *f8yswcat; ayfnwr1v++) {
urohxe6t = (ayfnwr1v-1) * zyojx5hw;
for (yq6lorbx = 1; yq6lorbx <= *dim1m; yq6lorbx++) {
@@ -126,7 +126,7 @@ void fvlmz9iyC_vm2a(double mtlgduey8[], double bzmd6ftvmat[], int *dim1m, int *f
}
-void fvlmz9iyC_nudh6szq(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[],
+void fvlmz9iyC_mux22(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat[],
int *npjlv3mr, int *f8yswcat, int *wy1vqfzu) {
@@ -268,7 +268,7 @@ void fvlmz9iyjdbomp0g(double rbne6ouj[], double unvxka0m[],
}
-void fvlmz9iyC_mxrbkut0(double wpuarq2m[], double he7mqnvy[],
+void fvlmz9iyC_mux17(double wpuarq2m[], double he7mqnvy[],
int *wy1vqfzu, int *xjc4ywlh, int *f8yswcat,
int *npjlv3mr, int *rutyk8mg) {
diff --git a/vignettes/categoricalVGAM.Rnw b/vignettes/categoricalVGAM.Rnw
deleted file mode 100644
index c4f98e0..0000000
--- a/vignettes/categoricalVGAM.Rnw
+++ /dev/null
@@ -1,2323 +0,0 @@
-\documentclass[article,shortnames,nojss]{jss}
-\usepackage{thumbpdf}
-%% need no \usepackage{Sweave.sty}
-
-\SweaveOpts{engine=R,eps=FALSE}
-%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
-%\VignetteDepends{VGAM}
-%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
-%\VignettePackage{VGAM}
-
-%% new commands
-\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
-\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
-\newcommand{\pr}{\mbox{$P$}}
-\newcommand{\logit}{\mbox{\rm logit}}
-\newcommand{\bzero}{{\bf 0}}
-\newcommand{\bone}{{\bf 1}}
-\newcommand{\bid}{\mbox{\boldmath $d$}}
-\newcommand{\bie}{\mbox{\boldmath $e$}}
-\newcommand{\bif}{\mbox{\boldmath $f$}}
-\newcommand{\bix}{\mbox{\boldmath $x$}}
-\newcommand{\biy}{\mbox{\boldmath $y$}}
-\newcommand{\biz}{\mbox{\boldmath $z$}}
-\newcommand{\biY}{\mbox{\boldmath $Y$}}
-\newcommand{\bA}{\mbox{\rm \bf A}}
-\newcommand{\bB}{\mbox{\rm \bf B}}
-\newcommand{\bC}{\mbox{\rm \bf C}}
-\newcommand{\bH}{\mbox{\rm \bf H}}
-\newcommand{\bI}{\mbox{\rm \bf I}}
-\newcommand{\bX}{\mbox{\rm \bf X}}
-\newcommand{\bW}{\mbox{\rm \bf W}}
-\newcommand{\bY}{\mbox{\rm \bf Y}}
-\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
-\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
-\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
-\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
-\newcommand{\diag}{ \mbox{\rm diag} }
-\newcommand{\Var}{ \mbox{\rm Var} }
-\newcommand{\R}{{\textsf{R}}}
-\newcommand{\VGAM}{\pkg{VGAM}}
-
-
-\author{Thomas W.~Yee\\University of Auckland}
-\Plainauthor{Thomas W. Yee}
-
-\title{The \pkg{VGAM} Package for Categorical Data Analysis}
-\Plaintitle{The VGAM Package for Categorical Data Analysis}
-
-\Abstract{
- Classical categorical regression models such as the multinomial logit and
- proportional odds models are shown to be readily handled by the vector
- generalized linear and additive model (VGLM/VGAM) framework. Additionally,
- there are natural extensions, such as reduced-rank VGLMs for
- dimension reduction, and allowing covariates that have values
- specific to each linear/additive predictor,
- e.g., for consumer choice modeling. This article describes some of the
- framework behind the \pkg{VGAM} \R{}~package, its usage and implementation
- details.
-}
-\Keywords{categorical data analysis, Fisher scoring,
- iteratively reweighted least squares,
- multinomial distribution, nominal and ordinal polytomous responses,
- smoothing, vector generalized linear and additive models,
- \VGAM{} \R{} package}
-\Plainkeywords{categorical data analysis, Fisher scoring,
- iteratively reweighted least squares, multinomial distribution,
- nominal and ordinal polytomous responses, smoothing,
- vector generalized linear and additive models, VGAM R package}
-
-\Address{
- Thomas W. Yee \\
- Department of Statistics \\
- University of Auckland, Private Bag 92019 \\
- Auckland Mail Centre \\
- Auckland 1142, New Zealand \\
-  E-mail: \email{t.yee@auckland.ac.nz}\\
- URL: \url{http://www.stat.auckland.ac.nz/~yee/}
-}
-
-
-\begin{document}
-
-
-<<echo=FALSE, results=hide>>=
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-@
-
-
-% ----------------------------------------------------------------------
-\section{Introduction}
-\label{sec:jsscat.intoduction}
-
-
-This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
-based on~\cite{Yee:2010}.
-Any subsequent features (especially non-backward compatible ones)
-will appear here.
-
-The subject of CDA is concerned with
-analyses where the response is categorical regardless of whether
-the explanatory variables are continuous or categorical. It is a
-very frequent form of data. Over the years several CDA regression
-models for polytomous responses have become popular, e.g., those
-in Table~\ref{tab:cat.quantities}. Not surprisingly, the models
-are interrelated: their foundation is the multinomial distribution
-and consequently they share similar and overlapping properties which
-modellers should know and exploit. Unfortunately, software has been
-slow to reflect their commonality and this makes analyses unnecessarily
-difficult for the practitioner on several fronts, e.g., using different
-functions/procedures to fit different models which does not aid the
-understanding of their connections.
-
-
-This historical misfortune can be seen by considering \R{}~functions
-for~CDA. From the Comprehensive \proglang{R} Archive Network
-(CRAN, \url{http://CRAN.R-project.org/}) there is~\texttt{polr()}
-\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
-model and~\texttt{multinom()}
-\citep[in~\pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
-logit model. However, both of these can be considered `one-off'
-modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
-has greater functionality: it can fit the proportional odds model
-(and the forward continuation ratio model upon preprocessing). Neither
-\texttt{polr()} or \texttt{lrm()} appear able to fit the nonproportional
-odds model. There are non-CRAN packages too, such as the modeling
-function~\texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
-the proportional odds, continuation ratio and adjacent categories models;
-however it calls \texttt{nlm()} and the user must supply starting values.
-In general these \R{} \citep{R} modeling functions are not modular
-and often require preprocessing and sometimes are not self-starting.
-The implementations can be perceived as scattered and piecemeal
-in nature. Consequently, if the practitioner wishes to fit the models
-of Table~\ref{tab:cat.quantities} then there is a need to master several
-modeling functions from several packages each having different syntaxes
-etc. This is a hindrance to efficient CDA.
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}{|c|c|l|}
-\hline
-Quantity & Notation &
-%Range of~$j$ &
-\VGAM{} family function \\
-\hline
-%
-$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{acat()} \\
-%
-$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
-%$2,\ldots,M+1$ &
-\texttt{acat(reverse = TRUE)} \\
-%
-$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
-%$1,\ldots,M$ &
-\texttt{cratio()} \\
-%
-$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
-%$2,\ldots,M+1$ &
-\texttt{cratio(reverse = TRUE)} \\
-%
-$\pr(Y\leq j)$ &$\gamma_{j}$ &
-%$1,\ldots,M$ &
-\texttt{cumulative()} \\
-%
-$\pr(Y\geq j)$ &$\gamma_{j}^R$&
-%$2,\ldots,M+1$ &
-\texttt{cumulative(reverse = TRUE)} \\
-%
-$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
-%$1,\ldots,M$ &
-\texttt{multinomial()} \\
-%
-$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{sratio()} \\
-%
-$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
-%$2,\ldots,M+1$ &
-\texttt{sratio(reverse = TRUE)} \\
-%
-\hline
-\end{tabular}
-\caption{
-Quantities defined in \VGAM{} for a
-categorical response~$Y$ taking values $1,\ldots,M+1$.
-Covariates \bix{} have been omitted for clarity.
-The LHS quantities are~$\eta_{j}$
-or~$\eta_{j-1}$ for~$j=1,\ldots,M$ (not reversed)
-and~$j=2,\ldots,M+1$ (if reversed), respectively.
-All models are estimated by minimizing the deviance.
-All except for \texttt{multinomial()} are suited to ordinal~$Y$.
-\label{tab:cat.quantities}
-}
-\end{table}
-
-
-
-
-\proglang{SAS} \citep{SAS} does not fare much better than~\R. Indeed,
-it could be considered as having an \textit{excess} of options which
-bewilders the non-expert user; there is little coherent overriding
-structure. Its \code{proc logistic} handles the multinomial logit
-and proportional odds models, as well as exact logistic regression
-\citep[see][which is for Version~8 of \proglang{SAS}]{stok:davi:koch:2000}.
-The fact that the proportional odds model may be fitted by \code{proc
-logistic}, \code{proc genmod} and \code{proc probit} arguably leads
-to possible confusion rather than the making of connections, e.g.,
-\code{genmod} is primarily for GLMs and the proportional odds model is not
-a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
-phreg} fits the multinomial logit model, and \code{proc catmod} with
-its WLS implementation adds further potential confusion.
-
-
-This article attempts to show how these deficiencies can be addressed
-by considering the vector generalized linear and additive model
-(VGLM/VGAM) framework, as implemented by the author's~\pkg{VGAM}
-package for~\R{}. The main purpose of this paper is to demonstrate
-how the framework is very well suited to many `classical' regression
-models for categorical responses, and to describe the implementation and
-usage of~\pkg{VGAM} for such. To this end an outline of this article
-is as follows. Section~\ref{sec:jsscat.VGLMVGAMoverview} summarizes
-the basic VGLM/VGAM framework. Section~\ref{sec:jsscat.vgamff}
-centers on functions for CDA in~\VGAM. Given an adequate framework,
-some natural extensions of Section~\ref{sec:jsscat.VGLMVGAMoverview} are
-described in Section~\ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
-can benefit from Section~\ref{sec:jsscat.userTopics} which shows how
-the software reflects their common theory. Some examples are given in
-Section~\ref{sec:jsscat.eg}. Section~\ref{sec:jsscat.implementDetails}
-contains selected topics in statistical computing that are
-more relevant to programmers interested in the underlying code.
-Section~\ref{sec:jsscat.extnUtil} discusses several utilities and
-extensions needed for advanced CDA modeling, and the article concludes
-with a discussion. This document was run using \pkg{VGAM}~0.7-10
-\citep{yee:VGAM:2010} under \R~2.10.0.
-
-
-Some general references for categorical data providing
-background to this article include
-\cite{agre:2002},
-\cite{fahr:tutz:2001},
-\cite{leon:2000},
-\cite{lloy:1999},
-\cite{long:1997},
-\cite{mccu:neld:1989} and
-\cite{simo:2003}.
-An overview of models for ordinal responses is~\cite{liu:agre:2005},
-and a manual for fitting common models found in~\cite{agre:2002}
-to polytomous responses with various software is~\cite{thom:2009}.
-A package for visualizing categorical data in~\R{} is~\pkg{vcd}
-\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{VGLM/VGAM overview}
-\label{sec:jsscat.VGLMVGAMoverview}
-
-
-This section summarizes the VGLM/VGAM framework with a particular emphasis
-toward categorical models, since the classes encapsulate many multivariate
-response models in, e.g., survival analysis, extreme value analysis,
-quantile and expectile regression, time series, bioassay data, nonlinear
-least-squares models, and scores of standard and nonstandard univariate
-and continuous distributions. The framework is partially summarized by
-Table~\ref{tab:rrvglam.jss.subset}. More general details about VGLMs
-and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
-respectively. An informal and practical article connecting the general
-framework with the software is~\cite{Rnews:Yee:2008}.
-
-
-
-\subsection{VGLMs}
-\label{sec:wffc.appendixa.vglms}
-
-Suppose the observed response \biy{} is a $q$-dimensional vector.
-VGLMs are defined as a model for which the conditional distribution
-of $\biY$ given explanatory $\bix$ is of the form
-\begin{eqnarray}
-f(\biy | \bix ; \bB, \phi) ~=~ h(\biy, \eta_1,\ldots, \eta_M, \phi)
-\label{gammod}
-\end{eqnarray}
-for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
-\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
-unknown regression coefficients,
-and the~$j$th linear predictor is
-\begin{equation}
-\eta_j ~=~ \eta_j(\bix) ~=~ \bbeta_j^{\top} \bix ~=~
-\sum_{k=1}^p \beta_{(j)k} \, x_k , ~~~~ j=1,\ldots,M.
-\label{gammod2}
-\end{equation}
-Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
-Note that~(\ref{gammod2}) means that \textit{all} the parameters may be
-potentially modelled as functions of~\bix. It can be seen that VGLMs are
-like GLMs but allow for multiple linear predictors, and they encompass
-models outside the small confines of the exponential family.
-In~(\ref{gammod}) the quantity~$\phi$ is an optional scaling parameter
-which is included for backward compatibility with common adjustments
-to overdispersion, e.g., with respect to GLMs.
-
-
-In general there is no relationship between~$q$ and~$M$: it
-depends specifically on the model or distribution to be fitted.
-However, for the `classical' categorical regression models of
-Table~\ref{tab:cat.quantities} we have~$M=q-1$ since~$q$ is the number
-of levels the multi-category response~$Y$ has.
-
-
-
-
-
-The $\eta_j$ of VGLMs may be applied directly to parameters of a
-distribution rather than just to a mean for GLMs. A simple example is
-a univariate distribution with a location parameter~$\xi$ and a scale
-parameter~$\sigma > 0$, where we may take~$\eta_1 = \xi$ and~$\eta_2 =
-\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
-link function~$g_{j}$ and parameter~$\theta_{j}$.
-For example, the adjacent categories models in
-Table~\ref{tab:cat.quantities} are ratios of two probabilities, therefore
-a log link of~$\zeta_{j}^{R}$ or~$\zeta_{j}$ is the default.
-In \VGAM{}, there are currently over a dozen links to choose from, of
-which any can be assigned to any parameter, ensuring maximum flexibility.
-Table~\ref{tab:jsscat.links} lists some of them.
-
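-As a minimal sketch of this flexibility (a family function call only;
-data would be supplied to \texttt{vglm()}), a cumulative probit model
-with the parallelism assumption can be specified by
-\begin{Code}
-cumulative(link = "probit", parallel = TRUE)
-\end{Code}
-Evaluating a family function in isolation simply returns an object
-describing the model rather than fitting anything.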
-
-
-\begin{table}[tt]
-\centering
-%\ ~~~~ \par
-\begin{tabular}{|l|l|l|l|}
-\hline
-\ \ ~~~~~~~~~~~~ $\boldeta$ &
-Model & Modeling & Reference \\
- & & function & \\
-%-------------------------------------------------------------
-\hline
-\hline
-%-------------------------------------------------------------
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
-VGLM & \texttt{vglm()}
-&
-\cite{yee:hast:2003} \\[1.6ex]
-%Yee \& Hastie~(2003) \\[1.6ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} +
- \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
-%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
-VGAM & \texttt{vgam()}
-&
-\cite{yee:wild:1996} \\[2.2ex]
-%Yee \& Wild~(1996) \\[2.2ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
-RR-VGLM & \texttt{rrvglm()}
-&
-\cite{yee:hast:2003} \\[1.8ex]
-%Yee \& Hastie~(2003) \\[1.8ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-See \cite{yee:hast:2003} &
-Goodman's~RC & \texttt{grc()}
-&
-%\cite{yee:hast:2003} \\[1.8ex]
-\cite{good:1981} \\[1.8ex]
-%-------------------------------------------------------------
-\hline
-\end{tabular}
-\caption{
-Some of
-the package \VGAM{} and
-its framework.
-The vector of latent variables $\bnu = \bC^{\top} \bix_2$
-where
-$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
-\label{tab:rrvglam.jss.subset}
-}
-%\medskip
-\end{table}
-
-
-
-
-
-
-VGLMs are estimated using iteratively reweighted least squares~(IRLS)
-which is particularly suitable for categorical models
-\citep{gree:1984}.
-All models in this article have a log-likelihood
-\begin{equation}
-\ell ~=~ \sum_{i=1}^n \, w_i \, \ell_i
-\label{eq:log-likelihood.VGAM}
-\end{equation}
-where the~$w_i$ are known positive prior weights.
-Let~$\bix_i$ denote the explanatory vector for the~$i$th observation,
-for $i=1,\dots,n$.
-Then one can write
-\begin{eqnarray}
-\boldeta_i &=& \boldeta(\bix_i) ~=~
-\left(
-\begin{array}{c}
-\eta_1(\bix_i) \\
-\vdots \\
-\eta_M(\bix_i)
-\end{array} \right) ~=~
-\bB^{\top} \bix_i ~=~
-\left(
-\begin{array}{c}
-\bbeta_1^{\top} \bix_i \\
-\vdots \\
-\bbeta_M^{\top} \bix_i
-\end{array} \right)
-\nonumber
-\\
-&=&
-\left(
-\begin{array}{ccc}
-\beta_{(1)1} & \cdots & \beta_{(1)p} \\
-\vdots & & \vdots \\
-\beta_{(M)1} & \cdots & \beta_{(M)p} \\
-\end{array} \right)
-\bix_i ~=~
-\left(
-\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
-\right)
-\bix_i .
-\label{eq:lin.pred}
-\end{eqnarray}
-In IRLS,
-an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
-is regressed upon a large (VLM) model matrix, with
-$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
-The working weights $\bW_i$ here are
-$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
-(which, under regularity conditions, is equal to
-$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
-\partial \boldeta_i^{\top})]$),
-giving rise to the Fisher~scoring algorithm.
-
-
-Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
-(LM) model matrix
-obtained from the \texttt{formula} argument of \texttt{vglm()}.
-Given $\biz_i$, $\bW_i$ and~$\bX{}$ at the current IRLS iteration,
-a weighted multivariate regression is performed.
-To do this, a \textit{vector linear model} (VLM) model matrix
-$\bX_{\sVLM}$ is formed from~$\bX{}$ and~$\bH_k$
-(see Section~\ref{sec:wffc.appendixa.vgams}).
-This has $nM$~rows and, if there are no constraints, $Mp$~columns.
-Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
-upon $\bX_{\sVLM}$
-with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
-This system of linear equations is converted to one large
-WLS fit by premultiplying by the Cholesky
-factors of the~$\bW_i$.
-
-
-Fisher~scoring usually has good numerical stability
-because the~$\bW_i$ are positive-definite over a larger
-region of parameter space than Newton-Raphson.
-For the categorical models in this article the expected
-information matrices are simpler than the observed
-information matrices, and are easily derived,
-therefore all the families in Table~\ref{tab:cat.quantities}
-implement Fisher~scoring.
-
-
-
-\subsection{VGAMs and constraint matrices}
-\label{sec:wffc.appendixa.vgams}
-
-
-VGAMs provide additive-model extensions to VGLMs, that is,
-(\ref{gammod2})~is generalized to
-\begin{equation}
-\eta_j(\bix) ~=~ \beta_{(j)1} +
-\sum_{k=2}^p \; f_{(j)k}(x_k), ~~~~ j = 1,\ldots, M,
-\label{addmod}
-\end{equation}
-a sum of smooth functions of the individual covariates, just as
-with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
-(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
-and are estimated simultaneously using \textit{vector smoothers}.
-VGAMs are thus a visual data-driven method that is well suited to
-exploring data, and they retain the simplicity of interpretation that
-GAMs possess.
-
-
-
-An important concept, especially for CDA, is the idea of
-`constraints-on-the-functions'.
-In practice we often wish to constrain the effect of a covariate to
-be the same for some of the~$\eta_j$ and to have no effect for others.
-We shall see below that this constraints idea is important
-for several categorical models because of a popular parallelism assumption.
-As a specific example, for VGAMs we may wish to take
-\begin{eqnarray*}
-\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
-\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
-\end{eqnarray*}
-so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
-For VGAMs, we can represent these models using
-\begin{eqnarray}
-\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
-\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
-\label{eqn:constraints.VGAM}
-\end{eqnarray}
-where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
-\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
-possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
-vector of unknown intercepts. With no constraints at all, $\bH_1 =
-\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
-Like the $\bif_k$, the~$\bif_k^*$ are centered for uniqueness.
-For VGLMs, the~$\bif_k$ are linear so that
-\begin{eqnarray}
-{\bB}^{\top} &=&
-\left(
-\bH_1 \bbeta_{(1)}^*
- \;
-\Bigg|
- \;
-\bH_2 \bbeta_{(2)}^*
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\bH_p \bbeta_{(p)}^*
-\right)
-\label{eqn:lin.coefs4}
-\end{eqnarray}
-for some vectors
-$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
-
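-As a hedged sketch (with hypothetical variable and data names), the
-two-function example above could be requested via the
-\texttt{constraints} argument: here $M=2$, $\bH_1 = \bI_2$,
-$\bH_2 = (1,1)^{\top}$ and $\bH_3 = (1,0)^{\top}$.
-\begin{Code}
-clist <- list("(Intercept)" = diag(2),     # H_1
-              "s(x2)"       = rbind(1, 1), # H_2: f_(1)2 == f_(2)2
-              "s(x3)"       = rbind(1, 0)) # H_3: f_(2)3 == 0
-fit.c <- vgam(cbind(y1, y2, y3) ~ s(x2) + s(x3),
-              family = multinomial, constraints = clist,
-              data = somedata)  # somedata is hypothetical
-\end{Code}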
-
-The
-$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
-Kronecker product operations.
-For example, with trivial constraints,
-$\bX_{\sVLM} = \bX \otimes \bI_M$.
-More generally,
-\begin{eqnarray}
-\bX_{\sVLM} &=&
-\left(
-\left( \bX \, \bie_{1} \right) \otimes \bH_1
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{2} \right) \otimes \bH_2
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{p} \right) \otimes \bH_p
-\right)
-\label{eqn:X_vlm_Hk}
-\end{eqnarray}
-($\bie_{k}$ is a vector of zeros except for a one in the $k$th~position)
-so that
-$\bX_{\sVLM}$ is $(nM) \times p^*$ where
-$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
-of columns of all the constraint matrices.
-Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
-\texttt{model.matrix(vglmObject, type = "vlm")}
-and
-\texttt{model.matrix(vglmObject, type = "lm")}
-respectively.
-Equation~\ref{eqn:lin.coefs4} focusses on the rows of~\bB{} whereas
-(\ref{eq:lin.pred})~focusses on the columns.
-
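-As a quick check (a sketch using the hypothetical \texttt{vglmObject}
-above), the two matrices are related by a factor of~$M$ in their row
-dimensions:
-\begin{Code}
-X.lm  <- model.matrix(vglmObject, type = "lm")   # n x p
-X.vlm <- model.matrix(vglmObject, type = "vlm")  # (nM) x p*
-nrow(X.vlm) / nrow(X.lm)                         # equals M
-\end{Code}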
-
-VGAMs are estimated by applying a modified vector backfitting algorithm
-\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
-
-
-
-\subsection{Vector splines and penalized likelihood}
-\label{sec:ex.vspline}
-
-If~(\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
-natural extension of the cubic smoothing spline to vector responses)
-then it can be shown that the resulting solution maximizes a penalized
-likelihood; some details are sketched in~\cite{yee:step:2007}. In fact,
-knot selection for vector splines follows the same idea as O-splines
-\citep[see][]{wand:orme:2008} in order to lower the computational cost.
-
-
-The usage of \texttt{vgam()} with smoothing is very similar
-to~\texttt{gam()} \citep{gam:pack:2009}, e.g.,
-to fit a nonparametric proportional odds model
-\citep[cf.~p.179 of][]{mccu:neld:1989}
-to the pneumoconiosis data one could try
-<<label = pneumocat, eval=T>>=
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
- cumulative(reverse = TRUE, parallel = TRUE), pneumo)
-@
-Here, setting \texttt{df = 1} means a linear fit so that
-\texttt{df = 2} affords a little nonlinearity.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section[VGAM family functions]{\pkg{VGAM} family functions}
-\label{sec:jsscat.vgamff}
-
-
-
-This section summarizes and comments on the~\VGAM{} family functions
-of Table~\ref{tab:cat.quantities} for a categorical response variable
-taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
-entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
-instead and assign the \texttt{family} argument a \VGAM{}~family function.
-The use of a \VGAM{}~family function to fit a specific model is far
-simpler than having a different modeling function for each model.
-Options specific to that model appear as arguments of that \VGAM{}~family
-function.
-
-
-
-
-
-While writing \texttt{cratio()} it was found that various authors defined
-the quantity ``continuation ratio'' differently, therefore it became
-necessary to define a ``stopping ratio''. Table~\ref{tab:cat.quantities}
-defines these quantities for \VGAM{}.
-
-
-
-
-The multinomial logit model is usually described by choosing the first or
-last level of the factor to be baseline. \VGAM{}~chooses the last level
-(Table~\ref{tab:cat.quantities}) by default, however that can be changed
-to any other level by use of the \texttt{refLevel} argument.
-
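-For instance, a sketch using the pneumoconiosis data of
-Section~\ref{sec:ex.vspline} (assuming the first level is wanted as
-the baseline):
-\begin{Code}
-fit.mn <- vglm(cbind(normal, mild, severe) ~ let,
-               multinomial(refLevel = 1), data = pneumo)
-\end{Code}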
-
-
-
-If the proportional odds assumption is inadequate then one strategy is
-to try a different link function (see Section~\ref{sec:jsscat.links}
-for a selection). Another alternative is to add extra terms such as
-interaction terms into the linear predictor
-\citep[available in the \proglang{S}~language;][]{cham:hast:1993}.
-Another is to fit the so-called \textit{partial}
-proportional odds model \citep{pete:harr:1990}
-which \VGAM{} can fit via constraint matrices.
-
-
-
-In the terminology of~\cite{agre:2002},
-\texttt{cumulative()} fits the class of \textit{cumulative link models},
-e.g.,
-\texttt{cumulative(link = probit)} is a cumulative probit model.
-For \texttt{cumulative()}
-it was difficult to decide whether
-\texttt{parallel = TRUE}
-or
-\texttt{parallel = FALSE}
-should be the default.
-In fact, the latter is the current default.
-Users need to set
-\texttt{cumulative(parallel = TRUE)} explicitly to
-fit a proportional odds model---hopefully this will alert
-them to the fact that they are making
-the proportional odds assumption and prompt them to
-check its validity (\cite{pete:1990}; e.g., through a deviance or
-likelihood ratio test). However, the default makes numerical
-problems far more likely to occur.
-Thus there is tension between the two options.
-As a compromise there is now a \VGAM{} family function
-called \texttt{propodds(reverse = TRUE)} which is equivalent to
-\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
-
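-A small sketch of this equivalence on the pneumoconiosis data
-(\texttt{propodds()} has \texttt{reverse = TRUE} as its default):
-\begin{Code}
-fit.po <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
-fit.cu <- vglm(cbind(normal, mild, severe) ~ let,
-               cumulative(parallel = TRUE, reverse = TRUE, link = "logit"),
-               data = pneumo)
-max(abs(coef(fit.po) - coef(fit.cu)))  # Expected to be essentially zero
-\end{Code}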
-
-
-By the way, note that arguments such as
-\texttt{parallel}
-can handle a slightly more complex syntax.
-A call such as
-\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
-is only applied to~$X_2$ and~$X_5$.
-This might be equivalent to something like
-\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
-explanatory variables.
-
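-A hedged sketch with hypothetical variables \texttt{x2}--\texttt{x5}
-and hypothetical data:
-\begin{Code}
-# Parallelism applies to x2 and x5 only (and not to the intercept)
-fit.pp <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
-               cumulative(parallel = TRUE ~ x2 + x5 - 1),
-               data = somedata)  # ymatrix and somedata are hypothetical
-\end{Code}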
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Other models}
-\label{sec:jsscat.othermodels}
-
-
-Given the VGLM/VGAM framework of Section~\ref{sec:jsscat.VGLMVGAMoverview}
-it is found that natural extensions are readily proposed in several
-directions. This section describes some such extensions.
-
-
-
-
-\subsection{Reduced-rank VGLMs}
-\label{sec:jsscat.RRVGLMs}
-
-
-Consider a multinomial logit model where $p$ and $M$ are both large.
-A (not-too-convincing) example might be the data frame \texttt{vowel.test}
-in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
-The vowel recognition data set involves $q=11$ symbols produced from
-8~speakers with 6~replications of each. The training data comprises
-$10$~input features (not including the intercept) based on digitized
-utterances. A multinomial logit model fitted to these data would
-have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
-coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
-of $n$~to the number of parameters is small, and it would be good to
-introduce some parsimony into the model.
-
-
-
-A simple and elegant solution is to represent $\widehat{\bB}$ by
-its reduced-rank approximation. To do this, partition $\bix$ into
-$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
-\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
-to~$\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
-$=\min(M,p)$, and since there are $M \times p$ regression coefficients
-to estimate this is `too' large for some models and/or data sets.
-If we approximate~$\bB_2$ by a reduced-rank regression
-\begin{equation}
-\label{eq:rrr.BAC}
-\bB_2 ~=~ \bC{} \, \bA^{\top}
-\end{equation}
-and if
-the rank~$R$ is kept low then this can cut down the number of regression
-coefficients dramatically. If~$R=2$ then the results may be biplotted
-(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
-and $M \times R$ respectively, and usually they are `thin'.
-
-
-More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
-is simply a VGLM where~$\bB_2$ is expressed as a product of two thin
-estimated matrices (Table~\ref{tab:rrvglam.jss.subset}). Indeed,
-\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
-matrices that are unknown and estimated. Computationally, this is
-done using an alternating method: in~(\ref{eq:rrr.BAC}) estimate~\bA{}
-given the current estimate of~\bC{}, and then estimate~\bC{} given the
-current estimate of~\bA{}. This alternating algorithm is repeated until
-convergence within each IRLS iteration.
-
-
-Incidentally, special cases of RR-VGLMs have appeared in the
-literature. For example, an RR-multinomial logit model is known as the
-\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
-RC~model (see Section~\ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
-multivariate Poisson model. Note that the parallelism assumption of the
-proportional odds model \citep{mccu:neld:1989} can be thought of as a
-type of reduced-rank regression where the constraint matrices are thin
-($\bone_M$, actually) and known.
-
-
-
-The modeling function \texttt{rrvglm()} should work with any \VGAM{}
-family function compatible with \texttt{vglm()}. Of course, its
-applicability should be restricted to models where a reduced-rank
-regression of~$\bB_2$ makes sense.
-
-
-
-
-
-
-
-
-
-\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
-\label{sec:jsscat.rrr.goodman}
-
-
-
-
-
-Let~$\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
-Section~4.2 of~\cite{yee:hast:2003} shows that Goodman's~RC$(R)$ association
-model \citep{good:1981} fits within the VGLM framework by setting up
-the appropriate indicator variables, structural zeros and constraint
-matrices. Goodman's model fits a reduced-rank type model to~\bY{}
-by firstly assuming that~$Y_{ij}$ has a Poisson distribution, and that
-\begin{eqnarray}
-\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} +
-\sum_{k=1}^R a_{ik} \, c_{jk} ,
-\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
-\label{eqn:goodmanrc}
-\end{eqnarray}
-where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
-rank~$R$ satisfies $R < \min(n,M)$.
-
-
-The modeling function \texttt{grc()} should work on any two-way
-table~\bY{} of counts generated by~(\ref{eqn:goodmanrc}) provided
-the number of 0's is not too large. Its usage is quite simple, e.g.,
-\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
-By default a \texttt{Rank = 1} model is fitted.
-
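-A toy sketch (simulated counts, so purely illustrative):
-\begin{Code}
-set.seed(123)
-Ymatrix <- matrix(rpois(20, lambda = 10), nrow = 4, ncol = 5)
-grc1 <- grc(Ymatrix)          # Rank = 1 by default
-coef(grc1, matrix = TRUE)
-\end{Code}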
-
-
-
-\subsection{Bradley-Terry models}
-\label{sec:jsscat.brat}
-
-Consider
-an experiment consisting of $n_{ij}$ judges who compare
-pairs of items $T_i$, $i=1,\ldots,M+1$.
-They express their preferences between $T_i$ and $T_j$.
-Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
-comparisons, and assume independence for ratings of the same pair
-by different judges and for ratings of different pairs by the same judge.
-Let $\pi_i$ be the \textit{worth} of item~$T_i$,
-$$
-\pr(T_i > T_j) ~=~ p_{i/ij} ~=~ \frac{\pi_i}{\pi_i + \pi_j},
-\ ~~~~~i \neq {j},
-$$
-where ``$T_i>T_j$'' means~$i$ is preferred over~$j$.
-Suppose that $\pi_i > 0$.
-Let~$Y_{ij}$ be the number of times that $T_i$ is preferred
-over~$T_j$ in the~$n_{ij}$ comparisons of the pairs.
-Then~$Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
-This is a Bradley-Terry model (without ties),
-and the \VGAM{} family function is~\texttt{brat()}.
-
-
-Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
-involves maximizing
-$$
-\prod_{i<j}^{M+1}
-\left(
-\begin{array}{c}
-n_{ij} \\
-y_{ij}
-\end{array} \right)
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)^{y_{ij}}
-\left(
-\frac{\pi_j}{\pi_i + \pi_j}
-\right)^{n_{ij}-y_{ij}} .
-$$
-By default, $\pi_{M+1} \equiv 1$ is used for identifiability;
-however, this can be changed very easily.
-Note that one can define
-linear predictors $\eta_{ij}$ of the form
-\begin{equation}
-\label{eq:bradter.logit}
-\logit
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right) ~=~ \log
-\left(
-\frac{\pi_i}{\pi_j}
-\right) ~=~ \lambda_i - \lambda_j .
-\end{equation}
-The \VGAM{} framework can handle the Bradley-Terry model only for
-intercept-only models; it has
-\begin{equation}
-\label{eq:bradter}
-\lambda_j ~=~ \eta_j ~=~ \log\, \pi_j = \beta_{(1)j},
-\ \ \ \ j=1,\ldots,M.
-\end{equation}
-
-
-As well as having many applications in the field of preferences,
-the Bradley-Terry model has many uses in modeling `contests' between
-teams~$i$ and~$j$, where only one of the teams can win in each
-contest (ties are not allowed under the classical model).
-The {packaging} function \texttt{Brat()} can be used to
-convert a square matrix into one that has more columns, to
-serve as input to \texttt{vglm()}.
-For example,
-consider journal citation data where a citation of article~B
-by article~A is a win for article~B and a loss for article~A.
-On a specific data set,
-<<>>=
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284, 730, NA, 813, 276,
- 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-@
-then \texttt{Brat(squaremat)} returns a~$1 \times 12$ matrix.
-
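-A hedged sketch of fitting the model to these data:
-\begin{Code}
-fit.bt <- vglm(Brat(squaremat) ~ 1, brat, trace = TRUE)
-Coef(fit.bt)  # Estimated worths; the last journal has worth fixed at 1
-\end{Code}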
-
-
-
-
-
-
-\subsubsection{Bradley-Terry model with ties}
-\label{sec:cat.bratt}
-
-
-The \VGAM{} family function \texttt{bratt()}
-implements
-a Bradley-Terry model with ties (no preference), e.g.,
-where both $T_i$ and $T_j$ are equally good or bad.
-Here we assume
-\begin{eqnarray*}
- \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
-\ ~~~~~
- \pr(T_i = T_j) ~=~ \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
-\end{eqnarray*}
-with $\pi_0 > 0$ as an extra parameter.
-It has
-$$
-\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
-$$
-by default, where there are $M$~competitors and $\pi_M \equiv 1$.
-Like \texttt{brat()}, one can choose a different reference group
-and reference value.
-
-
-Other \R{}~packages for the Bradley-Terry model
-include \pkg{BradleyTerry2}
-by H.~Turner and D.~Firth
-\citep[with and without ties;][]{firth:2005,firth:2008}
-and \pkg{prefmod} \citep{Hatzinger:2009}.
-
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}[small]{|l|c|}
-\hline
-\pkg{VGAM} family function & Independent parameters \\
-\hline
-\texttt{ABO()} & $p, q$ \\
-\texttt{MNSs()} & $m_S, m_s, n_S$ \\
-\texttt{AB.Ab.aB.ab()} & $p$ \\
-\texttt{AB.Ab.aB.ab2()} & $p$ \\
-\texttt{AA.Aa.aa()} & $p_A$ \\
-\texttt{G1G2G3()} & $p_1, p_2, f$ \\
-\hline
-\end{tabular}
-\caption{Some genetic models currently implemented
-and their unique parameters.
-\label{tab:gen.all}
-}
-\end{table}
-
-
-
-
-
-\subsection{Genetic models}
-\label{sec:jsscat.genetic}
-
-
-There are quite a number of population genetic models based on the
-multinomial distribution,
-e.g., \cite{weir:1996}, \cite{lang:2002}.
-Table~\ref{tab:gen.all} lists some \pkg{VGAM}~family functions for such models.
-
-
-
-
-For example, the ABO blood group system
-has two independent parameters~$p$ and~$q$, say.
-Here,
-the blood groups A, B and O~form six possible combinations (genotypes)
-consisting of AA, AO, BB, BO, AB, OO
-(see Table~\ref{tab:ABO}). A and~B are dominant over
-bloodtype~O. Let $p$, $q$ and $r$ be the probabilities
-for A, B and~O respectively (so that
-$p+q+r=1$) for a given population.
-The log-likelihood function is
-\[
-\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
-\log(2pq) + 2 n_O\, \log(1-p-q),
-\]
-where $r = 1 - p -q$, $p \in (\,0,1\,)$,
-$q \in (\,0,1\,)$, $p+q<1$.
-We let $\boldeta = (g(p), g(q))^{\top}$ where $g$ is the link function.
-Any~$g$ from Table~\ref{tab:jsscat.links} appropriate for
-a parameter $\theta \in (0,1)$ will do.
-
-
-A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
-coef(fit, matrix = TRUE)
-Coef(fit) # Estimated pA and pB
-@
-The function \texttt{Coef()}, which applies only to intercept-only models,
-applies to $g_{j}(\theta_{j})=\eta_{j}$
-the inverse link function $g_{j}^{-1}$ to~$\widehat{\eta}_{j}$
-to give~$\widehat{\theta}_{j}$.
-
-
-
-
-
-
-
-\begin{table}[tt]
-% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
-\begin{center}
-\begin{tabular}{|l|cc|cc|c|c|}
-\hline
-Genotype & AA & AO & BB & BO & AB & OO \\
-Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
-Blood group& A & A & B & B & AB & O \\
-\hline
-\end{tabular}
-\end{center}
-\caption{Probability table for the ABO blood group system.
-Note that $p$~and $q$~are the parameters and $r=1-p-q$.
-\label{tab:ABO}
-}
-\end{table}
-
-
-
-
-
-\subsection{Three main distributions}
-\label{sec:jsscat.3maindist}
-
-\cite{agre:2002} discusses three main distributions for categorical
-variables: binomial, multinomial, and Poisson
-\citep{thom:2009}.
-All these are well-represented in the \VGAM{} package,
-accompanied by variant forms.
-For example,
-there are \VGAM{} family functions for
-the matched binomial (\texttt{mbinomial()}, suitable for matched
-case-control studies),
-Poisson ordination (useful in ecology for multi-species-environmental data),
-negative binomial families,
-positive, zero-altered and zero-inflated variants,
-and the bivariate odds ratio model
-\citep[\texttt{binom2.or()}; see Section~6.5.6 of][]{mccu:neld:1989}.
-The latter has an \texttt{exchangeable} argument to allow for an
-exchangeable error structure:
-\begin{eqnarray}
-\bH_1 ~=~
-\left( \begin{array}{cc}
-1 & 0 \\
-1 & 0 \\
-0 & 1 \\
-\end{array} \right), ~~~~~
-\bH_k ~=~
-\left( \begin{array}{c}
-1 \\
-1 \\
-0 \\
-\end{array} \right), ~~k=2,\ldots,p,
-\label{eqn:blom.exchangeable}
-\end{eqnarray}
-since, for data $(Y_1,Y_2,\bix)$,
-$\logit \, P\!\left( Y_{j} = 1 \Big{|} \bix \right) =
-\eta_{j}$ for ${j}=1,2$, and
-$\log \, \psi = \eta_{3}$
-where $\psi$~is the odds ratio,
-and so $\eta_{1}=\eta_{2}$.
-Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning
-$\psi$~is modelled as intercept-only
-(in general, \texttt{zero} may be assigned an integer vector
-such that the value~$j$ means $\eta_{j} = \beta_{(j)1}$,
-i.e., the $j$th~linear/additive predictor is intercept-only).
-See the online help for all of these models.
-
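-As a hedged sketch (assuming the usual column names of the
-\texttt{coalminers} data frame shipped with \VGAM{}):
-\begin{Code}
-fit.eo <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age,
-               binom2.or(exchangeable = TRUE, zero = 3),
-               data = coalminers)
-constraints(fit.eo)  # Should match the H_k displayed above
-\end{Code}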
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some user-oriented topics}
-\label{sec:jsscat.userTopics}
-
-
-Making the most of \VGAM{} requires an understanding of the general
-VGLM/VGAM framework described in Section~\ref{sec:jsscat.VGLMVGAMoverview}.
-In this section we connect elements of that framework with the software.
-Before doing so it is noted that
-a fitted \VGAM{} categorical model has access to the usual
-generic functions, e.g.,
-\texttt{coef()} for
-$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
-(see Equation~\ref{eqn:lin.coefs4}),
-\texttt{constraints()} for $\bH_k$,
-\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
-\texttt{fitted()} for $\widehat{\bmu}_i$,
-\texttt{logLik()} for $\ell$,
-\texttt{predict()} for $\widehat{\boldeta}_i$,
-\texttt{print()},
-\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
-\texttt{summary()},
-\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
-etc.
-The methods function for the extractor function
-\texttt{coef()} has an argument \texttt{matrix}
-which, when set \texttt{TRUE}, returns~$\widehat{\bB}$
-(see Equation~\ref{gammod}) as a $p \times M$ matrix,
-and this is particularly useful for confirming that a fit
-has made a parallelism assumption.
-
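-As a sketch, applied to the nonparametric proportional odds model
-\texttt{fit} of Section~\ref{sec:ex.vspline}:
-\begin{Code}
-constraints(fit)       # The H_k; 1_M for s(let) because parallel = TRUE
-head(fitted(fit), 3)   # Fitted probabilities muhat_i
-head(predict(fit), 3)  # Linear/additive predictors etahat_i
-\end{Code}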
-
-
-
-
-
-
-\subsection{Common arguments}
-\label{sec:jsscat.commonArgs}
-
-
-The structure of the unified framework given in
-Section~\ref{sec:jsscat.VGLMVGAMoverview}
-appears clearly through
-the pool of common arguments
-shared by the
-\VGAM{} family functions in Table~\ref{tab:cat.quantities}.
-In particular,
-\texttt{reverse} and
-\texttt{parallel}
-are prominent with CDA.
-These are merely convenient shortcuts for the argument \texttt{constraints},
-which accepts a named list of constraint matrices~$\bH_k$.
-For example, setting
-\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
-in~(\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
-each separately for $k=2,\ldots,p$.
-That is, $\bH_k = \bone_M$.
-The argument~\texttt{reverse} determines the `direction' of
-the parameter or quantity.
-
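-As a sketch, the shortcut and the explicit constraint matrices give
-the same model on the pneumoconiosis data (here $M = 2$):
-\begin{Code}
-clist <- list("(Intercept)" = diag(2), "let" = rbind(1, 1))  # H_1, H_2
-fit.eq <- vglm(cbind(normal, mild, severe) ~ let,
-               cumulative(reverse = TRUE), constraints = clist,
-               data = pneumo)  # Same as cumulative(parallel = TRUE, ...)
-\end{Code}
-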
-Another argument not so much used with CDA is~\texttt{zero};
-this accepts a vector specifying which~$\eta_j$ is to be modelled as
-an intercept-only; assigning a \texttt{NULL} means none.
-
-
-
-
-
-
-
-
-\subsection{Link functions}
-\label{sec:jsscat.links}
-
-Almost all \VGAM{} family functions
-(one notable exception is \texttt{multinomial()})
-allow, in theory, for any link function to be assigned to each~$\eta_j$.
-This provides maximum capability.
-If so then there is an extra argument to pass in any known parameter
-associated with the link function.
-For example, \texttt{link = "logoff", earg = list(offset = 1)}
-signifies a log link with a unit offset:
-$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter~$\theta_{j}\ (> -1)$.
-The name \texttt{earg} stands for ``extra argument''.
-Table~\ref{tab:jsscat.links} lists some links relevant to categorical data.
-While the default gives a reasonable first choice,
-users are encouraged to try different links.
-For example, fitting a binary regression model
-(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers} with
-respect to the response wheeze gives a
-nonsignificant regression coefficient for $\beta_{(1)3}$~with probit analysis
-but not with a logit link when
-$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
-Developers and serious users are encouraged to write and use
-new link functions compatible with~\VGAM.
-
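-For example, a sketch of trying a complementary log-log link (giving a
-proportional-hazards flavour) on the pneumoconiosis data:
-\begin{Code}
-fit.cll <- vglm(cbind(normal, mild, severe) ~ let,
-                cumulative(link = "cloglog", parallel = TRUE,
-                           reverse = TRUE), data = pneumo)
-\end{Code}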
-
-
-
-
-
-\begin{table*}[tt]
-\centering
-\medskip
-\begin{tabular}{|l|c|c|}
-\hline
-Link function & $g(\theta)$ & Range of $\theta$ \\
-\hline
-\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
-\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
-\texttt{fisherz()} &
-$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
-\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
-\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
-\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
-\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
-\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
-\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\hline
-\end{tabular}
-\caption{
-Some \VGAM{} link functions pertinent to this article.
-\label{tab:jsscat.links}
-}
-\end{table*}
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Examples}
-\label{sec:jsscat.eg}
-
-This section illustrates CDA modeling on three
-data sets in order to give a flavour of what is available in the package.
-
-
-
-
-%20130919
-%Note:
-%\subsection{2008 World Fly Fishing Championships}
-%\label{sec:jsscat.eg.WFFC}
-%are deleted since there are problems with accessing the \texttt{wffc.nc}
-%data etc. since they are now in \pkg{VGAMdata}.
-
-
-
-
-
-
-
-\subsection{Marital status data}
-\label{sec:jsscat.eg.mstatus}
-
-We fit a nonparametric multinomial logit model to data collected from
-a self-administered questionnaire administered in a large New Zealand
-workforce observational study conducted during 1992--3.
-The data were augmented by a second study consisting of retirees.
-For homogeneity, this analysis is restricted
-to a subset of 6053 European males with no missing values.
-The ages ranged between~16 and 88~years.
-The data can be considered a reasonable representation of the white
-male New Zealand population in the early 1990s, and
-are detailed in~\cite{macm:etal:1995} and~\cite{yee:wild:1996}.
-We are interested in exploring how $Y=$ marital status varies as a function
-of $x_2=$ age. The nominal response~$Y$ has four levels;
-in sorted order, they are divorced or separated, married or partnered,
-single and widowed.
-We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
-and will choose the married/partnered (second level) as the reference group
-because the other levels emanate directly from it.
-
-Suppose the data is in a data frame called \texttt{marital.nz}
-and looks like
-<<>>=
-head(marital.nz, 4)
-summary(marital.nz)
-@
-We fit the VGAM
-<<>>=
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
- data = marital.nz)
-@
-
-Once again let's firstly check the input.
-<<>>=
-head(fit.ms@y, 4)
-colSums(fit.ms@y)
-@
-This seems ok.
-
-
-
-
-Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
-may be plotted with
-<<fig=F>>=
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlayed
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-@
-to produce Figure~\ref{fig:jsscat.eg.mstatus}.
-The \texttt{scale} argument is used here to ensure that the $y$-axes have
-a common scale---this makes comparisons between the component functions
-less susceptible to misinterpretation.
-The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
-$\eta_1$,
-$\eta_2$,
-$\eta_3$,
-where
-\begin{eqnarray}
-\label{eq:jsscat.eg.nzms.cf}
-\eta_{s} ~=~
-\log(\pr(Y={t}) / \pr(Y={2})) ~=~
-\beta_{(s)1} + f_{(s)2}(x_2),
-\end{eqnarray}
-$(s,t) = (1,1), (2,3), (3,4)$,
-and~$x_2$ is~\texttt{age}.
-The last plot shows the smooths overlaid to aid comparison.
-
-
-It may be seen that the $\pm 2$ standard error bands
-about the \texttt{Widowed} group are particularly wide at
-young ages because of a paucity of data, and
-likewise at old ages amongst the \texttt{Single}s.
-The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
-The log relative risk of
-being single relative to being married/partnered drops sharply from
-ages~16 to~40.
-The fitted function for the~\texttt{Widowed} group increases
-with~\texttt{age} and looks reasonably linear.
-The $\widehat{f}_{(1)2}(x_2)$
-suggests a possible maximum around 50~years old---this
-could indicate the greatest marital conflict occurs during
-the mid-life crisis years!
-
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-mycol <- c("red","darkgreen","blue")
- par(mfrow=c(2,2))
- par(mar=c(4.2,4.0,1.2,2.2)+0.1)
-plot(fit.ms, se=TRUE, scale=12,
- lcol=mycol, scol=mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se=TRUE, scale=12,
- overlay=TRUE,
- llwd=2,
- lcol=mycol, scol=mycol)
-@
-\caption{
-Fitted (and centered) component functions
-$\widehat{f}_{(s)2}(x_2)$
-from the NZ marital status data
-(see Equation~\ref{eq:jsscat.eg.nzms.cf}).
-The bottom right plot shows the smooths overlaid.
-\label{fig:jsscat.eg.mstatus}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-The methods function for~\texttt{plot()} can also plot the
-derivatives of the smooths.
-The call
-<<fig=F>>=
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-@
-results in Figure~\ref{fig:jsscat.eg.mstatus.cf.deriv}.
-Once again the $y$-axis scales are commensurate.
-
-\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,3))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
-@
-\caption{
-Estimated first derivatives of the component functions,
-$\widehat{f'}_{(s)2}(x_2)$,
-from the NZ marital status data
-(see Equation~\ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.cf.deriv}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-The derivative for the \texttt{Divorced/Separated} group appears
-linear so that a quadratic component function could be tried.
-Not surprisingly the \texttt{Single} group shows the greatest change;
-also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till~50
-and then flat---this suggests one could fit a piecewise quadratic
-function to model that component function up to 50~years.
-The~\texttt{Widowed} group appears largely flat.
-We thus fit the parametric model
-<<>>=
-foo <- function(x, elbow=50)
- poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
- "poly(age, 2)" = rbind(1, 0, 0),
- "foo(age)" = rbind(0, 1, 0),
- "age" = rbind(0, 0, 1))
-fit2.ms <-
- vglm(mstatus ~ poly(age, 2) + foo(age) + age,
- family = multinomial(refLevel = 2),
- constraints = clist,
- data = marital.nz)
-@
-Then
-<<>>=
-coef(fit2.ms, matrix = TRUE)
-@
-confirms that one term was used for each component function.
-The plots from
-<<fig=F>>=
-par(mfrow=c(2,2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol=mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-are given in Figure~\ref{fig:jsscat.eg.mstatus.vglm}
-and appear like
-Figure~\ref{fig:jsscat.eg.mstatus}.
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
- lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-\caption{
-Parametric version of~\texttt{fit.ms}: \texttt{fit2.ms}.
-The component functions are now quadratic, piecewise quadratic/zero,
-or linear.
-\label{fig:jsscat.eg.mstatus.vglm}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-It is possible to perform very crude inference based on the heuristic
-theory of a deviance test:
-<<>>=
-deviance(fit.ms) - deviance(fit2.ms)
-@
-is small, so it seems the parametric model is quite reasonable
-against the original nonparametric model.
-Specifically,
-the difference in the number of `parameters' is approximately
-<<>>=
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-@
-which gives an approximate $p$~value of
-<<>>=
-1-pchisq(deviance(fit.ms) - deviance(fit2.ms), df=dfdiff)
-@
-Thus \texttt{fit2.ms} appears quite reasonable.
-
-
-
-
-
-
-
-
-The estimated probabilities of the original fit can be plotted
-against~\texttt{age} using
-<<fig=F>>=
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age", # main="Marital status amongst NZ Male Europeans",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62, # x="topright",
-       col=c(mycol[1], "black", mycol[-1]),
-       lty=1:4,
-       legend=colnames(fit.ms@y), lwd=2)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-@
-which gives Figure~\ref{fig:jsscat.eg.mstatus.fitted}.
-This shows that roughly 80--90\%~of NZ white males
-aged between their early~30s to mid-70s
-were married/partnered.
-The proportion widowed
-started to rise steeply from 70~years onwards but remained below~0.5
-since males die younger than females on average.
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
- type="l", las=1, lwd=2, ylim=0:1,
- ylab="Fitted probabilities",
- xlab="Age",
- col=c(mycol[1], "black", mycol[-1])))
-legend(x=52.5, y=0.62,
-       col=c(mycol[1], "black", mycol[-1]),
-       lty=1:4,
-       legend=colnames(fit.ms@y), lwd=2.1)
-abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
-@
-\caption{
-Fitted probabilities for each class for the
-NZ male European
-marital status data
-(from Equation~\ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.fitted}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-\subsection{Stereotype model}
-\label{sec:jsscat.eg.grc.stereotype}
-
-We reproduce some of the analyses of \cite{ande:1984} regarding the
-progress of 101~patients with back pain
-using the data frame \texttt{backPain} from \pkg{gnm}
-\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
-The three prognostic variables are
-length of previous attack ($x_1=1,2$),
-pain change ($x_2=1,2,3$)
-and lordosis ($x_3=1,2$).
-Following Anderson, we treat these as numerical, and standardize and negate them.
-%
-The output
-<<>>=
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-@
-displays the six ordered categories.
-Now a rank-1 stereotype model can be fitted with
-<<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain)
-@
-Then
-<<>>=
-Coef(bp.rrmlm1)
-@
-are the fitted \bA, \bC{} and~$\bB_1$
-(see Equation~\ref{eq:rrr.BAC} and Table~\ref{tab:rrvglam.jss.subset}),
-which agree with his Table~6.
-Here, what is known as ``corner constraints'' is used
-($(1,1)$ element of \bA{} $\equiv 1$),
-and only the intercepts are not subject to any reduced-rank regression
-by default.
-The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
-is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
-The standard errors of each parameter can be obtained by
-\textsl{\texttt{summary(bp.rrmlm1)}}.
-The negative elements of~$\widehat{\bC}$ imply the
-latent variable~$\widehat{\nu}$ decreases in value with increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}}.
-The elements of~$\widehat{\bA}$ tend to decrease so it suggests
-patients get worse as $\nu$~increases,
-i.e., get better as \textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} increase.
-
-
-
-
-
-
-<<echo=FALSE>>=
-set.seed(123)
-@
-A rank-2 model fitted \textit{with a different normalization}
-<<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
- Corner = FALSE, Uncor = TRUE)
-@
-produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
-In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
-so that the latent variables are also scaled to have unit variance.
-The fit was biplotted
-(rows of $\widehat{\bC}$ plotted as arrows;
- rows of $\widehat{\bA}$ plotted as labels) using
-<<figure=F>>=
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-@
-to give Figure~\ref{fig:jsscat.eg.rrmlm2.backPain}.
-It is interpreted via inner products due to~(\ref{eq:rrr.BAC}).
-The different normalization means that the interpretation of~$\nu_1$
-and~$\nu_2$ has changed, e.g., increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
-patients improve more.
-Many of the latent variable points $\widehat{\bnu}_i$ are coincident
-due to the discrete nature of the~$\bix_i$. The rows of~$\widehat{\bA}$
-are centered on the blue labels (rather cluttered unfortunately) and
-do not seem to vary much as a function of~$\nu_2$.
-In fact this is confirmed by~\cite{ande:1984} who showed a rank-1
-model is to be preferred.
-
-
-
-This example demonstrates the ability to obtain a low dimensional view
-of higher dimensional data. The package's website has additional
-documentation including more detailed Goodman's~RC and stereotype
-examples.
-
-
-
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
-# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
- xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
- chull=TRUE, clty=2, ccol="blue")
-@
-\caption{
-Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
-fitted to the back pain data.
-A convex hull surrounds the latent variable scores
-$\widehat{\bnu}_i$
-(whose observation numbers are obscured because of their discrete nature).
-The position of the $j$th~row of~$\widehat{\bA}$
-is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
-\label{fig:jsscat.eg.rrmlm2.backPain}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some implementation details}
-\label{sec:jsscat.implementDetails}
-
-This section describes some implementation details of~\VGAM{}
-which will be more of interest to the developer than to the casual user.
-
-
-
-\subsection{Common code}
-\label{sec:jsscat.implementDetails.code}
-
-It is good programming practice to write reusable code where possible.
-All the \VGAM{}~family functions in Table~\ref{tab:cat.quantities}
-process the response in the same way because the same segment of code
-is executed. This offers a degree of uniformity in terms of how input is
-handled, and also for software maintenance
-(\cite{altm:jack:2010} enumerates good programming techniques and references).
-As well, the default initial values are computed in the same manner
-based on sample proportions of each level of~$Y$.
-
-
-
-
-
-\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
-\label{sec:jsscat.implementDetails.mbformat}
-
-The working weight matrices $\bW_i$ may become large for categorical
-regression models. In general, we have to evaluate the~$\bW_i$
-for~$i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
-dimension~\texttt{c(M, M, n)}. However, since the~$\bW_i$ are symmetric
-positive-definite it suffices to only store the upper or lower half of
-the matrix.
-
-
-
-The variable~\texttt{wz} in \texttt{vglm.fit()}
-stores the working weight matrices $\bW_i$ in
-a special format called the \textit{matrix-band} format. This
-format comprises an $n \times M^*$ matrix where
-$$
-M^* ~=~ \sum_{i=1}^{\footnotesize \textit{hbw}} \;
-\left(M-i+1\right) ~=~
-\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
-$$
-is the number of columns. Here, \textit{hbw} refers to the
-\textit{half-bandwidth} of the matrix, which is an integer
-between~1 and~$M$ inclusive. A diagonal matrix has
-unit half-bandwidth, a tridiagonal matrix has half-bandwidth~2, etc.
-
-
-Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
-enumerating the unique elements of~$\bW_i$ as follows:
-\begin{eqnarray}
-\bW_i ~=~
-\left( \begin{array}{rrrr}
-1 & 5 & 8 & 10 \\
- & 2 & 6 & 9 \\
- & & 3 & 7 \\
- & & & 4
-\end{array} \right).
-\label{eqn:hbw.eg}
-\end{eqnarray}
-That is, the order is firstly the diagonal, then the band above that,
-followed by the second band above the diagonal etc.
-Why is such a format adopted?
-For this example, if $\bW_i$ is diagonal then only the first 4 columns
-of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
-first~7 columns of \texttt{wz} are needed.
-If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
-all $\frac12 M(M+1)$ columns; only~$M^*$ columns suffice, and the
-rest of the elements of~$\bW_i$ are implicitly zero.
-As well as reducing the size of \texttt{wz} itself in most cases, the
-matrix-band format often makes the computation of \texttt{wz} very
-simple and efficient. Furthermore, a Cholesky decomposition of a
-banded matrix will be banded. A final reason is that sometimes we
-want to input~$\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
-n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
-whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
-
-
-
-To facilitate the use of the matrix-band format,
-a few auxiliary functions have been written.
-In particular, there is \texttt{iam()} which gives the indices
-for the array-to-matrix mapping.
-In the $4\times 4$ example above,
-<<>>=
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-@
-returns the indices for the respective array coordinates for
-successive columns of matrix-band format
-(see Equation~\ref{eqn:hbw.eg}).
-If \texttt{diag = FALSE} then the first~4 elements in each vector
-are omitted. Note that the first two arguments of
-\texttt{iam()} are not used here and have been assigned
-\texttt{NA}s for simplicity.
-For its use on the multinomial logit model, where
-$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and
-$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
-this can be programmed succinctly like
-\begin{Code}
-wz <- mu[, 1:M] * (1 - mu[, 1:M])  # Diagonal: mu_ij * (1 - mu_ij)
-if (M > 1) {
-  index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
-  wz <- cbind(wz, -mu[, index$row] * mu[, index$col])  # Off-diagonals
-}
-wz <- w * wz  # Scale by the prior weights w_i
-\end{Code}
-(the actual code is slightly more complicated).
-In general, \VGAM{}~family functions can be remarkably compact,
-e.g.,
-\texttt{acat()},
-\texttt{cratio()}
-and
-\texttt{multinomial()} are all less than 120~lines of code each.
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Extensions and utilities}
-\label{sec:jsscat.extnUtil}
-
-This section describes some useful utilities/extensions of the above.
-
-
-
-\subsection{Marginal effects}
-\label{sec:jsscat.extnUtil.margeff}
-
-
-Models such as the multinomial logit and cumulative link models
-model the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
-In some applications, knowing the derivative of~$p_{j}$
-with respect to some of the~$x_k$ is useful;
-in fact, often just knowing the sign is important.
-The function \texttt{margeff()} computes the derivatives and
-returns them as a $p \times (M+1) \times n$ array.
-For the multinomial logit model it is easy to show
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_i)}{\partial \,
-\bix_{i}}
-&=&
-p_{j}(\bix_i)
-\left\{
- \bbeta_{j} -
-\sum_{s=1}^{M+1}
-p_{s}(\bix_i)
-\,
- \bbeta_{s}
-\right\},
-\label{eqn:multinomial.marginalEffects}
-\end{eqnarray}
-while for
-\texttt{cumulative(reverse = FALSE)}
-we have
-$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
-where $h=g^{-1}$ is the inverse of the link function
-(cf.~Table~\ref{tab:cat.quantities})
-so that
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_{})}{\partial \,
-\bix}
-&=&
-h'(\eta_{j}) \, \bbeta_{j} -
-h'(\eta_{j-1}) \, \bbeta_{j-1} .
-\label{eqn:cumulative.marginalEffects}
-\end{eqnarray}
-
-
-
-
-The function \texttt{margeff()} returns an array with these
-derivatives and should handle any value of
-\texttt{reverse} and \texttt{parallel}.
-
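-A hedged sketch applied to the parametric fit of
-Section~\ref{sec:jsscat.eg.mstatus} (assuming \texttt{margeff()}
-accepts such a fit):
-\begin{Code}
-marg <- margeff(fit2.ms)  # A p x (M+1) x n array
-dim(marg)
-marg[, , 1]               # Derivatives for the first observation
-\end{Code}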
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\subsection[The xij argument]{The \texttt{xij} argument}
-\label{sec:jsscat.extnUtil.xij}
-
-There are many models, including those for categorical data,
-where the value of an explanatory variable~$x_k$ differs depending
-on which linear/additive predictor~$\eta_{j}$ it enters.
-Here is a well-known example from consumer choice modeling.
-Suppose an econometrician is interested in peoples'
-choice of transport for travelling to work
-and that there are four choices:
-$Y=1$ for ``bus'',
-$Y=2$ ``train'',
-$Y=3$ ``car'' and
-$Y=4$ means ``walking''.
-Assume that people only choose one means to go to work.
-Suppose there are three covariates:
-$X_2=$ cost,
-$X_3=$ journey time, and
-$X_4=$ distance.
-Of the covariates only~$X_4$ (and the intercept~$X_1$)
-is the same for all transport choices;
-the cost and journey time differ according to the means chosen.
-Suppose a random sample of~$n$ people is collected
-from some population, and that each person has
-access to all these transport modes.
-For such data, a natural regression model would be a
-multinomial logit model with~$M=3$:
-for $j=1,\ldots,M$, we have
-\begin{eqnarray}
-\eta_{j} ~=~
-\log \frac{\pr(Y=j)}{\pr(Y=M+1)}
-&=&
-\beta_{(j)1}^{*} +
-\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
-\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
-\beta_{(1)4}^{*} \, x_{i4},
-\label{eqn:xij.eg.gotowork}
-\end{eqnarray}
-where, for the~$i$th person,
-$x_{i2j}$ is the cost for the~$j$th transport means, and
-$x_{i3j}$ is the journey time of the~$j$th transport means.
-The distance to get to work is $x_{i4}$; it has the same value
-regardless of the transport means.
-
-
-Equation~\ref{eqn:xij.eg.gotowork}
-implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
-Note
-also that if the last response category is used as the baseline or
-reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
-can be subtracted from $x_{ikj}$ for~$j=1,\ldots,M$---this
-is the natural way $x_{ik,M+1}$ enters into the model.
-
-
-
-
-Recall from~(\ref{gammod2}) that we had
-\begin{equation}
-\eta_j(\bix_i) ~=~ \bbeta_j^{\top} \bix_i ~=~
-\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
-\label{eqn:xij0}
-\end{equation}
-Importantly, this can be generalized to
-\begin{equation}
-\eta_j(\bix_{ij}) ~=~ \bbeta_j^{\top} \bix_{ij} ~=~
-\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
-\label{eqn:xij}
-\end{equation}
-or writing this another way (as a mixture or hybrid),
-\begin{equation}
-\eta_j(\bix_{i}^{*},\bix_{ij}^{*}) ~=~
-\bbeta_{j}^{*T} \bix_{i}^{*} + \bbeta_{j}^{**T} \bix_{ij}^{*} .
-\label{eqn:xij2}
-\end{equation}
-Often $\bbeta_{j}^{**} = \bbeta^{**}$, say.
-In~(\ref{eqn:xij2}) the variables in~$\bix_{i}^{*}$ are common to
-all~$\eta_{j}$, and the variables in~$\bix_{ij}^{*}$ have
-different values for differing~$\eta_{j}$.
-This allows for covariate values that are specific to each~$\eta_j$,
-a facility which is very important in many applications.
-
-
-The use of the \texttt{xij} argument with the \VGAM{} family function
-\texttt{multinomial()} has very important applications in economics.
-In that field the term ``multinomial logit model'' includes a variety of
-models such as the ``generalized logit model'' where (\ref{eqn:xij0})
-holds, the ``conditional logit model'' where~(\ref{eqn:xij}) holds,
-and the ``mixed logit model,'' which is a combination of the two,
-where~(\ref{eqn:xij2}) holds.
-The generalized logit model focusses on the individual as the unit of
-analysis, and uses individual characteristics as explanatory variables,
-e.g., age of the person in the transport example.
-The conditional logit model assumes different values for each
-alternative and the impact of a unit of~$x_k$ is assumed to be constant
-across alternatives, e.g., journey time in the choice of transport mode.
-Unfortunately, there is confusion in the literature about the
-terminology of these models. Some authors call \texttt{multinomial()}
-with~(\ref{eqn:xij0}) the ``generalized logit model''.
-Others call the mixed
-logit model the ``multinomial logit model'' and view the generalized
-logit and conditional logit models as special cases.
-In~\VGAM{} terminology there is no need to give different names to
-all these slightly differing special cases. They are all still called
-multinomial logit models, although it may be added that there are
-some covariate-specific linear/additive predictors.
-The important thing is that the framework accommodates~$\bix_{ij}$,
-so one tries to avoid making life unnecessarily complicated.
-And~\texttt{xij} can apply in theory to any VGLM and not just to the
-multinomial logit model.
-\cite{imai:king:lau:2008} present another perspective on the
-$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
-\citep{Zelig:2009}.
-
-
-
-
-
-\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
-\label{sec:xij.sub}
-
-\VGAM{} handles variables whose values depend on $\eta_{j}$,
-(\ref{eqn:xij2}), using the \texttt{xij} argument.
-It is assigned an \proglang{S}~formula or a list of \proglang{S}~formulas.
-Each formula, which must have~$M$ \textit{different} terms,
-forms a matrix that premultiplies a constraint matrix.
-In detail, (\ref{eqn:xij0})~can be written in vector form as
-\begin{equation}
-\boldeta(\bix_i) ~=~ \bB^{\top} \bix_i ~=~
-\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
-\label{eqn:xij0.vector}
-\end{equation}
-where
-$\bbeta_{k}^{*} =
-\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
-is to be estimated.
-This may be written
-\begin{eqnarray}
-\boldeta(\bix_{i})
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
-\bH_k \, \bbeta_{k}^{*}.
-\label{eqn:xij.d.vector}
-\end{eqnarray}
-To handle~(\ref{eqn:xij})--(\ref{eqn:xij2})
-we can generalize~(\ref{eqn:xij.d.vector}) to
-\begin{eqnarray}
-\boldeta_i
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
-\bH_k \, \bbeta_{k}^{*}
-\ \ \ \ \left(=
-\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
-\bH_k \, \bbeta_{k}^{*} ,
-\mathrm{\ say} \right).
-\label{eqn:xij.vector}
-\end{eqnarray}
-Each component of the list \texttt{xij} is a formula having~$M$ terms
-(ignoring the intercept) which
-specifies the successive diagonal elements of the matrix~$\bX_{(ik)}^{*}$.
-Thus each row of the constraint matrix may be multiplied by a different
-vector of values.
-The constraint matrices themselves are not affected by the
-\texttt{xij} argument.
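-
-
-As a toy numerical illustration of~(\ref{eqn:xij.vector}) (a sketch
-only; the values below are hypothetical), suppose $M = 3$,
-$\bH_k = \bone_{3}$ (a parallelism constraint), and the $k$th variable
-takes a different value for each~$\eta_j$:
-\begin{Code}
-Hk    <- matrix(1, 3, 1)    # H_k = (1,1,1)^T, hence one coefficient
-xik   <- c(2.1, 3.0, 1.2)   # x_ik1, x_ik2, x_ik3 for the three etas
-betak <- 0.5                # the single regression coefficient
-diag(xik) %*% Hk %*% betak  # term k's contribution to eta_i
-\end{Code}
-Each~$\eta_j$ receives a different contribution because the diagonal
-matrix carries the $\eta_j$-specific covariate values.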
-
-
-
-
-
-How can one fit such models in \VGAM{}?
-Let us fit~(\ref{eqn:xij.eg.gotowork}).
-Suppose the journey cost and time variables have had the
-cost and time of walking subtracted from them.
-Then,
-using ``\texttt{.trn}'' to denote train,
-\begin{Code}
-fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
-             fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
-             xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~ Cost.bus + Cost.trn + Cost.car +
-                       Time.bus + Time.trn + Time.car +
-                       Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-should do the job.
-Here, the argument \texttt{form2} is assigned a second \proglang{S}~formula which
-is used in some special circumstances or by certain types
-of~\VGAM{} family functions.
-The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
-because the lack of parallelism only applies to the intercept.
-However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
-\texttt{Time} is the same as \texttt{Time.bus},
-this model should not be plotted with \texttt{plotvgam()};
-see the author's homepage for further documentation.
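-
-
-As a check (a sketch, assuming \texttt{fit2} was fitted successfully),
-the constraint matrices may be inspected directly:
-\begin{Code}
-constraints(fit2)  # H_1 = I_3; H_2 = H_3 = H_4 = (1,1,1)^T
-\end{Code}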
-
-
-By the way,
-suppose
-$\beta_{(1)4}^{*}$
-in~(\ref{eqn:xij.eg.gotowork})
-is replaced by~$\beta_{(j)4}^{*}$.
-Then the above code, but with
-\begin{Code}
-  fam = multinomial(parallel = FALSE ~ 1 + Distance),
-\end{Code}
-should fit this model.
-Equivalently,
-\begin{Code}
-  fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
-\end{Code}
-should also work.
-
-
-
-
-
-
-\subsubsection{A more complicated example}
-\label{sec:xij.complicated}
-
-The above example is straightforward because the
-variables were entered linearly. However, things
-become trickier if data-dependent functions are used in
-any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
-In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
-can be used to estimate a general smooth function~$f(x_{ij})$, which is
-very useful for exploratory data analysis.
-
-
-
-Suppose we wish to fit the variable \texttt{Cost} with a smoother.
-This is possible with regression splines and using a trick.
-Firstly note that
-\begin{Code}
-fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
-             multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
-             xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
-                       Time.bus + Time.trn + Time.car +
-                       ns(Cost) + Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-will \textit{not} work because the basis functions for
-\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
-are not identical since the knots differ.
-Consequently, they represent different functions despite
-having common regression coefficients.
-
-
-Fortunately, it is possible to force the~\texttt{ns()} terms
-to have identical basis functions by using a trick:
-combine the vectors temporarily.
-To do this, one can let
-\begin{Code}
-NS <- function(x, ..., df = 3)
- ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
-\end{Code}
-This computes a natural cubic B-spline evaluated at~\texttt{x} but it uses the
-other arguments as well to form an overall vector from which to obtain
-the (common) knots.
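-
-That the knots depend only on the combined data, and not on the order
-of the arguments, can be verified directly (a minimal sketch;
-\texttt{x1} and \texttt{x2} are hypothetical vectors):
-\begin{Code}
-library(splines)                  # ns() lives here
-set.seed(1)
-x1 <- runif(10); x2 <- runif(10)  # two hypothetical cost variables
-all.equal(attr(ns(c(x1, x2), df = 3), "knots"),
-          attr(ns(c(x2, x1), df = 3), "knots"))  # TRUE: common knots
-\end{Code}
-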
-Then the usage of \texttt{NS()} can be something like
-\begin{Code}
-fit4 <- vglm(cbind(bus, train, car, walk) ~
-               NS(Cost.bus, Cost.trn, Cost.car) + Time + Distance,
-             multinomial(parallel = TRUE ~
-               NS(Cost.bus, Cost.trn, Cost.car) + Time + Distance - 1),
-             xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
-                          NS(Cost.bus, Cost.trn, Cost.car) +
-                          NS(Cost.trn, Cost.car, Cost.bus) +
-                          NS(Cost.car, Cost.bus, Cost.trn),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~ NS(Cost.bus, Cost.trn, Cost.car) +
-                       NS(Cost.trn, Cost.car, Cost.bus) +
-                       NS(Cost.car, Cost.bus, Cost.trn) +
-                       Time.bus + Time.trn + Time.car +
-                       Cost.bus + Cost.trn + Cost.car +
-                       Time + Distance,
-             data = gotowork)
-\end{Code}
-So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
-is the smooth term for
-\texttt{Cost.bus}, etc.
-Furthermore, \texttt{plotvgam()} may be applied to
-\texttt{fit4}, in which case the fitted regression spline is plotted
-against its first inner argument, viz.~\texttt{Cost.bus}.
-
-
-One reason it also predicts correctly is ``smart prediction''
-\citep{Rnews:Yee:2008}, whereby data-dependent quantities such as
-the knots are saved at fitting time and reused at prediction time.
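-
-
-For example (a sketch, assuming \texttt{fit4} and \texttt{gotowork}
-exist), prediction on new data reuses the knots computed at fitting
-time rather than recomputing them from the new data:
-\begin{Code}
-predict(fit4, newdata = head(gotowork))
-\end{Code}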
-
-
-
-\subsubsection{Implementation details}
-\label{sec:jss.xij.implementationDetails}
-
-The~\texttt{xij} argument operates \textit{after} the
-ordinary $\bX_{\sVLM}$ matrix is created. Selected columns
-of~$\bX_{\sVLM}$ are then modified using the constraint
-matrices~$\bH_k$ and the \texttt{xij} and~\texttt{form2} arguments,
-that is, using \texttt{form2}'s model matrix~$\bX_{\sformtwo}$.
-This whole operation is possible because $\bX_{\sVLM}$ remains
-structurally the same.
-The crucial equation is~(\ref{eqn:xij.vector}).
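-
-
-The modified matrix can be examined directly (a sketch, assuming
-\texttt{fit2} exists; the \texttt{type} argument selects between the
-large VLM model matrix and the ordinary LM one):
-\begin{Code}
-head(model.matrix(fit2, type = "vlm"))  # the big VLM model matrix
-head(model.matrix(fit2, type = "lm"))   # the ordinary LM model matrix
-\end{Code}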
-
-
-Other \texttt{xij} examples are given in the online help of
-\texttt{fill()} and \texttt{vglm.control()},
-as well as at the package's webpage.
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Discussion}
-\label{sec:jsscat.discussion}
-
-
-This article has sought to convey how VGLMs/VGAMs are well suited for
-fitting regression models for categorical data. Their primary strength
-is a simple and unified framework which, when reflected in software,
-makes practical CDA more understandable and efficient. Furthermore,
-there are natural extensions such as a reduced-rank variant and
-covariate-specific~$\eta_{j}$. The \VGAM{}~package potentially offers
-a wide selection of models and utilities.
-
-
-There is much future work to do.
-Some useful additions to the package include:
-\begin{enumerate}
-
-\item
-Bias-reduction \citep{firt:1993} is a method for removing the~$O(n^{-1})$
-bias from a maximum likelihood estimate. For a substantial class of
-models including GLMs it can be formulated in terms of a minor adjustment
-of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
-One by-product, for logistic regression, is that while the maximum
-likelihood estimate (MLE) can be infinite, the adjustment leads to
-estimates that are always finite. At present the \R{}~package \pkg{brglm}
-\citep{Kosmidis:2008} implements bias reduction for a number of models;
-see the sketch after this list.
-In \VGAM{}, bias reduction might be implemented by adding an argument
-\texttt{bred = FALSE}, say, to some existing family functions.
-
-
-\item
-Nested logit models were developed to overcome a fundamental shortcoming
-of the multinomial logit model, viz.~the independence of
-irrelevant alternatives~(IIA) assumption. Roughly, the multinomial logit
-model assumes that the ratio of the choice probabilities of two
-alternatives does not depend on the presence or absence of other
-alternatives in the model. This presents problems that are often
-illustrated by the famed red bus--blue bus problem.
-
-
-
-
-\item
-The generalized estimating equations (GEE) methodology is largely
-amenable to IRLS and this should be added to the package in the future
-\citep{wild:yee:1996}.
-
-
-\item
-For logistic regression, \proglang{SAS}'s \code{proc logistic} gives
-a warning if the data are \textit{completely separated} or
-\textit{quasi-completely separated}. The effect of such data is that
-some regression coefficients tend to~$\pm\infty$. With such data,
-all \R{}~implementations (to my knowledge) give warnings that are
-vague, if any at all, and this is rather unacceptable \citep{alli:2004}.
-The \pkg{safeBinaryRegression} package
-\citep{Konis:2009} overloads \code{glm()} so that a check for the
-existence of the MLE is made before fitting a binary response GLM;
-see the sketch after this list.
-
-
-\end{enumerate}
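-
-To illustrate points~1 and~4 above, here is a minimal sketch
-(the toy variables \texttt{x} and \texttt{y} are hypothetical, and
-the \pkg{safeBinaryRegression} and \pkg{brglm} packages are assumed
-to be installed):
-\begin{Code}
-## A completely separated toy data set: x < 2.5 predicts y perfectly
-x <- c(1, 2, 3, 4)
-y <- c(0, 0, 1, 1)
-
-library(safeBinaryRegression)     # masks stats::glm()
-## glm(y ~ x, family = binomial)  # would now stop with an error,
-##                                # because the MLE does not exist
-
-library(brglm)
-fit <- brglm(y ~ x, family = binomial)  # bias-reduced fit
-coef(fit)                               # finite estimates
-\end{Code}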
-
-
-In closing, the \pkg{VGAM} package is continually being developed;
-therefore some future changes in the implementation details and usage
-may occur. These may include non-backward-compatible changes (see the
-\code{NEWS} file). Further documentation and updates are available at
-the author's homepage, whose URL is given in the \code{DESCRIPTION} file.
-
-
-
-% ----------------------------------------------------------------------
-\section*{Acknowledgments}
-
-The author thanks Micah Altman, David Firth and Bill Venables for helpful
-conversations, and Ioannis Kosmidis for a reprint.
-Thanks also to The Institute for Quantitative Social Science at Harvard
-University for their hospitality while this document was written during a
-sabbatical visit.
-
-
-
-
-
-\bibliography{categoricalVGAMbib}
-
-\end{document}
-
-
-
-
diff --git a/vignettes/categoricalVGAMbib.bib b/vignettes/categoricalVGAMbib.bib
deleted file mode 100644
index 7343b44..0000000
--- a/vignettes/categoricalVGAMbib.bib
+++ /dev/null
@@ -1,611 +0,0 @@
- at article{yee:wild:1996,
- Author = {Yee, T. W. and Wild, C. J.},
- Title = {Vector Generalized Additive Models},
- Year = 1996,
- JOURNAL = {Journal of the Royal Statistical Society~B},
- Volume = 58,
- Pages = {481--493},
- Keywords = {Nonparametric regression; Smoothing},
- Number = 3,
-}
-
- at article{gree:1984,
- Author = {Green, P. J.},
- Title = {Iteratively Reweighted Least Squares for Maximum Likelihood
- Estimation, and Some Robust and Resistant Alternatives},
- Year = 1984,
- JOURNAL = {Journal of the Royal Statistical Society~B},
- Volume = 46,
- Pages = {149--192},
- Keywords = {Scoring; Generalized linear model; Regression; Residual},
- Number = 2,
-}
-
- at book{hast:tibs:1990,
- Author = {Hastie, T. J. and Tibshirani, R. J.},
- Title = {Generalized Additive Models},
- Year = 1990,
- Publisher = {Chapman \& Hall},
- Address = {London},
- Pages = {335},
- Keywords = {Regression; Nonparametric; Generalized linear model}
-}
-
- at Manual{gam:pack:2009,
- title = {\pkg{gam}: Generalized Additive Models},
- author = {Trevor Hastie},
- year = {2008},
- note = {\proglang{R}~package version~1.01},
- url = {http://CRAN.R-project.org/package=gam}
-}
-
- at article{ande:1984,
- Author = {Anderson, J. A.},
- Title = {Regression and Ordered Categorical Variables},
- Year = 1984,
- JOURNAL = {Journal of the Royal Statistical Society~B},
- Volume = 46,
- Pages = {1--30},
- Keywords = {Assessed variable; Logistic regression; Stereotype
- regression; Maximum likelihood},
- Number = 1,
-}
-
- at article{firt:1993,
-author = {Firth, D.},
-title = {Bias Reduction of Maximum Likelihood Estimates},
-journal = {Biometrika},
-volume = {80},
-pages = {27--38},
-year = {1993},
-number = {1},
-abstract = {It is shown how, in regular parametric problems, the
-first-order term is removed from the asymptotic bias of maximum likelihood
-estimates by a suitable modification of the score function. In exponential
-families with canonical parameterization the effect is to penalize the
-likelihood by the Jeffreys invariant prior. In binomial logistic models,
-Poisson log linear models and certain other generalized linear models,
-the Jeffreys prior penalty function can be imposed in standard regression
-software using a scheme of iterative adjustments to the data.},
-}
-
- at InProceedings{alli:2004,
- Author = {Allison, P.},
- Title = {Convergence Problems in Logistic Regression},
- chapter = {10},
- Year = 2004,
- Crossref = {altm:gill:mcdo:2004},
- Pages = {238--252},
- BookTITLE = {Numerical Issues in Statistical Computing for the Social
- Scientist},
- PUBLISHER = {Wiley-Interscience},
- ADDRESS = {Hoboken, NJ, USA},
-}
-
- at book {altm:gill:mcdo:2004,
- AUTHOR = {Altman, Micah and Gill, Jeff and McDonald, Michael P.},
- TITLE = {Numerical Issues in Statistical Computing for the Social
- Scientist},
- PUBLISHER = {Wiley-Interscience},
- ADDRESS = {Hoboken, NJ, USA},
- YEAR = {2004},
- PAGES = {xvi+323},
- MRCLASS = {62-02 (62-04 62P25 65-02 91-02)},
- MRNUMBER = {MR2020104},
-}
-
- at article{yee:2010v,
- Author = {Yee, T. W.},
- Title = {{VGLM}s and {VGAM}s:
- An Overview for Applications in Fisheries Research},
- Year = 2010,
- Journal = {Fisheries Research},
- FJournal = {Fisheries Research},
- Volume = {101},
- Pages = {116--126},
- Number = {1--2},
-}
-
- at article{imai:king:lau:2008,
- AUTHOR = {Imai, Kosuke and King, Gary and Lau, Olivia},
- TITLE = {Toward A Common Framework for Statistical Analysis and
- Development},
- JOURNAL = {Journal of Computational and Graphical Statistics},
- YEAR = 2008,
- VOLUME = 17,
- PAGES = {892--913},
- NUMBER = 4,
-}
-
- at book{stok:davi:koch:2000,
- Author = {Stokes, M. E. and Davis, C. S. and Koch, G. G.},
- Title = {Categorical Data Analysis Using The \proglang{SAS} System},
- Year = 2000,
- Edition = {2nd},
- Publisher = {SAS Institute Inc.},
- Address = {Cary, NC, USA},
- PAGES = {648},
-}
-
- at article{neld:wedd:1972,
- Author = {Nelder, J. A. and Wedderburn, R. W. M.},
- Title = {Generalized Linear Models},
- Year = 1972,
- JOURNAL = {Journal of the Royal Statistical Society~A},
- Volume = 135,
- Pages = {370--384},
- Keywords = {Probit analysis; Analysis of variance; Contingency table;
- Exponential family; Quantal response; Weighted least
- squares},
- Number = 3,
-}
-
- at book{agre:2002,
- Author = {Agresti, Alan},
- Title = {Categorical Data Analysis},
- Year = 2002,
- Publisher = {John Wiley \& Sons},
- Address = {New York, USA},
- Edition = {2nd},
-}
-
- at book{fahr:tutz:2001,
- Author = {Fahrmeir, L. and Tutz, G.},
- Title = {Multivariate Statistical Modelling Based on Generalized Linear
- Models},
- Year = 2001,
- Edition = {2nd},
- Publisher = {Springer-Verlag},
- ADDRESS = {New York, USA},
-}
-
- at book{leon:2000,
- Author = {Leonard, Thomas},
- Title = {A Course in Categorical Data Analysis},
- Year = 2000,
- Publisher = {Chapman \& Hall/CRC},
- Address = {Boca Raton, FL, USA},
-}
-
- at book{lloy:1999,
- Author = {Lloyd, C. J.},
- Title = {Statistical Analysis of Categorical Data},
- Year = 1999,
- Publisher = {John Wiley \& Sons},
- Address = {New York, USA}
-}
-
- at book{long:1997,
- Author = {Long, J. S.},
- Title = {Regression Models for Categorical and Limited Dependent Variables},
- Year = 1997,
- Publisher = {Sage Publications},
- ADDRESS = {Thousand Oaks, CA, USA},
-}
-
- at book{mccu:neld:1989,
- Author = {McCullagh, P. and Nelder, J. A.},
- Title = {Generalized Linear Models},
- Year = 1989,
- Edition = {2nd},
- Publisher = {Chapman \& Hall},
- Address = {London},
- Pages = {500}
-}
-
- at book{simo:2003,
- Author = {Simonoff, J. S.},
- Title = {Analyzing Categorical Data},
- Year = 2003,
- Pages = {496},
- Publisher = {Springer-Verlag},
- Address = {New York, USA}
-}
-
- at article{liu:agre:2005,
- Author = {Liu, I. and Agresti, A.},
- Title = {The Analysis of Ordered Categorical Data:
- An Overview and a Survey of Recent Developments},
- Year = 2005,
- Journal = {Sociedad Estad{\'i}stica e Investigaci{\'o}n Operativa Test},
- Volume = 14,
- Pages = {1--73},
- Number = 1,
-}
-
- at MANUAL{thom:2009,
- TITLE = {\proglang{R} (and \proglang{S-PLUS}) Manual to Accompany
- Agresti's \textit{Categorical Data Analysis}~(2002),
- 2nd edition},
- AUTHOR = {Thompson, L. A.},
- YEAR = {2009},
- URL = {https://home.comcast.net/~lthompson221/Splusdiscrete2.pdf},
-}
-
- at article{yee:2008c,
- Author = {Yee, T. W.},
- Title = {The \pkg{VGAM} Package},
- Year = 2008,
- Journal = {\proglang{R} {N}ews},
- Volume = 8,
- Pages = {28--39},
- Number = 2,
-}
-
- at article{Rnews:Yee:2008,
- author = {Thomas W. Yee},
- title = {The \pkg{VGAM} Package},
- journal = {\proglang{R}~News},
- year = 2008,
- volume = 8,
- pages = {28--39},
- month = {October},
- url = {http://CRAN.R-project.org/doc/Rnews/},
- number = 2,
-}
-
- at article{yee:hast:2003,
- AUTHOR = {Yee, T. W. and Hastie, T. J.},
- TITLE = {Reduced-rank Vector Generalized Linear Models},
- JOURNAL = {Statistical Modelling},
- Volume = 3,
- Pages = {15--41},
- YEAR = {2003},
- Number = 1,
-}
-
- at article{good:1981,
- Author = {Goodman, L. A.},
- Title = {Association Models and Canonical Correlation in the Analysis
- of Cross-classifications Having Ordered Categories},
- Year = 1981,
- Journal = {Journal of the American Statistical Association},
- Volume = 76,
- Pages = {320--334},
- Number = 374,
-}
-
- at article{buja:hast:tibs:1989,
- Author = {Buja, Andreas and Hastie, Trevor and Tibshirani, Robert},
- Title = {Linear Smoothers and Additive Models},
- Year = 1989,
- JOURNAL = {The Annals of Statistics},
- Volume = 17,
- Pages = {453--510},
- Keywords = {Nonparametric; Regression; Kernel estimator},
- Number = 2,
-}
-
- at article{yee:step:2007,
- AUTHOR = {Yee, Thomas W. and Stephenson, Alec G.},
- TITLE = {Vector Generalized Linear and Additive Extreme Value Models},
- JOURNAL = {Extremes},
- FJOURNAL = {Extremes. Statistical Theory and Applications in Science,
- Engineering and Economics},
- VOLUME = {10},
- YEAR = {2007},
- PAGES = {1--19},
- MRCLASS = {Database Expansion Item},
- MRNUMBER = {MR2407639},
- NUMBER = {1--2},
-}
-
- at article{wand:orme:2008,
- Author = {Wand, M. P. and Ormerod, J. T.},
- Title = {On Semiparametric Regression with {O}'{S}ullivan Penalized Splines},
- Year = 2008,
- Journal = {The Australian and New Zealand Journal of Statistics},
- Volume = 50,
- Issue = 2,
- Pages = {179--198},
- Number = 2,
-}
-
- at book{cham:hast:1993,
- Editor = {Chambers, John M. and Hastie, Trevor J.},
- Title = {Statistical Models in \proglang{S}},
- Publisher = {Chapman \& Hall},
- Year = 1993,
- Pages = {608},
- Address = {New York, USA},
- Keywords = {Computing},
-}
-
- at Article{pete:harr:1990,
- Author = {Peterson, B. and Harrell, Frank E.},
- Title = {Partial Proportional Odds Models for Ordinal Response Variables},
- Year = 1990,
- Journal = {Applied Statistics},
- Volume = 39,
- Pages = {205--217},
- Number = 2,
-}
-
- at article{pete:1990,
- Author = {Peterson, B.},
- Title = {Letter to the Editor: Ordinal Regression Models for
- Epidemiologic Data},
- Year = 1990,
- Journal = {American Journal of Epidemiology},
- Volume = 131,
- Pages = {745--746}
-}
-
- at article{hast:tibs:buja:1994,
- AUTHOR = {Hastie, Trevor and Tibshirani, Robert and Buja, Andreas},
- TITLE = {Flexible Discriminant Analysis by Optimal Scoring},
- JOURNAL = {Journal of the American Statistical Association},
- VOLUME = {89},
- YEAR = {1994},
- PAGES = {1255--1270},
- CODEN = {JSTNAL},
- MRCLASS = {62H30},
- MRNUMBER = {95h:62099},
- NUMBER = {428},
-}
-
- at article{firth:2005,
- Author = {Firth, David},
- Title = {{B}radley-{T}erry Models in \proglang{R}},
- Year = 2005,
- Journal = {Journal of Statistical Software},
- Volume = 12,
- Number = 1,
- Pages = {1--12},
- URL = "http://www.jstatsoft.org/v12/i01/",
-}
-
- at book{weir:1996,
- Author = {Weir, Bruce S.},
- Title = {Genetic Data Analysis II: Methods for Discrete Population
- Genetic Data},
- Year = 1996,
- Publisher = {Sinauer Associates, Inc.},
- Address = {Sunderland, MA, USA}
-}
-
- at book{lang:2002,
- Author = {Lange, Kenneth},
- Title = {Mathematical and Statistical Methods for Genetic Analysis},
- Year = 2002,
- Edition = {2nd},
- Publisher = {Springer-Verlag},
- Address = {New York, USA},
-}
-
- at article{macm:etal:1995,
- Author = {MacMahon, S. and Norton, R. and Jackson, R. and Mackie, M. J. and
- Cheng, A. and
- Vander Hoorn, S. and Milne, A. and McCulloch, A.},
- Title = {Fletcher {C}hallenge-{U}niversity of {A}uckland {H}eart \&
- {H}ealth {S}tudy: Design and Baseline Findings},
- Year = 1995,
- Journal = {New Zealand Medical Journal},
- Volume = 108,
- Pages = {499--502},
-}
-
- at article{altm:jack:2010,
- author = {Altman, M. and Jackman, S.},
- title = "Nineteen Ways of Looking at Statistical Software",
- journal = "Journal of Statistical Software",
- year = "2010",
- note = "Forthcoming"
-}
-
- at article{fox:hong:2009,
- author = "John Fox and Jangman Hong",
- title = {Effect Displays in \proglang{R} for Multinomial and
- Proportional-Odds Logit Models:
- Extensions to the \pkg{effects} Package},
- journal = "Journal of Statistical Software",
- volume = "32",
- number = "1",
- pages = "1--24",
- year = "2009",
- URL = "http://www.jstatsoft.org/v32/i01/",
-}
-
- at article{wild:yee:1996,
- Author = {Wild, C. J. and Yee, T. W.},
- Title = {Additive Extensions to Generalized Estimating Equation
- Methods},
- Year = 1996,
- JOURNAL = {Journal of the Royal Statistical Society~B},
- Volume = 58,
- Pages = {711--725},
- Keywords = {Longitudinal data; Nonparametric; Regression; Smoothing},
- NUMBER = {4},
-}
-
- at Article{Yee:2010,
- author = {Thomas W. Yee},
- title = {The \pkg{VGAM} Package for Categorical Data Analysis},
- journal = {Journal of Statistical Software},
- year = {2010},
- volume = {32},
- number = {10},
- pages = {1--34},
- url = {http://www.jstatsoft.org/v32/i10/}
-}
-
- at Manual{R,
- title = {\proglang{R}: {A} Language and Environment
- for Statistical Computing},
- author = {{\proglang{R} Development Core Team}},
- organization = {\proglang{R} Foundation for Statistical Computing},
- address = {Vienna, Austria},
- year = {2009},
- note = {{ISBN} 3-900051-07-0},
- url = {http://www.R-project.org/}
-}
-
- at Book{Venables+Ripley:2002,
- author = {William N. Venables and Brian D. Ripley},
- title = {Modern Applied Statistics with \proglang{S}},
- edition = {4th},
- year = {2002},
- pages = {495},
- publisher = {Springer-Verlag},
- address = {New York},
- url = {http://www.stats.ox.ac.uk/pub/MASS4/},
-}
-
- at Manual{SAS,
- author = {{\proglang{SAS} Institute Inc.}},
- title = {The \proglang{SAS} System, Version 9.1},
- year = {2003},
- address = {Cary, NC},
- url = {http://www.sas.com/}
-}
-
- at Manual{yee:VGAM:2010,
- title = {\pkg{VGAM}: Vector Generalized Linear and Additive Models},
- author = {Yee, T. W.},
- year = {2010},
- note = {\proglang{R}~package version~0.7-10},
- url = {http://CRAN.R-project.org/package=VGAM}
-}
-
- at Manual{Harrell:2009,
- title = {\pkg{rms}: Regression Modeling Strategies},
- author = {Frank E. {Harrell, Jr.}},
- year = {2009},
- note = {\proglang{R}~package version~2.1-0},
- url = {http://CRAN.R-project.org/package=rms}
-}
-
- at Manual{Meyer+Zeileis+Hornik:2009,
- title = {\pkg{vcd}: Visualizing Categorical Data},
- author = {David Meyer and Achim Zeileis and Kurt Hornik},
- year = {2009},
- note = {\proglang{R}~package version~1.2-7},
- url = {http://CRAN.R-project.org/package=vcd}
-}
-
- at Article{Meyer+Zeileis+Hornik:2006,
- author = {David Meyer and Achim Zeileis and Kurt Hornik},
- title = {The Strucplot Framework: Visualizing Multi-Way
- Contingency Tables with \pkg{vcd}},
- journal = {Journal of Statistical Software},
- year = {2006},
- volume = {17},
- number = {3},
- pages = {1--48},
- url = {http://www.jstatsoft.org/v17/i03/}
-}
-
- at Manual{Turner+Firth:2009,
- title = {Generalized Nonlinear Models in \proglang{R}:
- An Overview of the \pkg{gnm} Package},
- author = {Heather Turner and David Firth},
- year = {2009},
- note = {\proglang{R}~package version~0.10-0},
- url = {http://CRAN.R-project.org/package=gnm},
-}
-
- at Article{Rnews:Turner+Firth:2007,
- author = {Heather Turner and David Firth},
- title = {\pkg{gnm}: A Package for Generalized Nonlinear Models},
- journal = {\proglang{R}~News},
- year = 2007,
- volume = 7,
- number = 2,
- pages = {8--12},
- month = {October},
- url = {http://CRAN.R-project.org/doc/Rnews/},
-}
-
-
- at Manual{ElemStatLearn:2009,
- title = {\pkg{ElemStatLearn}: Data Sets, Functions and
- Examples from the Book `The Elements
- of Statistical Learning, Data Mining, Inference, and
- Prediction' by Trevor Hastie, Robert Tibshirani and Jerome
- Friedman},
- author = {Kjetil Halvorsen},
- year = {2009},
- note = {\proglang{R}~package version~0.1-7},
- url = {http://CRAN.R-project.org/package=ElemStatLearn},
- }
-
- at Manual{Zelig:2009,
- title = {\pkg{Zelig}: Everyone's Statistical Software},
- author = {Kosuke Imai and Gary King and Olivia Lau},
- year = {2009},
- note = {\proglang{R}~package version~3.4-5},
- url = {http://CRAN.R-project.org/package=Zelig},
-}
-
- at article{kosm:firt:2009,
- author = {Kosmidis, I. and Firth, D.},
- title = {Bias Reduction in Exponential Family Nonlinear Models},
- year = {2009},
- JOURNAL = {Biometrika},
- FJOURNAL = {Biometrika},
- volume = {96},
- PAGES = {793--804},
- NUMBER = {4},
-}
-
- at techreport{kosm:firt:2008,
- author = {Kosmidis, I. and Firth, D.},
- title = {Bias Reduction in Exponential Family Nonlinear Models},
- Note = {CRiSM Paper No.~08-05v2},
- year = {2008},
- URL = {http://www.warwick.ac.uk/go/crism},
- Institution = {Department of Statistics, Warwick University},
-}
-
- at Manual{Kosmidis:2008,
- title = {\pkg{brglm}: Bias Reduction in Binary-Response {GLMs}},
- author = {Ioannis Kosmidis},
- year = {2008},
- note = {\proglang{R}~package version~0.5-4},
- url = {http://CRAN.R-project.org/package=brglm},
-}
-
- at Manual{Hatzinger:2009,
- title = {\pkg{prefmod}: Utilities to Fit Paired Comparison
- Models for Preferences},
- author = {Reinhold Hatzinger},
- year = {2009},
- note = {\proglang{R}~package version~0.8-16},
- url = {http://CRAN.R-project.org/package=prefmod},
-}
-
- at Manual{firth:2008,
- title = {\pkg{BradleyTerry}: Bradley-Terry Models},
- author = {David Firth},
- year = {2008},
- note = {\proglang{R}~package version~0.8-7},
- url = {http://CRAN.R-project.org/package=BradleyTerry},
- }
-
- at Manual{gnlm:2007,
- title = {\pkg{gnlm}: Generalized Nonlinear Regression Models},
- author = {Jim Lindsey},
- year = {2007},
- note = {\proglang{R}~package version~1.0},
- url = {http://popgen.unimaas.nl/~jlindsey/rcode.html},
-}
-
- at Manual{Konis:2009,
- title = {\pkg{safeBinaryRegression}: Safe Binary Regression},
- author = {Kjell Konis},
- year = {2009},
- note = {\proglang{R}~package version~0.1-2},
- url = {http://CRAN.R-project.org/package=safeBinaryRegression},
-}
-