[r-cran-vgam] 33/63: Import Upstream version 0.9-1
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:32 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit d32329601e8b91526026ba2a28e0a375ccc0c14e
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:57 2017 +0100
Import Upstream version 0.9-1
---
BUGS | 16 +
DESCRIPTION | 11 +-
MD5 | 659 ++--
NAMESPACE | 41 +-
NEWS | 92 +
R/Links.R | 2 +-
R/aamethods.q | 2 +-
R/add1.vglm.q | 2 +-
R/attrassign.R | 2 +-
R/bAIC.q | 2 +-
R/build.terms.vlm.q | 2 +-
R/calibrate.q | 2 +-
R/cao.R | 2 +-
R/cao.fit.q | 1682 ++++-----
R/coef.vlm.q | 31 +-
R/cqo.R | 2 +-
R/cqo.fit.q | 597 ++--
R/deviance.vlm.q | 20 +-
R/effects.vglm.q | 19 +-
R/family.actuary.R | 2435 +++++++++++--
R/family.aunivariate.R | 907 +++--
R/family.basics.R | 704 ++--
R/family.binomial.R | 2306 +++++++-----
R/family.bivariate.R | 1791 +++++-----
R/family.categorical.R | 1686 ++++-----
R/family.censored.R | 1041 ++++--
R/family.circular.R | 208 +-
R/family.exp.R | 190 +-
R/family.extremes.R | 1839 +++++-----
R/family.fishing.R | 126 +-
R/family.functions.R | 328 +-
R/family.genetic.R | 605 ++--
R/family.glmgam.R | 925 ++---
R/family.loglin.R | 131 +-
R/family.math.R | 75 +-
R/family.mixture.R | 490 ++-
R/family.nonlinear.R | 42 +-
R/family.normal.R | 1849 ++++++----
R/family.others.R | 370 +-
R/family.positive.R | 2042 +++++++----
R/family.qreg.R | 2839 +++++++--------
R/family.quantal.R | 264 +-
R/family.rcim.R | 319 +-
R/family.rcqo.R | 288 +-
R/family.robust.R | 37 +-
R/family.rrr.R | 2808 ++++++++-------
R/family.sur.R | 442 +++
R/family.survival.R | 366 +-
R/family.ts.R | 534 +--
R/family.univariate.R | 5998 +++++++++++---------------------
R/family.vglm.R | 2 +-
R/family.zeroinf.R | 1908 +++++-----
R/fittedvlm.R | 2 +-
R/formula.vlm.q | 2 +-
R/generic.q | 2 +-
R/links.q | 101 +-
R/logLik.vlm.q | 30 +-
R/lrwaldtest.R | 30 +-
R/model.matrix.vglm.q | 427 +--
R/mux.q | 368 +-
R/nobs.R | 37 +-
R/plot.vglm.q | 1179 ++++---
R/predict.vgam.q | 525 +--
R/predict.vglm.q | 272 +-
R/predict.vlm.q | 491 +--
R/print.vglm.q | 15 +-
R/print.vlm.q | 2 +-
R/qrrvglm.control.q | 23 +-
R/qtplot.q | 2 +-
R/residuals.vlm.q | 379 +-
R/rrvglm.R | 2 +-
R/rrvglm.control.q | 252 +-
R/rrvglm.fit.q | 298 +-
R/s.q | 6 +-
R/s.vam.q | 409 ++-
R/smart.R | 2 +-
R/step.vglm.q | 9 +-
R/summary.vgam.q | 101 +-
R/summary.vglm.q | 77 +-
R/summary.vlm.q | 149 +-
R/uqo.R | 104 +-
R/vgam.R | 406 ++-
R/vgam.control.q | 4 +-
R/vgam.fit.q | 491 +--
R/vgam.match.q | 125 +-
R/vglm.R | 60 +-
R/vglm.control.q | 107 +-
R/vglm.fit.q | 145 +-
R/vlm.R | 215 +-
R/vlm.wfit.q | 211 +-
R/vsmooth.spline.q | 801 ++---
R/zzz.R | 13 -
data/Huggins89.t1.rda | Bin 0 -> 439 bytes
data/Perom.rda | Bin 0 -> 431 bytes
data/alclevels.rda | Bin 551 -> 549 bytes
data/alcoff.rda | Bin 546 -> 546 bytes
data/auuc.rda | Bin 246 -> 243 bytes
data/backPain.rda | Bin 484 -> 480 bytes
data/car.all.rda | Bin 6961 -> 6987 bytes
data/chinese.nz.txt.gz | Bin 365 -> 380 bytes
data/crashbc.rda | Bin 375 -> 373 bytes
data/crashf.rda | Bin 341 -> 339 bytes
data/crashi.rda | Bin 491 -> 489 bytes
data/crashmc.rda | Bin 386 -> 383 bytes
data/crashp.rda | Bin 376 -> 375 bytes
data/crashtr.rda | Bin 362 -> 360 bytes
data/crime.us.rda | Bin 3976 -> 0 bytes
data/datalist | 49 -
data/fibre15.rda | Bin 247 -> 0 bytes
data/fibre1dot5.rda | Bin 298 -> 0 bytes
data/finney44.rda | Bin 210 -> 207 bytes
data/gala.rda | Bin 1052 -> 0 bytes
data/gew.txt.gz | Bin 530 -> 544 bytes
data/hspider.rda | Bin 1344 -> 1343 bytes
data/hued.rda | Bin 415 -> 0 bytes
data/huie.rda | Bin 419 -> 0 bytes
data/huse.rda | Bin 324 -> 0 bytes
data/leukemia.rda | Bin 329 -> 328 bytes
data/marital.nz.rda | Bin 10504 -> 10480 bytes
data/mmt.rda | Bin 4222 -> 4205 bytes
data/{olympic.txt.gz => olym08.txt.gz} | Bin 941 -> 943 bytes
data/olym12.txt.gz | Bin 0 -> 922 bytes
data/pneumo.rda | Bin 268 -> 266 bytes
data/rainfall.rda | Bin 11062 -> 0 bytes
data/ruge.rda | Bin 258 -> 255 bytes
data/toxop.rda | Bin 474 -> 481 bytes
data/ugss.rda | Bin 11588 -> 0 bytes
data/venice.rda | Bin 983 -> 971 bytes
data/venice90.rda | Bin 8068 -> 8220 bytes
data/wffc.indiv.rda | Bin 2565 -> 2590 bytes
data/wffc.nc.rda | Bin 4292 -> 4247 bytes
data/wffc.rda | Bin 10236 -> 10202 bytes
data/wffc.teams.rda | Bin 542 -> 540 bytes
data/xs.nz.rda | Bin 221524 -> 0 bytes
inst/CITATION | 21 +
inst/doc/categoricalVGAM.R | 396 +++
inst/doc/categoricalVGAM.pdf | Bin 678663 -> 677107 bytes
man/AA.Aa.aa.Rd | 25 +-
man/AB.Ab.aB.ab.Rd | 42 +-
man/AB.Ab.aB.ab2.Rd | 49 +-
man/ABO.Rd | 10 +-
man/AICvlm.Rd | 20 +-
man/Coef.Rd | 11 +-
man/Coef.qrrvglm-class.Rd | 30 +-
man/Coef.qrrvglm.Rd | 38 +-
man/Coef.rrvglm-class.Rd | 20 +-
man/Coef.rrvglm.Rd | 13 +-
man/Coef.vlm.Rd | 25 +-
man/CommonVGAMffArguments.Rd | 40 +-
man/DeLury.Rd | 65 +-
man/G1G2G3.Rd | 49 +-
man/Huggins89.t1.Rd | 110 +
man/Opt.Rd | 22 +-
man/Perom.Rd | 75 +
man/Qvar.Rd | 47 +-
man/SUR.Rd | 192 +
man/VGAM-package.Rd | 95 +-
man/alaplace3.Rd | 78 +-
man/alaplaceUC.Rd | 4 +-
man/amlbinomial.Rd | 32 +-
man/amlexponential.Rd | 14 +-
man/amlnormal.Rd | 61 +-
man/amlpoisson.Rd | 14 +-
man/auuc.Rd | 8 +
man/backPain.Rd | 2 +
man/beta.ab.Rd | 30 +-
man/betaII.Rd | 10 +-
man/betabinomUC.Rd | 24 +-
man/betabinomial.Rd | 56 +-
man/betabinomial.ab.Rd | 22 +-
man/betageometric.Rd | 6 +-
man/bilogis4UC.Rd | 24 +-
man/binom2.or.Rd | 25 +-
man/binom2.orUC.Rd | 46 +-
man/binom2.rho.Rd | 95 +-
man/binom2.rhoUC.Rd | 29 +-
man/binomialff.Rd | 39 +-
man/binormal.Rd | 26 +-
man/bisa.Rd | 4 +
man/bisaUC.Rd | 51 +-
man/bivgamma.mckay.Rd | 17 +-
man/bmi.nz.Rd | 2 +-
man/borel.tanner.Rd | 4 +-
man/calibrate.Rd | 47 +-
man/calibrate.qrrvglm.Rd | 47 +-
man/calibrate.qrrvglm.control.Rd | 16 +-
man/cao.Rd | 44 +-
man/cao.control.Rd | 68 +-
man/cardioid.Rd | 2 +
man/cenpoisson.Rd | 58 +-
man/cgumbel.Rd | 40 +-
man/chest.nz.Rd | 6 +-
man/chinese.nz.Rd | 35 +-
man/constraints.Rd | 11 +-
man/cqo.Rd | 29 +-
man/crashes.Rd | 12 +-
man/cratio.Rd | 55 +-
man/crime.us.Rd | 81 -
man/cumulative.Rd | 12 +-
man/dagum.Rd | 9 +-
man/dagumUC.Rd | 42 +-
man/dcennormal1.Rd | 20 +-
man/deplot.lmscreg.Rd | 37 +-
man/depvar.Rd | 8 +-
man/eexpUC.Rd | 47 +-
man/enormUC.Rd | 34 +-
man/enzyme.Rd | 6 +-
man/eunifUC.Rd | 38 +-
man/expexp.Rd | 12 +-
man/fgm.Rd | 34 +-
man/fill.Rd | 90 +-
man/fisherz.Rd | 13 +-
man/fisk.Rd | 2 +-
man/fiskUC.Rd | 4 +-
man/fittedvlm.Rd | 10 +-
man/fnormal1.Rd | 4 +
man/frechet.Rd | 4 +-
man/frechetUC.Rd | 18 +-
man/freund61.Rd | 12 +-
man/gamma2.Rd | 6 +-
man/gaussianff.Rd | 10 +-
man/genbetaII.Rd | 4 +-
man/geometric.Rd | 77 +-
man/gev.Rd | 13 +-
man/gevUC.Rd | 5 +-
man/gew.Rd | 92 +-
man/golf.Rd | 24 +-
man/gpdUC.Rd | 6 +-
man/grain.us.Rd | 8 +-
man/grc.Rd | 29 +-
man/hormone.Rd | 69 +-
man/huber.Rd | 18 +-
man/huberUC.Rd | 11 +-
man/hued.Rd | 55 -
man/huggins91.Rd | 182 -
man/huggins91UC.Rd | 141 -
man/huie.Rd | 59 -
man/hunua.Rd | 22 +-
man/huse.Rd | 73 -
man/inv.gaussianff.Rd | 4 +-
man/invbinomial.Rd | 2 +-
man/invlomax.Rd | 2 +-
man/invlomaxUC.Rd | 4 +-
man/invparalogistic.Rd | 2 +-
man/invparalogisticUC.Rd | 4 +-
man/is.parallel.Rd | 5 +-
man/is.zero.Rd | 3 +-
man/lambertW.Rd | 7 +-
man/{lv.Rd => latvar.Rd} | 37 +-
man/leipnik.Rd | 4 +-
man/lirat.Rd | 2 +-
man/lms.bcn.Rd | 10 +-
man/loge.Rd | 5 +-
man/logit.Rd | 4 +-
man/loglinb2.Rd | 10 +-
man/loglinb3.Rd | 5 +
man/lomax.Rd | 32 +-
man/lomaxUC.Rd | 32 +-
man/lvplot.qrrvglm.Rd | 73 +-
man/makeham.Rd | 9 +-
man/maxwellUC.Rd | 9 +-
man/mlogit.Rd | 14 +-
man/multinomial.Rd | 35 +-
man/nbcanlink.Rd | 6 +-
man/nbolf.Rd | 13 +-
man/negbinomial.Rd | 8 +-
man/negbinomial.size.Rd | 6 +-
man/normal1.Rd | 22 +-
man/notdocumentedyet.Rd | 51 +-
man/olym.Rd | 79 +
man/olympic.Rd | 56 -
man/oxtemp.Rd | 2 +-
man/paralogistic.Rd | 3 +-
man/paralogisticUC.Rd | 2 +-
man/pareto1.Rd | 7 +-
man/paretoIV.Rd | 2 +-
man/paretoIVUC.Rd | 5 +-
man/perks.Rd | 6 +
man/perksUC.Rd | 1 +
man/persp.qrrvglm.Rd | 4 +-
man/pgamma.deriv.Rd | 128 +
man/pgamma.deriv.unscaled.Rd | 102 +
man/plackett.Rd | 4 +-
man/plotvgam.Rd | 6 +-
man/pneumo.Rd | 8 +-
man/pnorm2UC.Rd | 124 +
man/poissonff.Rd | 21 +-
man/polf.Rd | 11 +-
man/posbernUC.Rd | 168 +
man/posbernoulli.b.Rd | 209 ++
man/posbernoulli.t.Rd | 216 ++
man/posbernoulli.tb.Rd | 217 ++
man/posbinomUC.Rd | 11 +-
man/posbinomial.Rd | 56 +-
man/qrrvglm.control.Rd | 27 +-
man/rcqo.Rd | 1 +
man/rrar.Rd | 18 +-
man/rrvglm-class.Rd | 9 +-
man/rrvglm.Rd | 28 +-
man/rrvglm.control.Rd | 23 +-
man/s.Rd | 52 +-
man/seq2binomial.Rd | 18 +-
man/sinmad.Rd | 21 +-
man/sinmadUC.Rd | 4 +-
man/snormUC.Rd | 5 +-
man/studentt.Rd | 2 +-
man/tikuv.Rd | 4 +-
man/tikuvUC.Rd | 2 +
man/tobit.Rd | 2 +
man/toxop.Rd | 4 +-
man/tparetoUC.Rd | 3 +
man/trplot.qrrvglm.Rd | 6 +-
man/truncweibull.Rd | 150 +
man/ugss.Rd | 100 -
man/undocumented-methods.Rd | 10 +
man/uqo.Rd | 61 +-
man/uqo.control.Rd | 31 +
man/venice.Rd | 18 +-
man/vgam.Rd | 19 +-
man/vgam.control.Rd | 10 +-
man/vglm.Rd | 54 +-
man/vglm.control.Rd | 50 +-
man/vglmff-class.Rd | 44 +-
man/vsmooth.spline.Rd | 43 +-
man/weibull.Rd | 3 +-
man/wffc.P2star.Rd | 7 +-
man/wffc.Rd | 20 +-
man/wffc.indiv.Rd | 4 +-
man/wffc.nc.Rd | 6 +-
man/wffc.teams.Rd | 6 +-
man/xs.nz.Rd | 418 ---
man/zabinomial.Rd | 1 +
man/zageometric.Rd | 8 +-
man/zeta.Rd | 24 +-
man/zibinomial.Rd | 34 +-
man/zigeometric.Rd | 13 +-
man/zinegbinomial.Rd | 14 +-
man/zipebcom.Rd | 38 +-
man/zipoisson.Rd | 32 +-
src/fgam.f | 22 +-
src/rgam.f | 2 +-
src/rgam3.c | 193 +-
src/vcall2.f | 5 +-
src/vdigami.f | 156 +
src/vgam.f | 62 +-
src/vgam3.c | 1092 +++---
src/vlinpack1.f | 4 +-
src/vlinpack2.f | 18 +-
src/vlinpack3.f | 28 +-
src/vmux.f | 12 +-
350 files changed, 31164 insertions(+), 24778 deletions(-)
diff --git a/BUGS b/BUGS
index a31fe77..f0aa161 100755
--- a/BUGS
+++ b/BUGS
@@ -1,5 +1,21 @@
Here is a list of known bugs.
+2012-09
+
+
+loge('a', short = FALSE, inverse = FALSE)
+loge('a', short = FALSE, inverse = TRUE)
+give the same answer.
+
+
+
+Coef(vglm.dirmultinomial.fit) fails.
+Evidently, multiple "mlogit"s saved on vglm.dirmultinomial.fit at misc
+do not suffice.
+
+
+
+
2011-12
VGAM version 0.8-4 said it needed R version 2-11.1 or later.
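For reference, the loge() behaviour flagged in the new BUGS entry above can be checked directly from an R session. The two calls are copied verbatim from that entry; the identical() comparison is only a suggested way to confirm the report, and it assumes VGAM 0.9-1 is installed (the exact strings returned depend on the link's formatting arguments):

    library(VGAM)   # assumes VGAM 0.9-1 is installed
    out1 <- loge('a', short = FALSE, inverse = FALSE)  # forward link, written out in full
    out2 <- loge('a', short = FALSE, inverse = TRUE)   # inverse link, written out in full
    identical(out1, out2)  # TRUE reproduces the report: both calls give the same answer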
diff --git a/DESCRIPTION b/DESCRIPTION
index 83d1374..5906366 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,10 +1,10 @@
Package: VGAM
-Version: 0.9-0
-Date: 2012-09-01
+Version: 0.9-1
+Date: 2013-04-27
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 2.14.0), splines, methods, stats, stats4
+Depends: R (>= 2.15.1), splines, methods, stats, stats4
Suggests: MASS
Description: Vector generalized linear and additive models, and
associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
@@ -17,6 +17,7 @@ Imports: methods, stats, stats4
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: 2012-09-01 04:39:09 UTC; tyee001
+Packaged: 2013-04-27 04:22:15 UTC; tyee001
+NeedsCompilation: yes
Repository: CRAN
-Date/Publication: 2012-09-01 05:55:16
+Date/Publication: 2013-04-27 09:02:44
diff --git a/MD5 b/MD5
index 579f1ea..62c0978 100644
--- a/MD5
+++ b/MD5
@@ -1,145 +1,137 @@
-60b13c57b66bb77e65321c5c0a3b1dab *BUGS
-eba6e49209343f83cd62777ae45f40b8 *DESCRIPTION
+f6c2eaaf925e53832fcb53239b4f5cc8 *BUGS
+6a7fba4cfaba8a30efbd9872bcdb525f *DESCRIPTION
dd959d3a0cd680792122813a7d58d506 *DISCLAIMER
-8d0d88feac7fa558a6ec04249d6b4feb *NAMESPACE
-4a7690a3913431a23fbef73c99a7ebba *NEWS
-555d62a4356133367fbd0d49ed5d19d1 *R/Links.R
-ca2f5b0e1d1e0c4bb984a82a7d837495 *R/aamethods.q
-b3239d3ffdffe1de9581354b05d66016 *R/add1.vglm.q
-95904ca107a7d51b5a7a6603304e962b *R/attrassign.R
-fa7ea29619a00514f855a075070edae7 *R/bAIC.q
-73b6069b0e12d23150dcfc61134aeed5 *R/build.terms.vlm.q
-56bed730c52b3d44ff151e0b6db57be6 *R/calibrate.q
-b09327ef1094ac0ff74c3283155ea3fb *R/cao.R
-0bdd385d0a1232292595d702d2ae167d *R/cao.fit.q
-714327842f7526ae1527e3c2f8cd3a9b *R/coef.vlm.q
-a3f5ad1bd124d07a69c74b7f917a9a58 *R/cqo.R
-e94fae353c86c2ece43792e9c2f777a0 *R/cqo.fit.q
-211da72b82e2edb1271019a0db41ae12 *R/deviance.vlm.q
-80861c2c2454b9c298e11cbadc431056 *R/effects.vglm.q
-6ec5fa8424b3096f84725878bb303296 *R/family.actuary.R
-cbed3db74d49ab7d24f8c07737b186b3 *R/family.aunivariate.R
-ac6e4976d5c319558a3fbd2fb01f5c58 *R/family.basics.R
-36efe4bc416dca7ab18b96dcc56f67f8 *R/family.binomial.R
-c4d71b12cf141977e5ea1f14f362bac1 *R/family.bivariate.R
-49ac6615afe7a8da3388392f8136ee9e *R/family.categorical.R
-3a8cbb695a8700fd26a6e9de3a2920b7 *R/family.censored.R
-63d90d55ae10db5f8fd4367b2fe3d775 *R/family.circular.R
-52f9dc7247202a2d76e69e7c213aa786 *R/family.exp.R
-8b339c2d1f2f274659ed53b5bb4b6cdd *R/family.extremes.R
-938226f9a34e4f6fd80108a9a89c2651 *R/family.fishing.R
-b0910b3f615575850e1417740dedaaa7 *R/family.functions.R
-dd14928a169b772f96b78fe7126377fc *R/family.genetic.R
-efd7915c4bbd4ab239cd590422af74cb *R/family.glmgam.R
-d81d63f88573296e4241ffe8a87ee99d *R/family.loglin.R
-e159913225b326c326e2c8bffde57dcc *R/family.math.R
-cf1ef5c0906c17220d1fe0d061514d0d *R/family.mixture.R
-8c91b1d7a9cc6c3acf7d45aa19f6853c *R/family.nonlinear.R
-14985d0dbaed80504e0a3507afe5810a *R/family.normal.R
-d017890f91aeb1645df847674f904184 *R/family.others.R
-9b5c067f38b1b25476bc52165676e367 *R/family.positive.R
-bfd45858f4bec29d0deacf2b58a71e59 *R/family.qreg.R
-57ae7b23e7562be6d99fa29ecc61c234 *R/family.quantal.R
-e0b7d3dc700e001480e3d5b7d7790e40 *R/family.rcim.R
-1e4aeedf2b6062c72ee0844ed57fc5f2 *R/family.rcqo.R
-a7fc259a5e78902f612d8c5bcc0852e0 *R/family.robust.R
-27382145fd1f60de7726fba47e7952b8 *R/family.rrr.R
-955838f5c05811c822ef1db7b15fce4a *R/family.survival.R
-f8f2bc7b1d740b98b18c7de6915f7ff8 *R/family.ts.R
-8490fe238fa094ad63ae5db1d04cf83d *R/family.univariate.R
-ba86e91c886455a19a223ab62d72910b *R/family.vglm.R
-202dae68295c91f63a80dc2e342e1f77 *R/family.zeroinf.R
-c4209518badc8f38d39cd2d7b8405d24 *R/fittedvlm.R
-c6167af1886c3d0340b9b41a66f8d1a9 *R/formula.vlm.q
-33aa96487bc94130897db4ca82ec9559 *R/generic.q
-646e478fc9a1c0635921e05b1b546f7a *R/links.q
-ec95b1084b5bba66c143c75ef5425268 *R/logLik.vlm.q
-12c9c7e7246afe10034cdd60a90a73d0 *R/lrwaldtest.R
-f88c81e01b502e036f99c9d38d39e284 *R/model.matrix.vglm.q
-76b26cdae089197c27a697d06ad16c30 *R/mux.q
-473fd0f09548cd41a4d126dc0f3498ce *R/nobs.R
-ca74849eea37277b840ca90fe843beb4 *R/plot.vglm.q
-225861a6f45f0b2efe3bd315f8a5aaff *R/predict.vgam.q
-21a0facdd8b7d12fd37eefcc65e7228a *R/predict.vglm.q
-c1ab98752f47716f978a9f1a406b1112 *R/predict.vlm.q
-d56618bb580ea017562efbb27aa69ee5 *R/print.vglm.q
-04a7a1cc3e40dc1b4e861e191e91edfd *R/print.vlm.q
-9fb95687e7080c3b216ee7c79cb1be0a *R/qrrvglm.control.q
-865a789765db838345b85273ced9189f *R/qtplot.q
-cc43b350b8191fca4af2b1e969256a27 *R/residuals.vlm.q
-144643456f5d88647df94b5421bd850a *R/rrvglm.R
-e1375a19876aca5ad23de2b5548223a0 *R/rrvglm.control.q
-a64fe1a10cc52a121f1af8f410953e4c *R/rrvglm.fit.q
-b7b95cdd6591250161f2c93699983466 *R/s.q
-8d57d9c6666bc912dc03123fb36b6908 *R/s.vam.q
-6dff9b78299e3ecf4e5087946d54cb95 *R/smart.R
-7ce45be4048ac6023d1bfcd703f80391 *R/step.vglm.q
-44fc620e4e847fee0a9ce1365f3ffd27 *R/summary.vgam.q
-c2cfd1291178b694071730b2d53d02c2 *R/summary.vglm.q
-242d83e29ebcdbebf30dd6e1b256aaa6 *R/summary.vlm.q
-753dad5450557ae57cbff2cb4c229b3a *R/uqo.R
-f223707b00020c752536ef95ea7150bb *R/vgam.R
-6cc23d07c2031dcad93254f092118ce9 *R/vgam.control.q
-726aa9b28c1fb045753253af10152e71 *R/vgam.fit.q
-a4c1ebcffe0e1e65daaff53ae414da4c *R/vgam.match.q
-f03cb94631bcfdccf01e1542fb0f976e *R/vglm.R
-2ef254676e032cb2aca91352565b46d4 *R/vglm.control.q
-fb812b12aaf59ab251153fcc3482e556 *R/vglm.fit.q
-38aeb51b3ed4d9a4a5f1af56da21b32b *R/vlm.R
-e76f5e142ff6bc7ad92fc0eece93bb9d *R/vlm.wfit.q
-f9c093d80ffab1851abc4459e35050d9 *R/vsmooth.spline.q
-c1c2fce6995f00d9ec512f818662a7c1 *R/zzz.R
-f8995346c8c9f824505f62825d3afa0d *data/alclevels.rda
-8879c9b3cca96c907424391706f5bf56 *data/alcoff.rda
-ead10f7aec9214d93787e0f6cfa2c26f *data/auuc.rda
-3ca7268f4f9287b28bb45ce767611375 *data/backPain.rda
+2af0a233dbb327202c9855a893e5fe4f *NAMESPACE
+1a92d93a22cfa2999d96ddaf2b98f990 *NEWS
+d62d56dbac3a02af9d7c8b2bbcb39e8b *R/Links.R
+adf209eb2798923a1c63284e2f35a74f *R/aamethods.q
+15d1737d6888f9bb905bb0d2d82a385b *R/add1.vglm.q
+1738067495adbcc1127a1f8f3cc7988a *R/attrassign.R
+832a8467cdb8625a3be4ae3d17f2b90f *R/bAIC.q
+5cdd8c0f82dea0e413e04bc2cff8c123 *R/build.terms.vlm.q
+b6860bb9ee446a7dd9b684c95aa5bc05 *R/calibrate.q
+6e439ff28115e3dee0999c1fb16549d8 *R/cao.R
+ccee03271e151a65bfdfe831b9f3e8b5 *R/cao.fit.q
+86abaa804bbae3663eba175e424cb507 *R/coef.vlm.q
+bfa85b0c6a4c0d8ef179e167e6fb6d93 *R/cqo.R
+fe11e7df7fc7466a1ad1ae2eb7302790 *R/cqo.fit.q
+abd0b60fa8407957c67d4392d7af26fe *R/deviance.vlm.q
+a6b5f34153d4828e30d35addf7d6ba9f *R/effects.vglm.q
+c613675237da101f62d7b09a5a9022b9 *R/family.actuary.R
+ef98d702078dfe8028a9ca67293ff0e9 *R/family.aunivariate.R
+821b41568b72112b39e165bee64fff63 *R/family.basics.R
+cd6ac62efb3f8c85f9943b9e366ffcf6 *R/family.binomial.R
+dc12aa7a7020b9fcb98101ecbc151976 *R/family.bivariate.R
+f69cb7e860b7381e6876499575031e00 *R/family.categorical.R
+7b8a2ffd2480d2bc1686acd3975925bc *R/family.censored.R
+6e22b04d33eac0d296369deb9eb0df6d *R/family.circular.R
+635eb4cbaa3c7d3eb7aa589425520f91 *R/family.exp.R
+5221dacf55524f6604bc7f63a087f349 *R/family.extremes.R
+d3fb45972867409ec229acb7a053bee4 *R/family.fishing.R
+9826cf013ff6c981f11f32a06d26d3ab *R/family.functions.R
+7ee48c4f697f650bcd8ed13d50ff99de *R/family.genetic.R
+3c5e4f0c78262e274ac99bc697c0626b *R/family.glmgam.R
+4b271424d2b5c532da022b833fa091c7 *R/family.loglin.R
+ff91b689c8f0f97da4f15ce5a938a248 *R/family.math.R
+0aafeb41fdf7d02837c021b387f94b55 *R/family.mixture.R
+bc95bc6f29a8bbb163a03e800627c802 *R/family.nonlinear.R
+8f365a42782116a4049d78817ef26443 *R/family.normal.R
+71d2f8f47e7be7e42bc725383fe9b8b2 *R/family.others.R
+d4e9e1cdf543f7b59a67b9229aa4adc5 *R/family.positive.R
+cf30ede4751332d2e97a032812719180 *R/family.qreg.R
+b4a7110d940135f7372ae51f0a32070a *R/family.quantal.R
+c365f0b1c200d523c71b3fddffd31ef7 *R/family.rcim.R
+9160d6726da1309528dc856c44c75421 *R/family.rcqo.R
+2a7ba5edcb2a5e996431700f90cc5ca5 *R/family.robust.R
+f4c4f0abbc5708e66af94e0e873a590e *R/family.rrr.R
+089ae9a0fd12c18dcb10fde5fc394291 *R/family.sur.R
+1b6f4e240e52a537fc9855e5608344ca *R/family.survival.R
+e53b98453c106c2176b60a6e2241b08b *R/family.ts.R
+df658830892993fe457fc0146aaa2a3d *R/family.univariate.R
+11583197eff8498de3c6121ab66c707a *R/family.vglm.R
+d59a4ccad1536b81308ec42dffec9a2a *R/family.zeroinf.R
+daae5f4987b87f24e1dc0aa1c3602147 *R/fittedvlm.R
+4b557b8346c0b3634105f243ddfbf24a *R/formula.vlm.q
+6ac6db14a8544716dd49fdc95cc06893 *R/generic.q
+104a446ef4f0f15e56f3342ca01b34a0 *R/links.q
+88359e6090cbf9b245bc49ac35805e1e *R/logLik.vlm.q
+a3ccdcdbfa8ca1a1881c8853a7eafd2f *R/lrwaldtest.R
+6f82978825337429b7c05d36c12ed68a *R/model.matrix.vglm.q
+1732357e0c3e1a2e278f364f005762bb *R/mux.q
+ea6f08f274acb629f4cedb9a46e0ec20 *R/nobs.R
+6414d0ff798fffb260139b4131c7808b *R/plot.vglm.q
+f87f2f2a142820503c24a9124a7f7fd4 *R/predict.vgam.q
+90b48a5c5096e443ef28634d1447e600 *R/predict.vglm.q
+a57f83121934ed29f45a6440def49bde *R/predict.vlm.q
+53a8b748527a8b5738121fefb84587fc *R/print.vglm.q
+e1d1e80faf5b350b32676e53a565263f *R/print.vlm.q
+0fa72053f84f1c878c451c1561a66e3a *R/qrrvglm.control.q
+7a85e29e0e6c86a808dbc67a5058a2f2 *R/qtplot.q
+512cf9358cb8da821891c5ef1e7ca4f0 *R/residuals.vlm.q
+d7b993156aea56e835e2af8d3df41cf6 *R/rrvglm.R
+42e7eec20c6ca8bbb76472b3f98f5536 *R/rrvglm.control.q
+470aa87f01b3f571a962465cd2064102 *R/rrvglm.fit.q
+d0f49d2c6af83f79ce6894993a42b79d *R/s.q
+59971ce313b9d5d8117ee9be43049940 *R/s.vam.q
+17b2981fe5a5a8b6c8d5ff24e1110c4b *R/smart.R
+1bccef4ed724ae0a8d0f677c7d12c15d *R/step.vglm.q
+7fdc7139fbe351c53b7a5b64f782ada9 *R/summary.vgam.q
+d8ddb7543987a1d3088e6c264d253d85 *R/summary.vglm.q
+254a4caed282a79fe7bd72a6ac8a54e1 *R/summary.vlm.q
+9fd5ab4d09a51e27b81ed54d0ba98f84 *R/uqo.R
+58d011e757b69c50072aba6636d0459e *R/vgam.R
+c479ba9b1e9dfe567e2d02d667392c0e *R/vgam.control.q
+1bc56d80200a7c3e8974b6ebd3cddbd1 *R/vgam.fit.q
+fa4a8b03864d4c88623072fbc836ddbb *R/vgam.match.q
+a11d62d8e230f9d3f5d1169ffac27703 *R/vglm.R
+5e7d4ef7fbcd4a050378cac4480e6a1b *R/vglm.control.q
+8b749d824c552fa958f669f4461c0480 *R/vglm.fit.q
+df2d63117cb8b126e5f568d0c3c0b5f7 *R/vlm.R
+128626597c68cf1d6bfe46edce99017a *R/vlm.wfit.q
+a65b9ce0f4ca8924a4d612dceb7431a3 *R/vsmooth.spline.q
+1fd723ab36f7d8d06ea80e7f0695839b *data/Huggins89.t1.rda
+5d76a6219b59d73d8606351a4e838435 *data/Perom.rda
+9813abe80f1fd180438de1b64a494d23 *data/alclevels.rda
+dc1953bd5b84c6c769b3216b6c9bfe8e *data/alcoff.rda
+c69d92ac37883bcb93de5c689f617c6c *data/auuc.rda
+e597da31ffc931395065afd49d7e1171 *data/backPain.rda
4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-837a49777680ee5db5bc19d979b41352 *data/car.all.rda
+ac781eb03705011daac38279dd8126d9 *data/car.all.rda
b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
-0f45f6779a3c3583f4edf68da4045509 *data/chinese.nz.txt.gz
+4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-354e62c12a62b99b1f6c71dff0c67d7c *data/crashbc.rda
-7168dbd2899219a06842ad9245a9aa2f *data/crashf.rda
-205e81b515e761af9ea0926e84bbf34e *data/crashi.rda
-7d5d4a36296b2867eca6b14325d551a4 *data/crashmc.rda
-8352cfe8d6008c38cfee1ad1b3d431a6 *data/crashp.rda
-f8cdc2708b60ec3aa810dcbc57987727 *data/crashtr.rda
-9e9daede0c8ac4a0d94c0124db7c64dc *data/crime.us.rda
-5914585931c1d74e9ce75c859ecdaaab *data/datalist
+a1736f849c17c8def4126685c80a27c7 *data/crashbc.rda
+710992c846632d4bb836e0db7754577c *data/crashf.rda
+d5308a91f8bb3ada33dc46b322cbbf33 *data/crashi.rda
+109603b8ff2aed220209e950e92dcea2 *data/crashmc.rda
+a2bdcbc61dd121258d7d44f4eab13588 *data/crashp.rda
+071eb8e5c533bc745bf06a166365d2a1 *data/crashtr.rda
08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-8a6e6874a36a0b0fce11b00f47ba5eec *data/fibre15.rda
-eacd8c708d059e1596542bf9fa188992 *data/fibre1dot5.rda
-e8b6cda757bcfc6ef70f39cae18000cb *data/finney44.rda
-f373bbea310609ccd4dba24d3b29310f *data/gala.rda
-8508a1cb5a09b65616ed9dfe1fc7a7a9 *data/gew.txt.gz
+b351998ad2ed6556fb83dafdbf2c0540 *data/finney44.rda
+3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-3f9fe2fafc59fbebe839018e2e2a9167 *data/hspider.rda
-66a528a02fc7bf76888cb436304e32c3 *data/hued.rda
-3224319b9eb26228a67fa1afddddbd21 *data/huie.rda
+b003bfd39730aa0656fc38ac2c347caf *data/hspider.rda
dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-a42b02a1b149d0f68924efe5a1677cfc *data/huse.rda
-e9f41116a56cb8b27abe4779cfe5edf9 *data/leukemia.rda
+ebf3caea112403777897aa8b631ac27d *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
-978765fe2df9da5d12ac22f7b76d6033 *data/marital.nz.rda
-86c034950ba10d2f51df20c07d6d3599 *data/mmt.rda
-1017612628ed904e97e5a426d307b16f *data/olympic.txt.gz
+75cf48caa6781de4a80496c31604b1ef *data/marital.nz.rda
+d692afa917e63fa7707495b25ae93bee *data/mmt.rda
+56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
+fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-42e0f4010aa801431f44233ca9f31bde *data/pneumo.rda
-cbfedcc4f17fd8a9b91e45254a0fd1bd *data/rainfall.rda
-21428fae28f282371d93975f1a57815e *data/ruge.rda
-c26f47d0d34a00f305777ffff683eefa *data/toxop.rda
+04b56fb5acddca81eb3916826a4c88a3 *data/pneumo.rda
+824247155f0456f146af38c8818314cf *data/ruge.rda
+d55951f9995a47976dcc28bd4c877a6a *data/toxop.rda
1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-5860454aa3f06e743f0a126d4bbe7d9c *data/ugss.rda
-cde476ebb7ed10e5096e608fc819c697 *data/venice.rda
-34655ef9a62c04a076581197337217ad *data/venice90.rda
+3be014e1cf99d07b22ca4757d4e43408 *data/venice.rda
+db2bece75f2f401b842b47b210541ed8 *data/venice90.rda
e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-3d0cfa016c4497d13feb41d146b12a03 *data/wffc.indiv.rda
-00fde79b032634a43cb03bcd733c5b82 *data/wffc.nc.rda
-e657c5db8078ce26fb46d85e0586b711 *data/wffc.rda
-73992544fe6110710bf257d34bdc9be4 *data/wffc.teams.rda
-75d3578e31965889ed7aabaaac670a01 *data/xs.nz.rda
+ad7680ca4b2ee5cdcfdc6efd64734e2b *data/wffc.indiv.rda
+4d0e86344820512b6e9d661b62c8df22 *data/wffc.nc.rda
+f89fc57a32f5dc7b3ac764ccf9010050 *data/wffc.rda
+0e5d28602f173f25c3ae8ae8ca9ab6d7 *data/wffc.teams.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
532aba4ad4cac611141491a5bb886236 *demo/binom2.or.R
a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
@@ -147,124 +139,127 @@ a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
d2c02ccaf4d548cc83b3148e55ff0fa3 *demo/lmsqreg.R
a3d2728927fc5a3090f8f4ae9af19e1a *demo/vgam.R
00eee385e1a5c716a6f37797c3b4bec5 *demo/zipoisson.R
-45d6563f929e021db90f9c0289e6093e *inst/CITATION
+60616e1e78fe61c1fd4acdf0d3129747 *inst/CITATION
+fae24431ceffb7f1c6390d81307cda6e *inst/doc/categoricalVGAM.R
b1a84a83b8fb788d31d509e17936b603 *inst/doc/categoricalVGAM.Rnw
-32e56802e5c4b20821e23c0edd0603a3 *inst/doc/categoricalVGAM.pdf
+a844badb9c938f40a4f3d76f6b7cb9a7 *inst/doc/categoricalVGAM.pdf
e4c5415e487f533b70695b17e40d97bc *inst/doc/categoricalVGAMbib.bib
-fbc2e1f379d2815d0dd19ab1b38ac031 *man/AA.Aa.aa.Rd
-4bab796e819b1a3dc50c612c79a9ed77 *man/AB.Ab.aB.ab.Rd
-af731838bfdeab2348710b41b09c8247 *man/AB.Ab.aB.ab2.Rd
-954a72710f0b6ae0a2a4013ba53134fb *man/ABO.Rd
-e205077baf82273656dade8e39dfd0f0 *man/AICvlm.Rd
-4c634c4ac3a9673b49e00a21a5edcac0 *man/Coef.Rd
-42eae1271b8c7f35a723eec2221a21f2 *man/Coef.qrrvglm-class.Rd
-b00890f6b16bb85829fcea8e429045b9 *man/Coef.qrrvglm.Rd
-7750539b34da20b20c40be62371fbc68 *man/Coef.rrvglm-class.Rd
-5bff76cdc1894e593aa8d69a6426b0b3 *man/Coef.rrvglm.Rd
-02efc2828e76eac595695059463d1d47 *man/Coef.vlm.Rd
-9293e04f06a3076e2030005bd2f84a78 *man/CommonVGAMffArguments.Rd
-4c84f8608e7e5a2a69fbb22198aadf95 *man/DeLury.Rd
-5bb061aa2d95a580d67ffd29200de30c *man/G1G2G3.Rd
+e77fe3e9c0a314c51ba4b36b8d56684b *man/AA.Aa.aa.Rd
+3d5d059af0e7d0c88fe059f8fed7e81e *man/AB.Ab.aB.ab.Rd
+038a23a3cfb521f14b4885e49bf0188d *man/AB.Ab.aB.ab2.Rd
+ccf14c227880ca872a7471cf5f7c94b5 *man/ABO.Rd
+37202536ea507b17bb8472e3fd1b78e4 *man/AICvlm.Rd
+2dda55df0947c86b4614e2d722efb713 *man/Coef.Rd
+e2087f40465b8feca48d61fb1cecfc6c *man/Coef.qrrvglm-class.Rd
+956683c1b81f04580aa6546a85c7d20a *man/Coef.qrrvglm.Rd
+9335dbbcdb81716ec556bf5bcf0be309 *man/Coef.rrvglm-class.Rd
+dd9202d518789994bd081f16a81631ef *man/Coef.rrvglm.Rd
+673fb7bdbda0010ee45586680a0275b1 *man/Coef.vlm.Rd
+9b60092b7d4f21ff458a0279096ef3bb *man/CommonVGAMffArguments.Rd
+06084db07bf8e6b2bc59dd0f40a23f8d *man/DeLury.Rd
+64b643dcd690b1eb601fcc70af495790 *man/G1G2G3.Rd
+fac93d02848bc713742065083217496a *man/Huggins89.t1.Rd
f7bc9b5114ed94e014016aed05b8e7d3 *man/Inv.gaussian.Rd
77388e0223539826ca69389d46f80550 *man/Links.Rd
0a95f8292850ef5b0fcf516400864c84 *man/MNSs.Rd
45c9ca6851177b813be07e2446614721 *man/Max.Rd
-2e0f16626b262cb24ca839f7313e8fb9 *man/Opt.Rd
+d11449e8d78b47fe2811767798a3966a *man/Opt.Rd
f9fb54b978cba49b278630f9403dd73c *man/Pareto.Rd
-c361935c5582a73d817e33febeec862a *man/Qvar.Rd
+a8acd542dbd768859c06a2b6811c0a13 *man/Perom.Rd
+02bd50562a32ff0a21d887511d222364 *man/Qvar.Rd
4273365f7ee730f68259e69fb65f7746 *man/Rcam.Rd
+e22155cf6e28945d43ed76d0d02e6746 *man/SUR.Rd
2db32b22773df2628c8dbc168636c9f0 *man/SurvS4-class.Rd
4f4e89cb6c8d7db676f3e5224d450271 *man/SurvS4.Rd
1f34fdf36c631e984d2a9f28bf607b67 *man/Tol.Rd
-943c75146bb5ef05028dde4481884d32 *man/VGAM-package.Rd
+35fb38864c1e10a928af13a607e7b4b8 *man/VGAM-package.Rd
41de97f0bacb4bedc36a589af710ff99 *man/acat.Rd
-20dd8ec5a2dd956f2dbbdfa237a138ba *man/alaplace3.Rd
-670cc88c57c693ba72d1ee1fe69743b6 *man/alaplaceUC.Rd
+d479795a5dfdd0949d86aa70fffc1140 *man/alaplace3.Rd
+8c0662467fc225892c1a1cde14e9fbf5 *man/alaplaceUC.Rd
af06e5a2e0552a8ef63756f1c3bce00b *man/amh.Rd
5e1012c84beb593f4558a9df064a3304 *man/amhUC.Rd
-f7baeef1c4920ff1040f87674cfe7909 *man/amlbinomial.Rd
-5bf31a5de5606026bb7d8ad5f3474552 *man/amlexponential.Rd
-16513a48808783de16394f9e0a381bbe *man/amlnormal.Rd
-6396ff5ca052ca79bee66e1e7d703395 *man/amlpoisson.Rd
-ba175111a99a5dd998a544b32e3390d4 *man/auuc.Rd
-e4b6fadd6f54fc3293c2d0016c7672c4 *man/backPain.Rd
+c034aafa09900eda5767b557ae18e665 *man/amlbinomial.Rd
+cdb087cd9e65ef96ba2e848dee9e4eeb *man/amlexponential.Rd
+6cddfc975ac4418a3693fbf3c810d96d *man/amlnormal.Rd
+8c0315925316e09ad8847a5bc960d478 *man/amlpoisson.Rd
+9f1ddcb0af49daaec702a1284341d778 *man/auuc.Rd
+bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
34b5510370a46ab522a754c731a437be *man/benfUC.Rd
c1483ea97ab8115ef70f90bc0984ac6d *man/benini.Rd
b3e26d0011014d3722b4ecb3675c4aea *man/beniniUC.Rd
-084de566e49c6576179252616603f88d *man/beta.ab.Rd
-35e3e02fe0995db0290ca31c4ac5d7b4 *man/betaII.Rd
-41820caae54231fdfe4f43c64c8b2aa6 *man/betabinomUC.Rd
-2e338ffe0772901aca870d11acb5e072 *man/betabinomial.Rd
-55283e8cce35112fb0c664219b92b6a2 *man/betabinomial.ab.Rd
+28d965f409597a6485f3141173f901a3 *man/beta.ab.Rd
+91deeb79a61f94c1af5d7ac251132821 *man/betaII.Rd
+72c00470a5c89c7ebfc9e695da9b07d4 *man/betabinomUC.Rd
+053b67bde772fc8d0e96b5b0ac5ebc6c *man/betabinomial.Rd
+504ee243a39c7173ac40841afe16339f *man/betabinomial.ab.Rd
be38265c59ae5f15c757009310e14a92 *man/betaff.Rd
da3fdbf88efd6225c08377a461e45c50 *man/betageomUC.Rd
-30933e446c25b25f33b59d50f596d6c9 *man/betageometric.Rd
+63ba9c485c5d5b4962fa8e215f4ee87e *man/betageometric.Rd
aa6ee6bd6c48de8d03f18a80b836edae *man/betanormUC.Rd
f568faafa4b67d1f0bf9ba07ddc4a7f3 *man/betaprime.Rd
-7adaeed3dae23da1a0cc5eb9358d4597 *man/bilogis4UC.Rd
+1cf45cc5335d55c0a46d1e7df469ce3d *man/bilogis4UC.Rd
b81f6ad16bb834d3fde123062ba31ec8 *man/bilogistic4.Rd
-929e542ce0d1937818bbc7a28c595927 *man/binom2.or.Rd
-048aeadf836fe881f654f34004ae7040 *man/binom2.orUC.Rd
-27716f59421fefe451a8dee31527d1fa *man/binom2.rho.Rd
-34a781218843e7b670c6192867ea40e9 *man/binom2.rhoUC.Rd
-023dfaa228619f7cefbb20566c36433b *man/binomialff.Rd
-7e87b855d981532ef91977c44baa59e4 *man/binormal.Rd
+7e042a6903115d2eb77d0ef3a35cd8ab *man/binom2.or.Rd
+1f1a653e623b2abbb4662b16070019db *man/binom2.orUC.Rd
+a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
+c3f3f95519510e5a324c74369bc52a63 *man/binom2.rhoUC.Rd
+7dcb53c5b43d65f3837a65463e1f5612 *man/binomialff.Rd
+85bd227a0d4ae18c5511206758f982b3 *man/binormal.Rd
bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-84a98434cb39c14a367de2215e72c22b *man/bisa.Rd
-8dc011224820b9c25d52ac088d6c330d *man/bisaUC.Rd
-1190d249811d1a2d7dc952f8af02e90a *man/bivgamma.mckay.Rd
-342d3d5c9931bc7327dc44d346c402f6 *man/bmi.nz.Rd
-df2a69a92e00c0433cc8f83ad970c89b *man/borel.tanner.Rd
+3c8ee4feffa56a6e15b24f0c502026c6 *man/bisa.Rd
+832abdebf1e3013d0421f5012efd3a7e *man/bisaUC.Rd
+59d5b0478df13fc8ca7c6650e70105ac *man/bivgamma.mckay.Rd
+81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
+44f06f92ed85ef1cf5e447ffed182989 *man/borel.tanner.Rd
4e692566eefaedf275e8693ea2f6efbe *man/bortUC.Rd
7bc3641f9f81a4eb77a304103e5f1dcc *man/brat.Rd
0eaf999500ce9554156f37acbfe1e01a *man/bratUC.Rd
b4c37774de88cd2f3f8f5e89ced2b491 *man/bratt.Rd
f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
-702754aad58a33aba1594bc7d2d45acf *man/calibrate.Rd
-6cc85adda04a13e2ef01e0da265b67fd *man/calibrate.qrrvglm.Rd
-7bc25736ab5e60ead3c3bb6a34e34aa2 *man/calibrate.qrrvglm.control.Rd
-7308576228b41ce02ac3b9f61c8f9f6e *man/cao.Rd
-f15b81668cd82879e8f00897fb30eea9 *man/cao.control.Rd
+8ecd34f0a725bf795101738a60bbb401 *man/calibrate.Rd
+483b5be2dbbd2d6281d08e730e0e607d *man/calibrate.qrrvglm.Rd
+6b6e9dd2da2d784fefb5144eb0e02818 *man/calibrate.qrrvglm.control.Rd
+ef9e501f27ab7c71b817615b21405bfd *man/cao.Rd
+e8c2f9b88e37763580bf77f68b0e8fc8 *man/cao.control.Rd
e4b532eb5880648443b6fc60b31fbc36 *man/cardUC.Rd
-6ce12b5487a1650d3289522fbb73e0c2 *man/cardioid.Rd
+7aea0f32a547bc26d3dfaf65aab3a8b7 *man/cardioid.Rd
288036a65bb6f386d29a99dd40e91a32 *man/cauchit.Rd
81d694e2aea915b2d8ed6c406f517baa *man/cauchy.Rd
2ab80616c05e7aebdcf769c35316eab1 *man/ccoef-methods.Rd
35499ce13b26395bc61c5931d202cf24 *man/ccoef.Rd
5985b55cbfe98a8a7d2b4de3fe3265bf *man/cdf.lmscreg.Rd
bd25f55e6466226cb79f74482f793a3f *man/cennormal1.Rd
-15ae61dc3c4394f9c3d0dd89c2d337b0 *man/cenpoisson.Rd
+d2156c3ff1e1ecaa38eaa4bbfe3649c0 *man/cenpoisson.Rd
a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
-3780e11c1ea1d54dcf57137fe1179390 *man/cgumbel.Rd
-8b1f242c28ecc87b8f3850ee789a144e *man/chest.nz.Rd
-488c3d97209a21d15ee76e547f3a7d99 *man/chinese.nz.Rd
+b6cb82fa50d35036cd635f8b1a1a4ec4 *man/cgumbel.Rd
+1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
+8b159dce27c0461aa7ce49eda949f697 *man/chinese.nz.Rd
d58b97e7b28882f689a67019139cef86 *man/chisq.Rd
8ecbb478efcf4b0184a994182b5b2b94 *man/clo.Rd
2ebe24734ed0652482c35da374b660db *man/cloglog.Rd
1aa6ee888bb532eef1f232c9f6a02b5d *man/coalminers.Rd
-5fdafee68a84d78df4a63faf2ad313a7 *man/constraints.Rd
-5d2914e0a13b6c6eb815e8286c5f36b9 *man/cqo.Rd
-30051aefddc0470b8a4ed3089f07cc68 *man/crashes.Rd
-7633b255b36ed442cd8fbcb4e86f2f0e *man/cratio.Rd
-6fb9db2b54b6b351d5fa6ee4c1e0334e *man/crime.us.Rd
-5c9d818d5d737e1ed673bed73e32d356 *man/cumulative.Rd
-95759e81b76b715b322489284d72cbcd *man/dagum.Rd
-69387a098ea4f01d352f9b3faafbd504 *man/dagumUC.Rd
-fab5adfeb805c5aa673ed7377f4fd78e *man/dcennormal1.Rd
-b2a696abb80c47fa0497c245c180ba13 *man/deplot.lmscreg.Rd
-7f57d255543bc7d13dadf322805c99c0 *man/depvar.Rd
+9250590d8aae1e18953bbc50cbc922d8 *man/constraints.Rd
+7564384537e0ed18e6dcac3e0df5b32a *man/cqo.Rd
+2f595bffa2e5d997ae33fd6ca7e3f22c *man/crashes.Rd
+e591cff73505c3e967aea2aa47a4dddf *man/cratio.Rd
+51843053ae7e7f2535986ba9fa8707e8 *man/cumulative.Rd
+c909335c97a9ae26916016dfcc31b804 *man/dagum.Rd
+97868e30408a4a35750f9692f5e87b68 *man/dagumUC.Rd
+e04b86db7152a2d8633c16f07e389357 *man/dcennormal1.Rd
+fa3351f5e58b37cd7c452ee0a991d76d *man/deplot.lmscreg.Rd
+8c45fa4b18d6cfd8fec96f8071cef604 *man/depvar.Rd
c4b52569e78545a35752e1368c2c16df *man/dexpbinomial.Rd
6c6f8430f3c65c7ba3ce883eb2c9ad7f *man/df.residual.Rd
d21eb844e77835fb1d6ae46a2b112a97 *man/dirichlet.Rd
825897c6d06a47e9ac809bd2251cdb68 *man/dirmul.old.Rd
77a420a5a6ec80e1af4ed8074d516766 *man/dirmultinomial.Rd
-f22567676718224898e78ee587bfaf7a *man/eexpUC.Rd
-fe902b6457a11d51c758018d9dad7682 *man/enormUC.Rd
-7008f7c3d5c5cb178b2ef1d6d2aa8c27 *man/enzyme.Rd
+844efd17a8d861d7cd173c64f1c8173f *man/eexpUC.Rd
+d512d29748153b09903ac96efa50a8d4 *man/enormUC.Rd
+72492c419917c144ffadc656ee56a63b *man/enzyme.Rd
a29f442ce60d8ac8185738242b4f49ce *man/erf.Rd
159ea23d4b4c5e3d473abf5c7f7db841 *man/erlang.Rd
-e3446627fdcccb65abbeff03a109b6aa *man/eunifUC.Rd
-233a9e25094ef11cfc7aa858f2cc9c15 *man/expexp.Rd
+55dad4e8509a4d3522f6c06f53093803 *man/eunifUC.Rd
+607d45ed7e4eaebf6cac40c14a57eda0 *man/expexp.Rd
f5c104469adfcf4d21cb4c8c525c0850 *man/expexp1.Rd
391ec14ac5da161f67cb01f91bf474cd *man/expgeometric.Rd
bba52379a93d8f2e909b579215811554 *man/expgeometricUC.Rd
@@ -277,47 +272,47 @@ bbd414bfb50f4be140ac6b66b29694cd *man/exppoisson.Rd
2cb7a7ffba4a046d1205295d75d23a18 *man/felix.Rd
0bfa97ff4d9eead46aa1a822e2c231c7 *man/felixUC.Rd
77038da711286677c94066f9326b2a20 *man/fff.Rd
-60dc65a9677bfa00c99ccdc0bd2449d2 *man/fgm.Rd
+b85c54aaade0e94059fcdfd760c23cbd *man/fgm.Rd
0c4744ec66aa44b14f5c3dd2d79856a1 *man/fgmUC.Rd
-0f91dd411c054004631a677eda63db79 *man/fill.Rd
+725193beb8ca3f28903db56ec6d50767 *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
-6c1e3ad4431df4a8f949ec87d523de03 *man/fisherz.Rd
-0cab527544d71e1909b24a4be8a11f69 *man/fisk.Rd
-e8265b669964f68bedc38035251bf595 *man/fiskUC.Rd
-9b60e6d859114ce0c7a47f87456dd656 *man/fittedvlm.Rd
+6bb9c425367a0154d70bb5baa702b826 *man/fisherz.Rd
+464a5be86b451beaef25e096cff36273 *man/fisk.Rd
+8215ca60f756bf8f9f2e3b404741fbd7 *man/fiskUC.Rd
+81d03e605f6e9bfc48c612dd6369b51e *man/fittedvlm.Rd
e3ffaf55fb9c925685d1259eedc4fd3b *man/fnormUC.Rd
-2d7d7f37e64c9ad1d896dcea590ee4fc *man/fnormal1.Rd
+a449dd872d994d44bb6f7986249f8784 *man/fnormal1.Rd
80974c2814d703c1c1d4eab536f656a2 *man/frank.Rd
e6d4221fd51756a2881065dfc303edef *man/frankUC.Rd
-b60b1268713121e14fadc654729842ab *man/frechet.Rd
-2716982ec8d58016f0d08737aecd8843 *man/frechetUC.Rd
-ef897e4618c5244c2a59dde719f011d2 *man/freund61.Rd
+d08c0b1aaf965520260ac15ad66a8d9f *man/frechet.Rd
+0e54e074f0de1b996e1f38fee8d1f844 *man/frechetUC.Rd
+3f27614050eac4ca6b793df27105cdbc *man/freund61.Rd
2b392459d756beb1213250d266c90076 *man/fsqrt.Rd
97b73c666866f4daa6e5be208fb7fee3 *man/gamma1.Rd
-0ae1b94f9b6384cb4084dfd3a04861a3 *man/gamma2.Rd
+5edcb17bbf9d4e0f7a6f96ed709b5ed1 *man/gamma2.Rd
c0e3957aaf1b96e0a35a2ea95c023fc3 *man/gamma2.ab.Rd
4aeaf1f465f97afa3305a6ed9dcb049f *man/gammahyp.Rd
40973d8617d8769e4cf70b17d9b19846 *man/garma.Rd
-446118938e1448f78ddf8ae797495d60 *man/gaussianff.Rd
-3f6f548d8e09f030cf675128e5926bfd *man/genbetaII.Rd
+3013563566e6982b6e1b939e48cf9c6e *man/gaussianff.Rd
+df1c376b3ca400ad967513a8f3b1da44 *man/genbetaII.Rd
ac349c9adadfadb8cc9a574409c22956 *man/gengamma.Rd
bd63e15c3ac9ad8a8213d4cdc8bb3440 *man/gengammaUC.Rd
c572a5a90988743fd046d5332bef6497 *man/genpoisson.Rd
b1c3656df6f641f918c4e5bbd4fb239f *man/genrayleigh.Rd
c31e093e7b6e5a4a7959ba6404b85a23 *man/genrayleighUC.Rd
-cc6be93cb89e2eec6efd5ded2448285a *man/geometric.Rd
+ad1646249e1de561bdd9fe261057a97c *man/geometric.Rd
78b7d9455f1eaa4572ff54427d77935f *man/get.smart.Rd
14a7e2eca6a27884e1673bd908df11e1 *man/get.smart.prediction.Rd
-c8382766873c747985f8b7fea99704db *man/gev.Rd
-e4c037fc281c8a6948962264493baf94 *man/gevUC.Rd
-690b69d50e92a781720cc547dd22c3b4 *man/gew.Rd
-ee5c919188e3d8ad589ea8d98ddd3ad8 *man/golf.Rd
+a7cc3a8b2ab30458538d2f36279135aa *man/gev.Rd
+838c81d8d6c94f4f3ae49df0b25d1cfa *man/gevUC.Rd
+f87241a6011f5f5a49921a1842a177ed *man/gew.Rd
+e85bfce5bc1b53316766a1edea3f707c *man/golf.Rd
5cc8c0cabb839b34f4f37de4b57f4428 *man/gompertz.Rd
3affd7c0ae94702950fb738253059a68 *man/gompertzUC.Rd
81d287969447618149d22113fa118d40 *man/gpd.Rd
-9c77b9e29e9364865bfd8bf0c7143437 *man/gpdUC.Rd
-d262446f558ffbaba51cc8ff86e5ab1a *man/grain.us.Rd
-1daecbfc273e25de8e6811cb7803c788 *man/grc.Rd
+54b49cf2e3ba865dc7c9297948036d9a *man/gpdUC.Rd
+3f3f9b4cb1bd341a9c4c063594516611 *man/grain.us.Rd
+21550b13a293b7e3062daf1fba963c09 *man/grc.Rd
3ffdad5594e4eec6062097a5c7c974e7 *man/gumbel.Rd
a6df41a1cc82c1744cad46ba89a5b161 *man/gumbelII.Rd
2127127ee0e62bb2cefe05462bee7c39 *man/gumbelIIUC.Rd
@@ -325,39 +320,35 @@ a6df41a1cc82c1744cad46ba89a5b161 *man/gumbelII.Rd
977ee282217151a6c5b83867eab32573 *man/gumbelUC.Rd
fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
c1a9370d3c80cd92d9510442da0ff940 *man/hatvalues.Rd
-5914e78d3a007ed9338d2a94e07e9f36 *man/hormone.Rd
+bed7fbc305bb784fb723242146e2ac9a *man/hormone.Rd
57a5f4c37dd40a74161489df6759fcd4 *man/hspider.Rd
-769c424052e85555142f8c4551282fa0 *man/huber.Rd
-fe68021175fa4c20ade86f55db7b5443 *man/huberUC.Rd
-bb9248061e4bcf80a1f239192629dd44 *man/hued.Rd
-dd719768426a76fe0d017f0b1975bdcb *man/huggins91.Rd
-80c6c747a9f873fa6e8e40565a0a9665 *man/huggins91UC.Rd
-d44f3df87816b5cf0f1ef98315300456 *man/huie.Rd
-3cb4fc1b3a7f1a6bcf7822219ac25525 *man/hunua.Rd
-08383189cb05fe01a3c8a5fa2e2c78c5 *man/huse.Rd
+b9ed0e8079f4e57429b4647193c5cbc5 *man/huber.Rd
+ea67b113e21bbe6197fff2358cb47179 *man/huberUC.Rd
+b330f328e4d6f0db4928a92b30611267 *man/hunua.Rd
cd473192d2153433bee1530bce881972 *man/hyperg.Rd
34ba5a500d1e9395c1e6761334232c0e *man/hypersecant.Rd
63751a4f55b918aad163a53994a01a07 *man/hzeta.Rd
c3ca61cb9f3d309e8a08dd528de7d994 *man/hzetaUC.Rd
1e31e772997c2b18bc113d77e1e0e176 *man/iam.Rd
f4dd596dc646925e2c68c9679c799472 *man/identity.Rd
-da60bdf0881d1c9ba435a6455fe214bf *man/inv.gaussianff.Rd
-085be4050d440ab7aa197da463927e20 *man/invbinomial.Rd
-00895467cdcea7ae66dfeccb3d84366c *man/invlomax.Rd
-01cb2a27a9c0eae7d315f3ca158749f5 *man/invlomaxUC.Rd
-8d0593ef6ef39c02009383bc4e5c2dfc *man/invparalogistic.Rd
-6a8c2453b40d2f3badd4d9c0bb67d907 *man/invparalogisticUC.Rd
-1bf97bf1064b8487d9b18f648a2249f0 *man/is.parallel.Rd
+3f07920de00eeb5766f5fbf545e792f5 *man/inv.gaussianff.Rd
+77d16112e2aed1f927ca1d0f4cee0a18 *man/invbinomial.Rd
+ceafec1c5c64f77d3bf0e39bee2b0277 *man/invlomax.Rd
+93c76dca757056d75f7978328608cce8 *man/invlomaxUC.Rd
+5aeacd9294068b2ea86d1f7269c56965 *man/invparalogistic.Rd
+d5b78c1484a4756f09a7f109c753626d *man/invparalogisticUC.Rd
+f70dc86e1c466a9dd45efa98a5445fc8 *man/is.parallel.Rd
a286dd7874899803d31aa0a72aad64f2 *man/is.smart.Rd
-b829d9d0aa0947644b415535a4ed5be7 *man/is.zero.Rd
+1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
30a15dcaa326928e71982bc7306a79cf *man/koenker.Rd
50dded53a59735a07217074d8228393f *man/koenkerUC.Rd
0d9800aa2eb316c662b36593ac2c74a6 *man/kumar.Rd
8756e8c50075f92aeede56aedff7d2c7 *man/kumarUC.Rd
-6f2f641c0cb15f24ec1777d2db159459 *man/lambertW.Rd
+7b2e3a9a2fae362f36bea1ab5539e6f9 *man/lambertW.Rd
0c7294d5f5b568a23c2634a86a07f62b *man/laplace.Rd
7310aca7179d6f31d9e0da64944e8328 *man/laplaceUC.Rd
-f35539501667121c53abd0b1e448b150 *man/leipnik.Rd
+2aa7fa15b90a2e05cb9c261b192040fb *man/latvar.Rd
+a75f79d7fcb3ce0380768c06fbbf0e4c *man/leipnik.Rd
c93045a9f05888a4675ba3d48e70e7e7 *man/lerch.Rd
8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
13b2cc3332ac9559d5d47790a8e206e1 *man/levy.Rd
@@ -367,37 +358,36 @@ fd33ebb21f7ab741392b8c15ec54f5e4 *man/lindUC.Rd
7ca83cec8ecb2fd661ca66bba89dc411 *man/lindley.Rd
59375533957aa583acf12b0b44b0d718 *man/lino.Rd
9c786943dcad40f95f4dddd3ff0f37db *man/linoUC.Rd
-9a021048d7a9c594643d91d3d4b234cd *man/lirat.Rd
+b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
fc9016da8aeb1d1bb210ef7274f9da3d *man/lms.bcg.Rd
-688d994bbe84b5ed2b1cc962037f2721 *man/lms.bcn.Rd
+111314b39e384cb6a87307d87cad309a *man/lms.bcn.Rd
6e2e5248c45084fbcb0090b86f7f3f46 *man/lms.yjn.Rd
0d35403673c679344da32f978a2331b2 *man/logUC.Rd
f0502f0505925ca9d48e6e3994f278a0 *man/logc.Rd
-8e5086b9f1709bb02e1ea438d6c88297 *man/loge.Rd
+d962e7f739d3d752e48ceeb9d5f256c9 *man/loge.Rd
2be2b998e9b4d3d32e72f2c9e0662273 *man/logff.Rd
14c728f5bfd8968fc74390f1cb95dc44 *man/logistic.Rd
-74e267e8cbc018f13583babaa3ab73cf *man/logit.Rd
+8d40cf7f3736ad9219312e228348711c *man/logit.Rd
1f63716471926cf3baae3150c94beb74 *man/loglapUC.Rd
a570e779c1f0741c4196a0982fdeddb1 *man/loglaplace.Rd
-43012be50bf4ad3610f50a3609f80b20 *man/loglinb2.Rd
-54e34264cb73f9d54c4c412af81c17fe *man/loglinb3.Rd
+9217cff35cff9e9e1394d54a30a20ddb *man/loglinb2.Rd
+480a45fd3cf55ef81365ecdb397e8fe2 *man/loglinb3.Rd
f1c11784dff391acf166a8986d434354 *man/loglog.Rd
4c6053656b2fe0276fbe1a99b0174238 *man/lognormal.Rd
e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
-929d46b782f13e591d4989724343cbde *man/lomax.Rd
-06ca5cde9d161d2320f87f6b2fc04aa1 *man/lomaxUC.Rd
+5ce7aa8f16e81795577cc553d40a1e9c *man/lomax.Rd
+9281fd7fad7d154a35ae0534cf4d2e3b *man/lomaxUC.Rd
950443559c152cc441b4b08dd5c7e12e *man/lqnorm.Rd
3f48084e64cd4663677fc8df8e4ecf3d *man/lrtest.Rd
-49f8def752351e1f34beefea82985ca4 *man/lv.Rd
c066460c787fa701788c400e56edbf80 *man/lvplot.Rd
-f909e728550a7e0e95f17ec7d12d0a85 *man/lvplot.qrrvglm.Rd
+8b3ee5b0b1b1ec9659882b0d75a786bc *man/lvplot.qrrvglm.Rd
30f7cce914cf36078392189f12c0670e *man/lvplot.rrvglm.Rd
-9aae7ea097d087c0acfee0b7358a997e *man/makeham.Rd
+6fab686982d148f43e04ca4674dd14cf *man/makeham.Rd
f459ac6b3f9453e0fb6cf4dfce393b64 *man/makehamUC.Rd
a836cdea396e90233979a1065e9aa401 *man/margeff.Rd
b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
eae8c8d703abffa56be56cc88743822c *man/maxwell.Rd
-e01c8beb637aca15dd5aaee412b5c3ea *man/maxwellUC.Rd
+1fc207ea724c1fb681dc0805733571ba *man/maxwellUC.Rd
ad6f24fe862c9936ea99033ba89d4fcf *man/mbinomial.Rd
d0ba1cb515890aa57df222840a8ba7d4 *man/mccullagh89.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
@@ -405,46 +395,53 @@ d0ba1cb515890aa57df222840a8ba7d4 *man/mccullagh89.Rd
49ed6c8e6d160b323f1f2acd75d5daec *man/mix2exp.Rd
2a272b10b746642a9ee5bbc6cbfc9511 *man/mix2normal1.Rd
908970d91303cee973dba82825fabd4b *man/mix2poisson.Rd
-815499481774f0be63eda5da52650954 *man/mlogit.Rd
+6cc2c2af7e4107aebccbe4809d649033 *man/mlogit.Rd
e41c539196b04b87d33595a73efef01d *man/model.framevlm.Rd
73bc45aa0257f78953611c9fb6daba39 *man/model.matrixvlm.Rd
85d73b769924c10e91065f87bf237fb7 *man/moffset.Rd
7184b188c705a6e326e454f859e76f1b *man/morgenstern.Rd
-a7808eda65b29d6da616e5ecaf83b063 *man/multinomial.Rd
+21bb447049798227c4080791cb1157b3 *man/multinomial.Rd
0ef36351d132ce1f91580c5f71237f39 *man/nakagami.Rd
c69bfdd1afbf8ea05b2d37c27f2b097b *man/nakagamiUC.Rd
-f18d4e0e5edbaf7f33417e87e8b9317c *man/nbcanlink.Rd
-effbf6636c9e903cc25b4428e7bc3b60 *man/nbolf.Rd
-7a99bf77f55ae58fa6036072c2685258 *man/negbinomial.Rd
-ca68c753f150159fdf7c91f53e800b4d *man/negbinomial.size.Rd
-e0b6546fb8d6bb8a5e2506dc89819178 *man/normal1.Rd
-2e6a59a3d8e48a34dd04a2fda189a23e *man/notdocumentedyet.Rd
-8a118515f4955e425adcd83f7da456ec *man/olympic.Rd
+498f65c2a4248ef79d9d8ceaef534069 *man/nbcanlink.Rd
+cf0351ecf6456216e465895afff76ad7 *man/nbolf.Rd
+5f085d3658315ecf2f70d91b422d1baa *man/negbinomial.Rd
+0b6168d2b3d79f02d51dc1f185ad7d35 *man/negbinomial.size.Rd
+70653b46108e5e99fcc5b23b7fe97dda *man/normal1.Rd
+29a2a7258f41ef47450d2de1c261ae87 *man/notdocumentedyet.Rd
+dd58e372f599256d80973bc07c85597b *man/olym.Rd
0c48bfcd8e3d21e919b3c0f55fd4d8e2 *man/ordpoisson.Rd
-c0074d4c77ded24e50fd3fe3668a4011 *man/oxtemp.Rd
-21cf3000f5edd5c31cf53cb0c9ae0f7c *man/paralogistic.Rd
-7aa703a30747006ad5b2628fd5e593da *man/paralogisticUC.Rd
-2b2712df539d0a0738ac618669767905 *man/pareto1.Rd
-26cbc4f613bbbd8c907337e3b203ae07 *man/paretoIV.Rd
-4874d4d01dff5685441b03e23b02746c *man/paretoIVUC.Rd
-79f129c66a04bef03318c6efe6d6aaea *man/perks.Rd
-a489c7a05dccb46a72f15269c8bdb5ad *man/perksUC.Rd
-bb672a5c452bbe1a01fe06657dccb7d4 *man/persp.qrrvglm.Rd
+025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
+24a97e3b9709df47d079f4e2665f497b *man/paralogistic.Rd
+2fc2cf7200b0f4409471aa2e584168a3 *man/paralogisticUC.Rd
+85ba1e6e60fa44f0f79e789bab5616d3 *man/pareto1.Rd
+7d6736ddbbfb94188b43ee784cba88a7 *man/paretoIV.Rd
+00859ab21f1eb0d605d68c2ad78c771c *man/paretoIVUC.Rd
+96c9b961001987506c9e736f053ac2d6 *man/perks.Rd
+e03cf5b8c36eb729c3f9ab0f1520d505 *man/perksUC.Rd
+e3241c34fea9817fe468c92eaeb8ca65 *man/persp.qrrvglm.Rd
+a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
+8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
b6d928375ee9738785be7ec7fa66d277 *man/plackUC.Rd
-1312b1dda42c2f696a2824e2bd0e2ad0 *man/plackett.Rd
+06966c021b6214237508543c52109d57 *man/plackett.Rd
791d04a5c3a3bc514bf0ed1fc639f8ab *man/plotdeplot.lmscreg.Rd
e6eaf56a6f7b23ede6cbd92dbce502ed *man/plotqrrvglm.Rd
958dcd119ee66e5d5318c4cf19f024f8 *man/plotqtplot.lmscreg.Rd
45ee1e3b4fe0a2577f5ea8732f1db0f8 *man/plotrcim0.Rd
-db9c5b2ca7fd4417d4d88d02317acebb *man/plotvgam.Rd
+613de2bdef6aabc49d265fd1f9ee3648 *man/plotvgam.Rd
72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
-aa55e676b3fd0fab0f1aee26ab9fa6de *man/pneumo.Rd
-1cb05da296ec9389de210df4d27e71c9 *man/poissonff.Rd
+bbe8bffd4bcfa945d9573d135bb543f3 *man/pneumo.Rd
+9f2d37ecfc67140980a2870d0101f743 *man/pnorm2UC.Rd
+8a2b05c37dc154659b9783eea0c5808b *man/poissonff.Rd
dab0255f3b6f88ca8362af2570311a2e *man/poissonp.Rd
-ed23d712bc7ffe5a7f70481774e1e827 *man/polf.Rd
+fe262a77e1fef4fd1c795b198d040bda *man/polf.Rd
2b1a116706ced6399a4248853e001d89 *man/polonoUC.Rd
-f8d8123a109be7db427120a4b67513e3 *man/posbinomUC.Rd
-a6c09d4d735df69c71432b9b801216e8 *man/posbinomial.Rd
+43997b2ec625ae0093dc7485034085bc *man/posbernUC.Rd
+00637f43cacf2b2fe91af295fe378a66 *man/posbernoulli.b.Rd
+f05048b373dfce9317ddbabb088ef0f1 *man/posbernoulli.t.Rd
+392ccdfd3c141d7654aa10bac5136d04 *man/posbernoulli.tb.Rd
+8953a5a5559f58d0ebbabb3b0e50ba99 *man/posbinomUC.Rd
+cfdbefc16cb1001c3027fedd64e65f66 *man/posbinomial.Rd
6ec345e5d20c36bdde7b7d09c9b71893 *man/posgeomUC.Rd
d14c926ed9841f43e6ace38ca9a7529f *man/posnegbinUC.Rd
ac1f3ebc8db196c11356963d4f82d509 *man/posnegbinomial.Rd
@@ -460,14 +457,14 @@ f26232b73e5f0c2f323d019ba9e46ada *man/probit.Rd
811cfe4a15b3b140c48d930e3172a195 *man/propodds.Rd
ccdfc3f7df34475385a243eae0ab5877 *man/prplot.Rd
de570e252375d7052edaa7fb175f67eb *man/put.smart.Rd
-438d995cac8b7eae527bf97188e97f92 *man/qrrvglm.control.Rd
+9e2f7efa937bc97a63800de0afe9455c *man/qrrvglm.control.Rd
ddfc6463c5266b7dd79c7a7e9d3f8f6c *man/qtplot.gumbel.Rd
7894f8d45225244008021bd30565ea32 *man/qtplot.lmscreg.Rd
eb986116765a0a7229e0988a343f1b6b *man/quasibinomialff.Rd
c2efda0141a3df852b775baa18af0c7a *man/quasipoissonff.Rd
67da92796b1e1d1f8866fee2c8cf4954 *man/rayleigh.Rd
02bfbc64253593edfa891a19f33acd89 *man/rayleighUC.Rd
-c3854f1526ca08f961489ef371183939 *man/rcqo.Rd
+ff8c88be946408af6bf1b0931033ee4d *man/rcqo.Rd
1d9601bd76b8c0cddcf567b144b5ef89 *man/rdiric.Rd
385bd032acb1f2925c49a7748dcb8631 *man/recexp1.Rd
2af6888fb0758a9fdaf45fc72f844724 *man/reciprocal.Rd
@@ -477,19 +474,19 @@ b70c93ab6124de167a4ccab2f8fc2221 *man/riceUC.Rd
7471692a618c57fe5f5137deadaef4f7 *man/riceff.Rd
5cfc734589e404f286ce8cda342344bd *man/rig.Rd
258a5e119f601399b04d4dc51ce2e4ef *man/rlplot.egev.Rd
-2b81fff5b22c9f72773a61d5e39a8747 *man/rrar.Rd
-ed93c6e06d519ab3ddb92c73cf62bb67 *man/rrvglm-class.Rd
-9a90884892c72a0d48bd33ea0a13e4ce *man/rrvglm.Rd
-b104826904e5b6dfd293fb60aaa4dccf *man/rrvglm.control.Rd
+801fbf593c190957e8abd87b1a5bbbdf *man/rrar.Rd
+ae184e5777e6d580e7200434a99744e2 *man/rrvglm-class.Rd
+8ba13aec3e907579d7009e2f648daefc *man/rrvglm.Rd
+df2e65a3466384528c48da00a8dd7293 *man/rrvglm.control.Rd
493070deddef6815cdd2de211f3a65db *man/rrvglm.optim.control.Rd
ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
-b8b40b0a50bc2cf97bfc45b4b250a7a4 *man/s.Rd
-49804a5ab4ef29fd6b394b9fee5b18ac *man/seq2binomial.Rd
+850477e7023b0617c4dd9bf177881736 *man/s.Rd
+3e48779e7f6cb3965b6b97a3cc6c840c *man/seq2binomial.Rd
71367fe3b494a45c98f9a96e1fd791e0 *man/setup.smart.Rd
22fd8f8f7a559acaecfbca2c6dbe5818 *man/simplex.Rd
7cdf80a6cdb171d1f6f9ae200422b159 *man/simplexUC.Rd
-198cfe54eeb201c3e5de6c16c14afcaa *man/sinmad.Rd
-077ac803be0b8fe390a59faa5a32523d *man/sinmadUC.Rd
+4d13e6cf2248dde66a69216540cd2e87 *man/sinmad.Rd
+754b3dbc268f1df1bf8f675da6a2ebf8 *man/sinmadUC.Rd
8555a29368f14ba2a2ead5344f4ae716 *man/skellam.Rd
4cdec195b127858706897733934dffc4 *man/skellamUC.Rd
094fd596b913d88f9941bb26396d4b72 *man/skewnormal1.Rd
@@ -498,88 +495,88 @@ b8b40b0a50bc2cf97bfc45b4b250a7a4 *man/s.Rd
1ed10e28c013e2e08ac5f053b2454714 *man/smart.expression.Rd
163cdb3e4a225aceee82e2d19488d56e *man/smart.mode.is.Rd
2b68a9e20182e8892bb7be344e58e997 *man/smartpred.Rd
-6efb329ba91500aa45ba2f3706e1f331 *man/snormUC.Rd
+d48e1e2fa242ba626e652480e84b0a43 *man/snormUC.Rd
3849f780d823a1a0aa67bb65ac35510e *man/sratio.Rd
-9b172b6ef80fc2e1b5b00b3a0aa1dce7 *man/studentt.Rd
-ed3bff9c47db0c26084efc1a74454f2d *man/tikuv.Rd
-d6c0077cad16ec5218cf5ca71898105a *man/tikuvUC.Rd
-076bb1dac7293c1de7f2ecd9f5f5fec5 *man/tobit.Rd
+3fb3e5774481ff1af1ab3dd012fd37c0 *man/studentt.Rd
+2228be8da02861f85cd2bf77d409333f *man/tikuv.Rd
+c0a24f0780ee14e1aadcf261ccf2d80b *man/tikuvUC.Rd
+caedfadbe16b9c5e83dc81c74ba4e20d *man/tobit.Rd
95db69c0da2ceff7fcb86d6893a861c9 *man/tobitUC.Rd
-f5ad31498c55094320a6c5f8632a3ff6 *man/toxop.Rd
-d4859684f7ab3f490a5f7279c5a1bf0b *man/tparetoUC.Rd
+5e27256f78d67206249604fee70af378 *man/toxop.Rd
+dd9c86342f896f1b28763fe16a615910 *man/tparetoUC.Rd
39423c1ea32c5ba0d4286b815ad2712d *man/triangle.Rd
a262cd49e16acd6fb583cb2aa0fc5a94 *man/triangleUC.Rd
304a7f28494e6f4a3f6e6bb42d02671f *man/trplot.Rd
-d7e22cc248287250fe6308ffdfc9e0ef *man/trplot.qrrvglm.Rd
+df89cf9f2a94441eaf3d8d625dc992eb *man/trplot.qrrvglm.Rd
+5ddf60a47daa1dde214b91ca9dd7df6d *man/truncweibull.Rd
50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-1fc91e082e70251f46af4261f7d48f78 *man/ugss.Rd
-ff424ad63653087fd40315ae0763f0a7 *man/undocumented-methods.Rd
-1dc06807944c2ece254ebbcd034a12a5 *man/uqo.Rd
-f9eeeaeacdb82471c5230468b61d7bdd *man/uqo.control.Rd
-f78da1e2ac9068f2781922657705b723 *man/venice.Rd
+0f938e4ad276b59e46cabc77a2f8e79f *man/undocumented-methods.Rd
+89ca278b0ede1400678b3525f178aa03 *man/uqo.Rd
+f63e291da13f8a3c89a60e7b174ccd67 *man/uqo.control.Rd
+9ffc09b8e1bca4fe6e4c298e4537adbd *man/venice.Rd
5d0f6c9e067bd6e7d44891427c0b47ff *man/vgam-class.Rd
-d3dec49d63432c4e702ab28d994663c1 *man/vgam.Rd
-31977aad5fed703735d83dbb04524345 *man/vgam.control.Rd
+bb56e57215c669e19712b2f3a583172a *man/vgam.Rd
+c059eb2c3a2c325bd3b9498abe0a5d46 *man/vgam.control.Rd
3901a430c138688b96027a1c8a96c4fd *man/vglm-class.Rd
-6e640c3fde4c99c2984a4c7612c019cb *man/vglm.Rd
-ad5684fc42b1f1f5cc881f6e7d49019d *man/vglm.control.Rd
-f57f8703ffce527c50bc9297fe5dd94f *man/vglmff-class.Rd
+cf27a581829d8d7081e55ebffb0dfecf *man/vglm.Rd
+e9971e040dd16e21b4f4445dcf288faf *man/vglm.control.Rd
+a8508ebb5ce0d2fed90d3e9e1d081455 *man/vglmff-class.Rd
9d43253faca810a9baa7f654ac7792b3 *man/vonmises.Rd
-33d0f6c4c20377940add441c4d482e78 *man/vsmooth.spline.Rd
+77f9be156a1a59c429db0e480eff0f37 *man/vsmooth.spline.Rd
c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
e4d3a522ebb0edad3f9f8261d8f40d93 *man/wald.Rd
-651416c8a31226aebba2e11b5a091cdf *man/weibull.Rd
+b3e006846209fa329deadfc18aab6c9d *man/weibull.Rd
e3068604e1a1986a32e83c891782a70a *man/weightsvglm.Rd
-a1fd4bb94558a6ebde1ed7e07a717956 *man/wffc.P2star.Rd
-cdd118028d235ad90e2351163b9ac670 *man/wffc.Rd
-31c7ead90337df892da898d01168b4b2 *man/wffc.indiv.Rd
-ce03a749bcb5428662ac78b85bd6f08d *man/wffc.nc.Rd
-664d89e742974a4be71a459a68bbfc80 *man/wffc.teams.Rd
+a361b06e43268acba1a3ec3f81fd65cd *man/wffc.P2star.Rd
+c4bad409f04a155d39f12f93d489849f *man/wffc.Rd
+48a51ab0fa73a56e7206d44760639788 *man/wffc.indiv.Rd
+a0b29acd25cad083c4bc7ccfa491885e *man/wffc.nc.Rd
+2cf0ef83f7ff09796fbb1f357ac6da61 *man/wffc.teams.Rd
655258cff21a67e1549b204ff3d451a5 *man/wrapup.smart.Rd
-114a4ad6e5056896cd22d7558fc5b520 *man/xs.nz.Rd
bcb9181a6ca8398fefd44de6552a8938 *man/yeo.johnson.Rd
e3116eb4708dc7d3a6afdb76e3705284 *man/yip88.Rd
21a90fbde0228b4e74bba93b50300b54 *man/yulesimon.Rd
a6128b966f2d5d6df5f36b11bc2c3607 *man/yulesimonUC.Rd
702b59c0ff9a17b02e63efbe7451ef34 *man/zabinomUC.Rd
-2f6dffea54d337e1ed60f267388557e9 *man/zabinomial.Rd
+2a4b6a8e46e7fdcc896c4a291d5c2e81 *man/zabinomial.Rd
7fdb1e52df331edbf0e234b7f455a9e0 *man/zageomUC.Rd
-27960c593ab3e907048e7ef7523b1efb *man/zageometric.Rd
+91a61e2550e8daa836931fcdf23dd8d9 *man/zageometric.Rd
cbc82d4435bdb4bcf8d8c4a2d5a9e483 *man/zanegbinUC.Rd
a214209935a1a86d8129d38fe37cc05c *man/zanegbinomial.Rd
ce015717ce27f27018754d67e3316957 *man/zapoisUC.Rd
035de7769a8dabd54be20e64592e0bd4 *man/zapoisson.Rd
61cce538df41d42d6e5daf8f37635527 *man/zero.Rd
-dc4cfc56ff0924b05ad0af86d916c23b *man/zeta.Rd
+7985338d08e88fa23cce9cc0a09724b6 *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
86813485832ea3097bccb17a30752861 *man/zetaff.Rd
2dcc3a027d670144db7a96b4ccf48949 *man/zibinomUC.Rd
-6dab9406e35eba935bb67ff6c39c4b2e *man/zibinomial.Rd
+e012ae5e25cc15fdfba42f127bedf773 *man/zibinomial.Rd
eac0a99dd131fe06d3ed428eb3f4c515 *man/zigeomUC.Rd
-a49780b1594cd24043384312ccf975ad *man/zigeometric.Rd
+9ea946fdd3d0189c4d634cfb48dd1f06 *man/zigeometric.Rd
5a3c5dfb9a9340b0cbd930e1c3c30ad0 *man/zinegbinUC.Rd
-810f6051f65319950eaf7b623db4d357 *man/zinegbinomial.Rd
-3fac9599b8980c7ed980519facd5dfda *man/zipebcom.Rd
+243a21fd3b1684694bfae65502ad9c2e *man/zinegbinomial.Rd
+89d598976784c12c45db5af25d1bc66f *man/zipebcom.Rd
e8e65cb1b0a3b7ae3bfb81222966024d *man/zipf.Rd
15d3e6361ff82acece70960b06e13d1b *man/zipfUC.Rd
e06712314cd3b09f403cfd0aea0b4b31 *man/zipoisUC.Rd
-e3bd4c85369f4fe2cc8d7996a792660f *man/zipoisson.Rd
+ccbd33a607fe455f79a9d3248234ac35 *man/zipoisson.Rd
4aaf5efcfbcf1bdf32b13f632ac3ed0f *src/caqo3.c
-69d2fd2a25229e368e8cf93ed005f14f *src/fgam.f
+77ed63cecc681dfebc94a028d0cfc996 *src/fgam.f
f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
dc1ca5b4e9a67b6d48c25e7107112d9c *src/lerchphi.c
9dd33afbac4653b7d8bdbd2794b9c262 *src/lms.f
9cfd5e51c2dba024afc28b0fffaece4a *src/muxr.c
-6f2d68edb270dca177d290a0d62992fd *src/rgam.f
-749d84c8e3b17645ddcdb85a77dc9acc *src/rgam3.c
+1f51508edc95c9a11a4443d19ef759af *src/rgam.f
+ef267a93286cc6c6464fd50192ec0702 *src/rgam3.c
10939d9fb380d54da716a835d37fdf75 *src/tyeepolygamma3.c
-5d14c85e6eda8c2d1a3219a2aa3c708a *src/vcall2.f
+79cf39f1d83f25e29a6c56d344ea8d76 *src/vcall2.f
+83c304cbbe3f0a9bfbe7ab5aa0eefd4e *src/vdigami.f
3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
-91fd839e31da38b116c09ef24a3c25d0 *src/vgam.f
-456d597ae327bc181a1405e3809f7665 *src/vgam3.c
-bccf9d58334e1fde641a6d59443cd915 *src/vlinpack1.f
-fe604895e0e9c3314f9d29378d1d0ed1 *src/vlinpack2.f
-a7625ebca9616112544d1e8155a5922a *src/vlinpack3.f
-93bbb9483bd82b692febd98c0cf10a6d *src/vmux.f
+cc72ffc1acb79e253cc97fbe2608e9ed *src/vgam.f
+5d87230c617938f7ed3e71123c30a160 *src/vgam3.c
+f910910e33c21855f63634e4e9a99903 *src/vlinpack1.f
+80c0a0f512ae74ecbed144c5f115fb16 *src/vlinpack2.f
+e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
+9e424b144361fdaa0d8573729df1d442 *src/vmux.f
0317d171d3fa308b4e19e2c386341945 *src/vmux3.c
d5c3783cc318a8e1c0b7aafcf5849dee *src/zeta3.c
diff --git a/NAMESPACE b/NAMESPACE
index 8eab56d..75c0eb1 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,6 +7,14 @@
useDynLib(VGAM)
+export(pgamma.deriv, pgamma.deriv.unscaled, truncweibull)
+
+export(binom2.rho.ss)
+
+
+export(arwz2wz)
+
+
export(link2list)
export(mlogit)
@@ -69,11 +77,14 @@ export(plota21)
-export(confint_rrnb, confint_nb1)
+export(Confint.rrnb, Confint.nb1)
export(vcovrrvglm)
-export(huggins91, dhuggins91, rhuggins91)
+export(posbernoulli.b, posbernoulli.t, posbernoulli.tb, aux.posbernoulli)
+export(N.hat.posbernoulli)
+export(dposbern, rposbern)
+export(posbern.aux)
@@ -87,7 +98,7 @@ export(is.empty.list)
export(
Build.terms.vlm,
procVec,
-rss.vgam,
+ResSS.vgam,
vcontrol.expression,
vplot, vplot.default, vplot.factor, vplot.list,
vplot.matrix, vplot.numeric, vvplot.factor)
@@ -193,9 +204,11 @@ anova.vglm,
bisa, dbisa, pbisa, qbisa, rbisa,
betabinomial.ab, betabinomial,
dexpbinomial,
-dbetabinom, pbetabinom, rbetabinom, dbetabinom.ab, pbetabinom.ab, rbetabinom.ab,
+dbetabinom, pbetabinom, rbetabinom,
+dbetabinom.ab, pbetabinom.ab, rbetabinom.ab,
biplot.qrrvglm,
dbort, rbort, borel.tanner,
+care.exp,
cauchy, cauchy1,
ccoef.cao, ccoef.Coef.cao, ccoef.Coef.qrrvglm, ccoef.qrrvglm,
cdf, cdf.lms.bcg, cdf.lms.bcn,
@@ -241,8 +254,9 @@ lqnorm,
dbilogis4, pbilogis4, rbilogis4, bilogistic4,
logistic1, logistic2,
logLik.vlm, lv.cao,
-lv.Coef.qrrvglm,
-lvplot.cao, lv.qrrvglm,
+latvar.Coef.qrrvglm, latvar.qrrvglm,
+lvplot.cao,
+Rank, Rank.rrvglm, Rank.qrrvglm, Rank.cao,
Max.Coef.qrrvglm, Max.qrrvglm,
is.bell.vlm, is.bell.rrvglm, is.bell.qrrvglm, is.bell.cao, is.bell,
model.matrix.qrrvglm,
@@ -319,6 +333,7 @@ dzeta)
export(lm2vlm.model.matrix)
+export(vlm2lm.model.matrix)
@@ -438,7 +453,8 @@ export(dgengamma, pgengamma, qgengamma, rgengamma)
export(
dbenf, pbenf, qbenf, rbenf,
-genbetaII, genpoisson, geometric,
+genbetaII, genpoisson,
+geometric, truncgeometric,
dlino, plino, qlino, rlino, lino,
grc,
dhzeta, phzeta, qhzeta, rhzeta, hzeta,
@@ -450,12 +466,12 @@ dyules, pyules, ryules, yulesimon,
logff, dlog, plog, rlog,
loglinb2, loglinb3,
loglog, lognormal3, lvplot.qrrvglm,
-lvplot, lvplot.rrvglm, lv, Max, MNSs,
+lvplot, lvplot.rrvglm, lv, latvar, Max, MNSs,
dmultinomial, multinomial, margeff)
export(
-huber, huber1, dhuber, edhuber, phuber, qhuber, rhuber)
+huber2, huber1, dhuber, edhuber, phuber, qhuber, rhuber)
export(
@@ -472,7 +488,8 @@ deexp, peexp, qeexp, reexp)
export(
meplot, meplot.default, meplot.vlm,
guplot, guplot.default, guplot.vlm,
-negbinomial, negbinomial.size, polya, normal1,
+negbinomial, negbinomial.size, polya,
+normal1, SUR,
nbcanlink,
tobit, dtobit, ptobit, qtobit, rtobit,
Opt,
@@ -568,7 +585,7 @@ logLik,
vcov,
deviance,
calibrate, cdf, ccoef, df.residual,
-lv, Max, Opt, Tol,
+lv, latvar, Max, Opt, Tol,
biplot, deplot, lvplot, qtplot, rlplot, meplot,
trplot, vplot,
formula, case.names, variable.names,
diff --git a/NEWS b/NEWS
index 256dccf..ac59fa3 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,98 @@
+ CHANGES IN VGAM VERSION 0.9-1
+
+NEW FEATURES
+
+ o A companion package, called \pkg{VGAMdata}, is new.
+ Some large data sets previously in \pkg{VGAM} have
+ been shifted there, e.g., xs.nz and ugss.
+ \pkg{VGAMdata} also contains the new data sets oly12 and students.tw.
+ o pnorm2() argument names have changed: 'sd1' is now
+ 'var1', etc., and 'rho' is now 'cov12'.
+ See the documentation.
+ A warning is given if it returns any negative value.
+ o Introduction of g-type arguments for grid search.
+ o Improved initial values for: lomax().
+ o Argument 'bred' works for poissonff().
+ o latvar() generic available; it is identical to lv(), but the
+ latter will be withdrawn soon.
+ o Rank() generic available for RR-VGLMs, QRR-VGLMs and CAO models.
+ (Both generics are sketched below, after this list.)
+ o New function: pgamma.deriv(), pgamma.deriv.unscaled(),
+ vlm2lm.model.matrix().
+ o New VGAM family functions:
+ posbernoulli.b(), posbernoulli.t(), posbernoulli.tb(tau = 2 or 3).
+ These provide estimates of N as well as its standard error
+ (a usage sketch is given below, after this list).
+ Also new: truncgeometric(), truncweibull() and SUR().
+ binom2.rho.ss() is exported but does not work yet.
+ o New argument 'matrix.out = FALSE' for constraints.vlm().
+ o cm.vgam() has a few more arguments to provide more flexibility.
+ But there should be no changes for VGAM users at this stage.
+ o Renamed functions: confint_rrnb() is now
+ Confint.rrnb(), and confint_nb1() is now Confint.nb1().
+ o Some changes to component names returned by Confint.rrnb() and
+ Confint.nb1(): $CI. and $SE. are uppercase.
+ o Some zero-inflated VGAM family functions return a
+ "vglm" object with @misc$pstr0 for the estimated
+ probability of a structural zero.
+ o New data set: olym12.
+ Note that Students.tw is earmarked for \pkg{VGAMdata}.
+ o Data sets renamed:
+ olympic renamed to olym08.
+ o Qvar() has a 'which.eta = 1' argument specifying which linear
+ predictor to use, so quasi-variances are now
+ available for models with M > 1 linear predictors.
+ o Tested okay on R 3.0.0.
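+
+     An illustrative sketch of the new latvar() and Rank() generics
+     (the hspider data frame ships with VGAM; convergence of the small
+     cqo() fit below is not guaranteed, and the column names used are
+     those documented for hspider):
+
+       library(VGAM)
+       set.seed(111)
+       p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
+                   WaterCon + BareSand + FallTwig,
+                 family = poissonff, data = hspider, Rank = 1)
+       Rank(p1)          # 1, for this QRR-VGLM
+       head(latvar(p1))  # site scores; equivalent to the older lv(p1)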
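+
+     A minimal sketch of the new positive-Bernoulli (capture-recapture)
+     families; 'cdata' and its capture-history columns y1, y2, y3 are
+     hypothetical, and the @extra component names shown are assumptions
+     (see the posbernoulli.t() documentation for the authoritative
+     interface):
+
+       fit <- vglm(cbind(y1, y2, y3) ~ 1, posbernoulli.t, data = cdata)
+       coef(fit, matrix = TRUE)   # one logit(p) per sampling occasion
+       fit@extra$N.hat            # assumed component: the estimate of N
+       fit@extra$SE.N.hat         # assumed component: its standard error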
+
+
+BUG FIXES and CHANGES
+
+ o VGAM now depends on R >= 2.15.1.
+ o Fortran array bounds problems (picked up by
+ AddressSanitizer) have been fixed.
+ o All "no visible binding for global variables" warnings
+ have been suppressed.
+ o vgam() with an s(spar = myspar) term should run, and if myspar
+ is extracted from a previous vgam() model then the two models
+ should be effectively the same.
+ o summaryvgam() did not calculate or print out all the p-values
+ for testing linearity.
+ o fnormal1()@initialize was faulty wrt lm.wfit().
+ o zageometric() and zigeometric() handle multiple responses.
+ o mlogit(inverse = TRUE) and mlogit(inverse = FALSE)
+ were switched. Now multinomial() makes use of mlogit().
+ mlogit() now calls care.exp() to avoid overflow and underflow;
+ this stops multinomial() from returning NA as a fitted
+ value when abs(eta) is very large (the idea is sketched
+ below, after this list).
+ o arwz2wz() introduced to simplify the construction of
+ working weight matrices for multiple responses.
+ o Renamed functions:
+ dhuggins91() is now dposbern(),
+ huber() is now huber2(),
+ ei() is now eifun(), eij() is now eijfun(),
+ rss.vgam() is now ResSS.vgam().
+ o fisherz(theta) was wrong. Corrected, then replaced by
+ atanh(theta).
+ o [dpq]dagum(x), [dpq]lomax(x), [dpq]sinmad(x), etc. are now handled
+ correctly for x = 0, Inf, -Inf, NaN, NA.
+ o qdagum(x) failed due to 'Scale' [thanks to Alena Tartalova].
+ o Arguments renamed:
+ 'intercept.apply' is now 'apply.parint',
+ 'Norrr' is now 'noRRR' (a warning/error message is issued),
+ 'nowarning' is now 'noWarning' in vglm.control().
+ (These renamings are sketched below, after this list.)
+ o seq2binomial()@loglikelihood includes the binomial
+ lchoose() constants.
+ o A qgev() bug was fixed [thanks to Alex Cannon], and similarly for qgpd().
+ o cao() now usually produces fewer error/warning messages.
+ o Data sets corrected for errors: chinese.nz.
+ o Data set changes: gew had incorrect y1 and y2 values,
+ and variables x1 to x4 have been renamed to value.g, capital.w,
+ etc. The year variable has been added.
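+
+     The overflow/underflow guard used via care.exp() amounts to clamping
+     the linear predictor before exponentiating.  This is a rough sketch
+     of the idea only (the threshold is illustrative; see care.exp()
+     itself for the actual implementation):
+
+       care.exp.sketch <- function(x, thresh = -log(.Machine$double.eps)) {
+         x[x >  thresh] <-  thresh   # clamp large values
+         x[x < -thresh] <- -thresh   # clamp very negative values
+         exp(x)                      # now safe from overflow/underflow
+       }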
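+
+     Illustrative old -> new translations when updating user code to
+     0.9-1 ('mydata', 'fit.rrnb' and the model formulas are hypothetical):
+
+       ## huber() is now huber2():
+       fit <- vglm(y ~ x2, huber2(), data = mydata)
+
+       ## confint_rrnb() is now Confint.rrnb(); components begin with CI./SE.:
+       names(Confint.rrnb(fit.rrnb))
+
+       ## 'Norrr' is now 'noRRR', e.g., in cao.control() and qrrvglm.control():
+       cao.control(noRRR = ~ 1)
+
+       ## 'nowarning' is now 'noWarning' in vglm.control():
+       vglm.control(noWarning = TRUE)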
+
+
+
CHANGES IN VGAM VERSION 0.9-0
NEW FEATURES
diff --git a/R/Links.R b/R/Links.R
index 865df57..cb3c275 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/aamethods.q b/R/aamethods.q
index eb3113d..da6810f 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
index 35f4770..496c131 100644
--- a/R/add1.vglm.q
+++ b/R/add1.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/attrassign.R b/R/attrassign.R
index 8a58530..f05c201 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/bAIC.q b/R/bAIC.q
index 7d91359..2021cfd 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 85d7665..45212cd 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/calibrate.q b/R/calibrate.q
index dd61d5e..22723a0 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/cao.R b/R/cao.R
index 59ebb61..370a83b 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/cao.fit.q b/R/cao.fit.q
index de47284..9486db9 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,236 +9,242 @@
-cao.fit <- function(x, y, w = rep(1, length(x[, 1])),
- etastart = NULL, mustart = NULL, coefstart = NULL,
- offset = 0, family,
- control = cao.control(...), criterion = "coefficients",
- qr.arg = FALSE, constraints = NULL, extra = NULL,
- Terms=Terms, function.name = "cao", ...)
-{
- specialCM = NULL
- post = list()
- check.rank = TRUE #
- nonparametric <- TRUE
- optim.maxit <- control$optim.maxit
- save.weight <- control$save.weight
- trace <- control$trace
- minimize.criterion <- control$min.criterion
-
- n <- dim(x)[1]
+cao.fit <-
+ function(x, y, w = rep(1, length(x[, 1])),
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family,
+ control = cao.control(...), criterion = "coefficients",
+ qr.arg = FALSE, constraints = NULL, extra = NULL,
+ Terms = Terms, function.name = "cao", ...) {
- copy_X_vlm <- FALSE # May be overwritten in @initialize
+ maxitl <- fv <- NULL
- X_vlm_save <- NULL
- intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- y.names <- predictors.names <- NULL # May be overwritten in @initialize
-
- n.save <- n
+ specialCM <- NULL
+ post <- list()
+ check.rank <- TRUE #
+ nonparametric <- TRUE
+ optim.maxit <- control$optim.maxit
+ save.weight <- control$save.weight
+ trace <- control$trace
+ minimize.criterion <- control$min.criterion
+ n <- dim(x)[1]
- Rank <- control$Rank
- rrcontrol <- control #
- if (length(family@initialize))
- eval(family@initialize) # Initialize mu and M (and optionally w)
- n <- n.save
+ copy_X_vlm <- FALSE # May be overwritten in @initialize
- modelno = switch(family@vfamily[1], "poissonff" = 2,
- "binomialff" = 1, "quasipoissonff" = 0, "quasibinomialff" = 0,
- "negbinomial"=3,
- "gamma2"=5, "gaussianff"=8,
- 0) # stop("cannot fit this model using fast algorithm")
- if (!modelno) stop("the family function does not work with cao()")
- if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
+ X_vlm_save <- NULL
- eval(rrr.init.expression)
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
- if (length(etastart)) {
- eta <- etastart
- mu <- if (length(mustart)) mustart else
- if (length(body(slot(family, "linkinv"))))
- slot(family, "linkinv")(eta, extra) else
- warning("argument 'etastart' assigned a value ",
- "but there is no 'linkinv' slot to use it")
+
+ n.save <- n
+
+
+ Rank <- control$Rank
+ rrcontrol <- control #
+
+ if (length(family@initialize))
+ eval(family@initialize) # Initialize mu and M (and optionally w)
+ n <- n.save
+
+ modelno <- switch(family@vfamily[1], "poissonff" = 2,
+ "binomialff" = 1, "quasipoissonff" = 0,
+ "quasibinomialff" = 0, "negbinomial" = 3,
+ "gamma2" = 5, "gaussianff" = 8,
+ 0) # stop("cannot fit this model using fast algorithm")
+ if (!modelno)
+ stop("the family function does not work with cao()")
+ if (modelno == 1)
+ modelno <- get("modelno", envir = VGAM:::VGAMenv)
+
+ eval(rrr.init.expression)
+
+ if (length(etastart)) {
+ eta <- etastart
+ mu <- if (length(mustart)) mustart else
+ if (length(body(slot(family, "linkinv"))))
+ slot(family, "linkinv")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'linkinv' slot to use it")
+ }
+
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "linkfun")))) {
+ eta <- slot(family, "linkfun")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
}
+ }
- if (length(mustart)) {
- mu <- mustart
- if (length(body(slot(family, "linkfun")))) {
- eta <- slot(family, "linkfun")(mu, extra)
- } else {
- warning("argument 'mustart' assigned a value ",
- "but there is no 'link' slot to use it")
- }
- }
+ M <- if (is.matrix(eta)) ncol(eta) else 1
- M <- if (is.matrix(eta)) ncol(eta) else 1
+ if (length(family@constraints))
+ eval(family@constraints)
- if (length(family@constraints))
- eval(family@constraints)
+ special.matrix <- matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
+ findex <- trivial.constraints(just.testing, special.matrix)
+ tc1 <- trivial.constraints(constraints)
- special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
- just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
- findex = trivial.constraints(just.testing, special.matrix)
- tc1 = trivial.constraints(constraints)
+ if (all(findex == 1))
+ stop("No covariates to form latent variables from.")
+ colx1.index <- names.colx1.index <- NULL
+ dx2 <- dimnames(x)[[2]]
+ if (sum(findex)) {
+ asx <- attr(x, "assign")
+ for(ii in names(findex))
+ if (findex[ii]) {
+ names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index <- c(colx1.index, asx[[ii]])
+ }
+ names(colx1.index) <- names.colx1.index
+ }
+ rrcontrol$colx1.index <- control$colx1.index <- colx1.index
+ colx2.index <- 1:ncol(x)
+ names(colx2.index) <- dx2
+ colx2.index <- colx2.index[-colx1.index]
+ p1 <- length(colx1.index); p2 <- length(colx2.index)
+ rrcontrol$colx2.index <- control$colx2.index <- colx2.index
- if (all(findex == 1))
- stop("No covariates to form latent variables from.")
- colx1.index = names.colx1.index = NULL
- dx2 = dimnames(x)[[2]]
- if (sum(findex)) {
- asx = attr(x, "assign")
- for(ii in names(findex))
- if (findex[ii]) {
- names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
- colx1.index = c(colx1.index, asx[[ii]])
- }
- names(colx1.index) = names.colx1.index
- }
- # Save it on the object:
- rrcontrol$colx1.index = control$colx1.index = colx1.index
- colx2.index = 1:ncol(x)
- names(colx2.index) = dx2
- colx2.index = colx2.index[-colx1.index]
- p1 = length(colx1.index); p2 = length(colx2.index)
- # Save it on the object:
- rrcontrol$colx2.index = control$colx2.index = colx2.index
-
-
-
- Cmat = if (length(rrcontrol$Cinit))
- matrix(rrcontrol$Cinit,p2,Rank) else {
- if (!rrcontrol$Use.Init.Poisson.QO) {
- matrix(rnorm(p2*Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
- } else {
- .Init.Poisson.QO(ymat=as.matrix(y),
- X1 = x[,colx1.index,drop = FALSE],
- X2 = x[,colx2.index,drop = FALSE],
- Rank=rrcontrol$Rank, trace=rrcontrol$trace,
- max.ncol.etamat = rrcontrol$Etamat.colmax,
- Crow1positive=rrcontrol$Crow1positive,
- constwt= any(family@vfamily[1] ==
- c("negbinomial","gamma2","gaussianff")),
- takelog= any(family@vfamily[1] != c("gaussianff")))
- }
+
+
+ Cmat <- if (length(rrcontrol$Cinit)) {
+ matrix(rrcontrol$Cinit, p2, Rank)
+ } else {
+ if (!rrcontrol$Use.Init.Poisson.QO) {
+ matrix(rnorm(p2*Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
+ } else {
+ .Init.Poisson.QO(ymat = as.matrix(y),
+ X1 = x[,colx1.index, drop = FALSE],
+ X2 = x[,colx2.index, drop = FALSE],
+ Rank = rrcontrol$Rank, trace = rrcontrol$trace,
+ max.ncol.etamat = rrcontrol$Etamat.colmax,
+ Crow1positive = rrcontrol$Crow1positive,
+ constwt = any(family@vfamily[1] ==
+ c("negbinomial", "gamma2", "gaussianff")),
+ takelog = any(family@vfamily[1] != c("gaussianff")))
}
+ }
- rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+ rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
+ Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
- nice31 = checkCMCO(Blist, control=control, modelno=modelno)
- if (nice31 != 1) stop("not nice")
+ nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
+ if (nice31 != 1) stop("not nice")
- ncolBlist <- unlist(lapply(Blist, ncol))
- lv.mat = x[, colx2.index, drop = FALSE] %*% Cmat
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ lv.mat <- x[, colx2.index, drop = FALSE] %*% Cmat
- rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
+ rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
- Nice21 = length(names.colx1.index) == 1 &&
- names.colx1.index == "(Intercept)"
- if (!Nice21) stop("'Norrr = ~ 1' is supported only, without constraints")
- NOS = ifelse(modelno %in% c(3, 5), M/2, M)
- p1star. = if (Nice21) ifelse(modelno %in% c(3, 5), 2, 1) else M
- p2star. = if (Nice21) Rank else stop("not Nice21")
- pstar. = p1star. + p2star.
- nstar = if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
- lenbeta = pstar. * ifelse(Nice21, NOS, 1)
+ Nice21 <- length(names.colx1.index) == 1 &&
+ names.colx1.index == "(Intercept)"
+ if (!Nice21) stop("'noRRR = ~ 1' is supported only, without constraints")
+ NOS <- ifelse(modelno %in% c(3, 5), M/2, M)
+ p1star. <- if (Nice21) ifelse(modelno %in% c(3, 5), 2, 1) else M
+ p2star. <- if (Nice21) Rank else stop("not Nice21")
+ pstar. <- p1star. + p2star.
+ nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
+ lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
- othint = c(Rank, control$EqualTol, pstar. ,
- dim2wz = 1, inited = 0, # w(,dimw) cols
- modelno, maxitl=control$maxitl, actnits = 0, twice = 0, p1star. ,
- p2star. , Nice21, lenbeta, controlITolerances = 0, control$trace,
- p1, p2=p2, imethod=control$imethod, bchat = 0)
- othdbl = c(small=control$SmallNo, fseps=control$epsilon,
- .Machine$double.eps,
- iKvector=rep(control$iKvector, len=NOS),
- iShape=rep(control$iShape, len=NOS),
- resss = 0, bfeps=control$bf.epsilon, hstep = 0.1)
+ othint <- c(Rank, control$EqualTol, pstar. ,
+ dim2wz = 1, inited = 0, # w(,dimw) cols
+ modelno, maxitl = control$maxitl, actnits = 0, twice = 0, p1star. ,
+ p2star. , Nice21, lenbeta, controlITolerances = 0, control$trace,
+ p1, p2 = p2, imethod = control$imethod, bchat = 0)
+ othdbl <- c(small = control$SmallNo, fseps = control$epsilon,
+ .Machine$double.eps,
+ iKvector = rep(control$iKvector, len = NOS),
+ iShape = rep(control$iShape, len = NOS),
+ resss = 0, bfeps = control$bf.epsilon, hstep = 0.1)
- for(iter in 1:optim.maxit) {
- if (control$trace) {
- cat("\nIteration", iter, "\n")
- flush.console()
- }
+ for(iter in 1:optim.maxit) {
+ if (control$trace) {
+ cat("\nIteration", iter, "\n")
+ flush.console()
+ }
- conjgrad = optim(par=c(Cmat), fn=callcaoc,
+ conjgrad <- optim(par = c(Cmat), fn = callcaoc,
gr = if (control$GradientFunction) calldcaoc else NULL,
method = "BFGS",
- control=list(fnscale = 1, trace=as.integer(control$trace),
- maxit=control$Maxit.optim, REPORT = 10),
- etamat=eta, xmat = x, ymat=y, # as.matrix(y),
- wvec=w, modelno=modelno,
- Control=control,
- Nice21=Nice21,
+ control=list(fnscale = 1, trace = as.integer(control$trace),
+ maxit = control$Maxit.optim, REPORT = 10),
+ etamat = eta, xmat = x, ymat = y, # as.matrix(y),
+ wvec = w, modelno = modelno,
+ Control = control,
+ Nice21 = Nice21,
p1star. = p1star. , p2star. = p2star. ,
- n=n, M=M,
- othint=othint, othdbl=othdbl,
+ n = n, M = M,
+ othint = othint, othdbl = othdbl,
alldump = FALSE)
- Cmat = matrix(conjgrad$par, p2, Rank) # old becoz of scale(cmatrix)
+ Cmat <- matrix(conjgrad$par, p2, Rank) # old becoz of scale(cmatrix)
# Cmat <- Cmat %*% Ut # Normalized
- if (converged <- (conjgrad$convergence == 0)) break
- }
+ if (converged <- (conjgrad$convergence == 0)) break
+ }
- if (!converged) {
- if (maxitl > 1) {
- warning("convergence not obtained in", maxitl, "iterations.")
- } else {
- warning("convergence not obtained")
- }
+ if (!converged) {
+ if (maxitl > 1) {
+ warning("convergence not obtained in", maxitl, "iterations.")
} else {
+ warning("convergence not obtained")
}
- Cmat = crow1C(Cmat, control$Crow1positive) # Make sure signs are right
+ } else {
+ }
+ Cmat <- crow1C(Cmat, control$Crow1positive) # Make sure signs are right
- flush.console()
- temp9 =
- callcaoc(cmatrix=Cmat,
- etamat=eta, xmat = x, ymat=y, wvec=w, modelno=modelno,
- Control=control,
- Nice21=Nice21,
- p1star. = p1star. , p2star. = p2star. ,
- n=n, M=M,
- othint=othint, othdbl=othdbl,
- alldump = TRUE)
- if (!is.list(extra))
- extra = list()
- extra$Cmat = temp9$Cmat
-
- ynames = dimnames(y)[[2]]
- extra$df1.nl = temp9$df1.nl
- extra$lambda1 = temp9$lambda1
- extra$spar1 = temp9$spar1
- names(extra$df1.nl) =
- names(extra$lambda1) =
- names(extra$spar1) = ynames
- if (Rank == 2) {
- extra$spar2 = temp9$spar2
- extra$lambda2 = temp9$lambda2
- extra$df2.nl = temp9$df2.nl
- names(extra$df2.nl) =
- names(extra$lambda2) =
- names(extra$spar2) = ynames
- }
+ flush.console()
+ temp9 <-
+ callcaoc(cmatrix = Cmat,
+ etamat = eta, xmat = x, ymat = y, wvec = w, modelno = modelno,
+ Control = control,
+ Nice21 = Nice21,
+ p1star. = p1star. , p2star. = p2star. ,
+ n = n, M = M,
+ othint = othint, othdbl = othdbl,
+ alldump = TRUE)
+ if (!is.list(extra))
+ extra <- list()
+ extra$Cmat <- temp9$Cmat
- extra$alldeviance = temp9$alldeviance
- names(extra$alldeviance) = ynames
+ ynames <- dimnames(y)[[2]]
+ extra$df1.nl <- temp9$df1.nl
+ extra$lambda1 <- temp9$lambda1
+ extra$spar1 <- temp9$spar1
+ names(extra$df1.nl) <-
+ names(extra$lambda1) <-
+ names(extra$spar1) <- ynames
+ if (Rank == 2) {
+ extra$spar2 <- temp9$spar2
+ extra$lambda2 <- temp9$lambda2
+ extra$df2.nl <- temp9$df2.nl
+ names(extra$df2.nl) <-
+ names(extra$lambda2) <-
+ names(extra$spar2) <- ynames
+ }
- mu = matrix(temp9$fitted, n, NOS, byrow = TRUE)
+ extra$alldeviance <- temp9$alldeviance
+ names(extra$alldeviance) <- ynames
+ mu <- matrix(temp9$fitted, n, NOS, byrow = TRUE)
@@ -249,74 +255,75 @@ cao.fit <- function(x, y, w = rep(1, length(x[, 1])),
- dn <- labels(x)
- yn <- dn[[1]]
+ dn <- labels(x)
+ yn <- dn[[1]]
- if (is.matrix(mu)) {
- if (length(dimnames(y)[[2]])) {
- y.names <- dimnames(y)[[2]]
- }
- if (length(dimnames(mu)[[2]])) {
- y.names <- dimnames(mu)[[2]]
- }
- dimnames(mu) <- list(yn, y.names)
- } else {
- names(mu) <- names(fv)
+
+ if (is.matrix(mu)) {
+ if (length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if (length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
}
+ dimnames(mu) <- list(yn, y.names)
+ } else {
+ names(mu) <- names(fv)
+ }
- fit <- list(
- fitted.values=mu,
- Cmatrix = Cmat,
- terms=Terms) # terms: This used to be done in vglm()
+ fit <- list(
+ fitted.values = mu,
+ Cmatrix = Cmat,
+ terms = Terms) # terms: This used to be done in vglm()
- misc <- list(
- criterion = criterion,
- predictors.names = predictors.names,
- M = M,
- n = n,
- nonparametric = nonparametric,
- p = ncol(x),
- ynames = ynames)
+ misc <- list(
+ criterion = criterion,
+ predictors.names = predictors.names,
+ M = M,
+ n = n,
+ nonparametric = nonparametric,
+ p = ncol(x),
+ ynames = ynames)
- crit.list <- list()
- crit.list$deviance = temp9$deviance
+ crit.list <- list()
+ crit.list$deviance <- temp9$deviance
- if (w[1] != 1 || any(w != w[1]))
- fit$prior.weights <- w
-
- if (length(family@last))
- eval(family@last)
-
- structure(c(fit,
- temp9,
- list(
- contrasts = attr(x, "contrasts"),
- control = control,
- crit.list = crit.list,
- extra = extra,
- family = family,
- iter = iter,
- misc = misc,
- post = post,
- x = x,
- y = y)),
- vclass = family@vfamily)
+ if (w[1] != 1 || any(w != w[1]))
+ fit$prior.weights <- w
+
+ if (length(family@last))
+ eval(family@last)
+
+ structure(c(fit,
+ temp9,
+ list(
+ contrasts = attr(x, "contrasts"),
+ control = control,
+ crit.list = crit.list,
+ extra = extra,
+ family = family,
+ iter = iter,
+ misc = misc,
+ post = post,
+ x = x,
+ y = y)),
+ vclass = family@vfamily)
}
-cao.control = function(Rank = 1,
+cao.control <- function(Rank = 1,
all.knots = FALSE,
criterion = "deviance",
Cinit = NULL,
@@ -326,7 +333,8 @@ cao.control = function(Rank = 1,
GradientFunction = FALSE, # For now 24/12/04
iKvector = 0.1,
iShape = 0.1,
- Norrr = ~ 1,
+ noRRR = ~ 1,
+ Norrr = NA,
SmallNo = 5.0e-13,
Use.Init.Poisson.QO = TRUE,
@@ -344,86 +352,99 @@ cao.control = function(Rank = 1,
df2.nl = 2.5, # About 1.5--2.5 gives the flexibility of a quadratic
spar1 = 0, # 0 means df1.nl is used
spar2 = 0, # 0 means df2.nl is used
- ...)
-{
- if (!is.Numeric(iShape, positive = TRUE))
- stop("bad input for 'iShape'")
- if (!is.Numeric(iKvector, positive = TRUE))
- stop("bad input for 'iKvector'")
- if (!is.Numeric(imethod, positive = TRUE, allowable.length = 1,
- integer.valued = TRUE))
- stop("bad input for 'imethod'")
- if (criterion != "deviance") stop("'criterion' must be 'deviance'")
- if (GradientFunction)
- stop("14/1/05; GradientFunction = TRUE not working yet")
- se.fit = as.logical(FALSE)
- if (se.fit) stop("se.fit = FALSE handled only")
-
- if (length(Cinit) && !is.Numeric(Cinit))
- stop("Bad input for 'Cinit'")
- if (!is.Numeric(Bestof, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("Bad input for 'Bestof'")
- if (!is.Numeric(maxitl, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("Bad input for 'maxitl'")
- if (!is.Numeric(bf.epsilon, allowable.length = 1,
- positive = TRUE))
- stop("Bad input for 'bf.epsilon'")
- if (!is.Numeric(bf.maxit, integer.valued = TRUE,
- positive = TRUE, allowable.length = 1))
- stop("Bad input for 'bf.maxit'")
- if (!is.Numeric(Etamat.colmax, positive = TRUE,
- allowable.length = 1) ||
- Etamat.colmax < Rank)
- stop("bad input for 'Etamat.colmax'")
- if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
- positive = TRUE, allowable.length = 1))
- stop("Bad input for 'Maxit.optim'")
- if (!is.Numeric(optim.maxit, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("Bad input for 'optim.maxit'")
- if (!is.Numeric(SD.sitescores, allowable.length = 1,
- positive = TRUE))
- stop("Bad input for 'SD.sitescores'")
- if (!is.Numeric(SD.Cinit, allowable.length = 1,
- positive = TRUE))
- stop("Bad input for 'SD.Cinit'")
- if (!is.Numeric(df1.nl) || any(df1.nl < 0))
- stop("Bad input for 'df1.nl'")
- if (any(df1.nl >= 0 & df1.nl < 0.05)) {
- warning("'df1.nl' values between 0 and 0.05 converted to 0.05")
- df1.nl[df1.nl < 0.05] = 0.05
- }
- if (!is.Numeric(df2.nl) || any(df2.nl < 0))
- stop("Bad input for 'df2.nl'")
- if (any(df2.nl >= 0 & df2.nl < 0.05)) {
- warning("'df2.nl' values between 0 and 0.05 converted to 0.05")
- df2.nl[df2.nl < 0.05] = 0.05
- }
- if (!is.Numeric(spar1) || any(spar1 < 0))
- stop("Bad input for 'spar1'")
- if (!is.Numeric(spar2) || any(spar2 < 0))
- stop("Bad input for 'spar2'")
- if (!is.Numeric(epsilon, positive = TRUE, allowable.length = 1))
- stop("Bad input for 'epsilon'")
-
- if (!is.Numeric(SmallNo, positive = TRUE, allowable.length = 1))
- stop("Bad input for 'SmallNo'")
- if ((SmallNo < .Machine$double.eps) ||
- (SmallNo > .0001)) stop("'SmallNo' is out of range")
-
- ans = list(
+ ...) {
+
+
+ if (length(Norrr) != 1 || !is.na(Norrr)) {
+ warning("argument 'Norrr' has been replaced by 'noRRR'. ",
+ "Assigning the latter but using 'Norrr' will become an error in ",
+ "the next VGAM version soon.")
+ noRRR <- Norrr
+ }
+
+
+
+ if (!is.Numeric(iShape, positive = TRUE))
+ stop("bad input for 'iShape'")
+ if (!is.Numeric(iKvector, positive = TRUE))
+ stop("bad input for 'iKvector'")
+ if (!is.Numeric(imethod, positive = TRUE, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("bad input for 'imethod'")
+
+ if (criterion != "deviance") stop("'criterion' must be 'deviance'")
+ if (GradientFunction)
+ stop("14/1/05; GradientFunction = TRUE not working yet")
+
+ se.fit <- as.logical(FALSE)
+ if (se.fit) stop("se.fit = FALSE handled only")
+
+ if (length(Cinit) && !is.Numeric(Cinit))
+ stop("Bad input for 'Cinit'")
+ if (!is.Numeric(Bestof, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for 'Bestof'")
+ if (!is.Numeric(maxitl, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for 'maxitl'")
+ if (!is.Numeric(bf.epsilon, allowable.length = 1,
+ positive = TRUE))
+ stop("Bad input for 'bf.epsilon'")
+ if (!is.Numeric(bf.maxit, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("Bad input for 'bf.maxit'")
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
+ Etamat.colmax < Rank)
+ stop("bad input for 'Etamat.colmax'")
+ if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("Bad input for 'Maxit.optim'")
+ if (!is.Numeric(optim.maxit, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for 'optim.maxit'")
+ if (!is.Numeric(SD.sitescores, allowable.length = 1,
+ positive = TRUE))
+ stop("Bad input for 'SD.sitescores'")
+ if (!is.Numeric(SD.Cinit, allowable.length = 1,
+ positive = TRUE))
+ stop("Bad input for 'SD.Cinit'")
+ if (!is.Numeric(df1.nl) || any(df1.nl < 0))
+ stop("Bad input for 'df1.nl'")
+ if (any(df1.nl >= 0 & df1.nl < 0.05)) {
+ warning("'df1.nl' values between 0 and 0.05 converted to 0.05")
+ df1.nl[df1.nl < 0.05] <- 0.05
+ }
+ if (!is.Numeric(df2.nl) || any(df2.nl < 0))
+ stop("Bad input for 'df2.nl'")
+ if (any(df2.nl >= 0 & df2.nl < 0.05)) {
+ warning("'df2.nl' values between 0 and 0.05 converted to 0.05")
+ df2.nl[df2.nl < 0.05] = 0.05
+ }
+ if (!is.Numeric(spar1) || any(spar1 < 0))
+ stop("Bad input for 'spar1'")
+ if (!is.Numeric(spar2) || any(spar2 < 0))
+ stop("Bad input for 'spar2'")
+ if (!is.Numeric(epsilon, positive = TRUE, allowable.length = 1))
+ stop("Bad input for 'epsilon'")
+
+ if (!is.Numeric(SmallNo, positive = TRUE, allowable.length = 1))
+ stop("Bad input for 'SmallNo'")
+ if ((SmallNo < .Machine$double.eps) ||
+ (SmallNo > .0001))
+ stop("'SmallNo' is out of range")
+
+ ans <- list(
Corner = FALSE, # A constant, not a control parameter; unneeded?
EqualTolerances = FALSE, # A constant, not a control parameter; needed
ITolerances = FALSE, # A constant, not a control parameter; unneeded?
Quadratic = FALSE, # A constant, not a control parameter; unneeded?
all.knots = as.logical(all.knots)[1],
Bestof = Bestof,
- Cinit=Cinit,
+ Cinit = Cinit,
ConstrainedO = TRUE, # A constant, not a control parameter
- criterion=criterion,
- Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ criterion = criterion,
+ Crow1positive = as.logical(rep(Crow1positive, len = Rank)),
epsilon = epsilon,
Etamat.colmax = Etamat.colmax,
FastAlgorithm = TRUE, # A constant, not a control parameter
@@ -434,14 +455,14 @@ cao.control = function(Rank = 1,
imethod = imethod,
Maxit.optim = Maxit.optim,
optim.maxit = optim.maxit,
- Norrr=Norrr,
+ noRRR = noRRR,
Rank = Rank,
SD.sitescores = SD.sitescores,
SD.Cinit = SD.Cinit,
se.fit = se.fit, # If TRUE, then would need storage for S QR fits
SmallNo = SmallNo,
trace = as.integer(trace),
- Use.Init.Poisson.QO=Use.Init.Poisson.QO,
+ Use.Init.Poisson.QO = Use.Init.Poisson.QO,
iKvector = as.numeric(iKvector),
iShape = as.numeric(iShape),
DF1 = 2.5, # Used as Default value if df1.nl has no default
@@ -457,24 +478,24 @@ cao.control = function(Rank = 1,
create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
- if (!is.Numeric(p1, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("bad input for 'p1'")
- Blist. = vector("list", p1 + Rank)
- for(rr in 1:(p1+Rank))
- Blist.[[rr]] = diag( M )
- names(Blist.) = if (p1 == 1) c("(Intercept)", names(which)) else stop()
- if (MSratio == 2) {
- for(r in 1:Rank)
- Blist.[[p1+r]] = eij(1, M)
- }
- Blist.
+ if (!is.Numeric(p1, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("bad input for 'p1'")
+ Blist. <- vector("list", p1 + Rank)
+ for(rr in 1:(p1+Rank))
+ Blist.[[rr]] <- diag(M)
+ names(Blist.) <- if (p1 == 1) c("(Intercept)", names(which)) else stop()
+ if (MSratio == 2) {
+ for(r in 1:Rank)
+ Blist.[[p1+r]] <- eijfun(1, M)
+ }
+ Blist.
}
-callcaoc = function(cmatrix,
+callcaoc <- function(cmatrix,
etamat, xmat, ymat, wvec, modelno,
Control, Nice21 = TRUE,
p1star. = if (modelno %in% c(3, 5)) 2 else 1,
@@ -482,108 +503,108 @@ callcaoc = function(cmatrix,
n, M,
othint, othdbl,
alldump = FALSE) {
- flush.console()
-
- control = Control
- Rank = control$Rank
- p1 = length(control$colx1.index)
- p2 = length(control$colx2.index)
- yn = dimnames(ymat)[[2]]
- if (length(yn) != ncol(ymat))
- stop("the column names of 'ymat' must be given")
- queue = qbig = Rank # 19/10/05; number of smooths per species
- NOS = if (modelno %in% c(3, 5)) M/2 else M
- df1.nl = procVec(control$df1.nl, yn = yn , Default = control$DF1)
- spar1 = procVec(control$spar1, yn = yn , Default = control$SPAR1)
- df2.nl = procVec(control$df2.nl, yn = yn , Default = control$DF2)
- spar2 = procVec(control$spar2, yn = yn , Default = control$SPAR2)
+ flush.console()
+
+ control <- Control
+ Rank <- control$Rank
+ p1 <- length(control$colx1.index)
+ p2 <- length(control$colx2.index)
+ yn <- dimnames(ymat)[[2]]
+ if (length(yn) != ncol(ymat))
+ stop("the column names of 'ymat' must be given")
+ queue <- qbig <- Rank # 19/10/05; number of smooths per species
+ NOS <- if (modelno %in% c(3, 5)) M/2 else M
+ df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
+ spar1 <- procVec(control$spar1, yn = yn , Default = control$SPAR1)
+ df2.nl <- procVec(control$df2.nl, yn = yn , Default = control$DF2)
+ spar2 <- procVec(control$spar2, yn = yn , Default = control$SPAR2)
if (any(c(length(spar1), length(spar2), length(df1.nl),
length(df2.nl)) != NOS))
stop("wrong length in at least one of ",
"'df1.nl', 'df2.nl', 'spar1', 'spar2'")
- cmatrix = matrix(cmatrix, p2, Rank) # crow1C() needs a matrix as input
- cmatrix = crow1C(cmatrix, crow1positive =control$Crow1positive)
- numat = xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
- evnu = eigen(var(numat))
- temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ cmatrix <- matrix(cmatrix, p2, Rank) # crow1C() needs a matrix as input
+ cmatrix <- crow1C(cmatrix, crow1positive = control$Crow1positive)
+ numat <- xmat[,control$colx2.index, drop = FALSE] %*% cmatrix
+ evnu <- eigen(var(numat))
+ temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
- cmatrix = cmatrix %*% temp7
- cmatrix = crow1C(cmatrix, crow1positive =control$Crow1positive)
- numat = xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
+ cmatrix <- cmatrix %*% temp7
+ cmatrix <- crow1C(cmatrix, crow1positive =control$Crow1positive)
+ numat <- xmat[,control$colx2.index, drop = FALSE] %*% cmatrix
- dim(numat) = c(n, Rank)
- mynames5 = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
- nu1mat = cbind("(Intercept)" = 1, lv = numat)
- dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
+ dim(numat) <- c(n, Rank)
+ mynames5 <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
+ nu1mat <- cbind("(Intercept)" = 1, lv = numat)
+ dimnames(nu1mat) <- list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
- temp.smooth.frame = vector("list", p1+Rank) # Temporary makeshift frame
- names(temp.smooth.frame) = c(names(control$colx1.index), mynames5)
+ temp.smooth.frame <- vector("list", p1+Rank) # Temporary makeshift frame
+ names(temp.smooth.frame) <- c(names(control$colx1.index), mynames5)
for(uu in 1:(p1+Rank)) {
- temp.smooth.frame[[uu]] = nu1mat[,uu]
+ temp.smooth.frame[[uu]] <- nu1mat[,uu]
}
- temp.smooth.frame = data.frame(temp.smooth.frame)
+ temp.smooth.frame <- data.frame(temp.smooth.frame)
for(uu in 1:Rank) {
- attr(temp.smooth.frame[,uu+p1], "spar") = 0 # this value unused
- attr(temp.smooth.frame[,uu+p1], "df") = 4 # this value unused
+ attr(temp.smooth.frame[,uu+p1], "spar") <- 0 # this value unused
+ attr(temp.smooth.frame[,uu+p1], "df") <- 4 # this value unused
}
- pstar. = p1star. + p2star. # = Mdot + Rank
- nstar = if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
- lenbeta = pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
+ pstar. <- p1star. + p2star. # = Mdot + Rank
+ nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
+ lenbeta <- pstar. * ifelse(Nice21, NOS, 1) # Holds the linear coeffs
- inited = if (exists(".VGAM.CAO.etamat", envir=VGAM:::VGAMenv)) 1 else 0
- usethiseta = if (inited == 1)
+ inited <- if (exists(".VGAM.CAO.etamat", envir=VGAM:::VGAMenv)) 1 else 0
+ usethiseta <- if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else t(etamat)
if (any(is.na(usethiseta))) {
- usethiseta = t(etamat) # So that dim(usethiseta) == c(M,n)
+ usethiseta <- t(etamat) # So that dim(usethiseta) == c(M,n)
rmfromVGAMenv("etamat", prefix = ".VGAM.CAO.")
}
- usethisbeta = if (inited == 2)
+ usethisbeta <- if (inited == 2)
getfromVGAMenv("beta", prefix = ".VGAM.CAO.") else double(lenbeta)
- othint[5] = inited # Refine initialization within C
- pstar = NOS * pstar.
- bnumat = if (Nice21) matrix(0, nstar, pstar.) else
+ othint[5] <- inited # Refine initialization within C
+ pstar <- NOS * pstar.
+ bnumat <- if (Nice21) matrix(0, nstar, pstar.) else
stop("code not written here")
- M. = MSratio = M / NOS # 1 or 2 usually
- which = p1 + (1:Rank) # These columns are smoothed
- nwhich = names(which) = mynames5
+ M. <- MSratio <- M / NOS # 1 or 2 usually
+ which <- p1 + (1:Rank) # These columns are smoothed
+ nwhich <- names(which) <- mynames5
- origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
- which=which, p1=p1) # For 1 species only
+ origBlist <- Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
+ which = which, p1 = p1) # For 1 species only
ncolBlist. <- unlist(lapply(Blist. , ncol))
- smooth.frame = s.vam(x=nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
- which=which,
- smooth.frame=temp.smooth.frame,
- bf.maxit=control$bf.maxit,
- bf.epsilon=control$bf.epsilon,
- trace = FALSE, se.fit=control$se.fit,
- X_vlm_save=bnumat, Blist=Blist. ,
- ncolBlist=ncolBlist. ,
- M= M. , qbig = NULL, Umat = NULL, # NULL ==> unneeded
- all.knots=control$all.knots, nk = NULL,
+ smooth.frame <- s.vam(x = nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
+ which = which,
+ smooth.frame = temp.smooth.frame,
+ bf.maxit = control$bf.maxit,
+ bf.epsilon = control$bf.epsilon,
+ trace = FALSE, se.fit = control$se.fit,
+ X_vlm_save = bnumat, Blist = Blist. ,
+ ncolBlist = ncolBlist. ,
+ M = M. , qbig = NULL, Umat = NULL, # NULL ==> unneeded
+ all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
- dimw. = M. # Smoothing one spp. at a time
- dim1U. = M.
- wz. = matrix(0, n, dimw. )
+ dimw. <- M. # Smoothing one spp. at a time
+ dim1U. <- M.
+ wz. <- matrix(0, n, dimw. )
if (names(Blist.)[1] != "(Intercept)") stop("something wrong here")
Blist.[[1]] <- NULL
- trivc = rep(2 - M. , len=queue) # All of queue smooths are basic smooths
+ trivc <- rep(2 - M. , len = queue) # All of queue smooths are basic smooths
ncbvec <- ncolBlist.[nwhich]
ncolb <- max(ncbvec)
- qbig. = NOS * qbig # == NOS * Rank; holds all the smooths
- if (!all.equal(as.vector(ncbvec), rep(1, len=queue)))
+ qbig. <- NOS * qbig # == NOS * Rank; holds all the smooths
+ if (!all.equal(as.vector(ncbvec), rep(1, len = queue)))
stop("'ncbvec' not right---should be a queue-vector of ones")
- pbig = pstar. #
+ pbig <- pstar. #
contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
@@ -599,48 +620,48 @@ callcaoc = function(cmatrix,
eps = 0.00244, # was default till R 1.3.x
maxit = 500 )
- npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat), q=length(which),
- se.fit=control$se.fit, 0,
- control$bf.maxit, qrank = 0, M= M. , nbig=nstar, pbig=pbig,
- qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk,
+ npetc <- c(n = nrow(nu1mat), p. = ncol(nu1mat), q = length(which),
+ se.fit = control$se.fit, 0,
+ control$bf.maxit, qrank = 0, M = M. , nbig = nstar, pbig = pbig,
+ qbig = qbig, dim2wz = dimw. , dim1U = dim1U. , ierror = 0, ldk=ldk,
contr.sp$maxit, iinfo = 0)
if (Rank == 2) {
- smopar = (c(spar1, spar2))[interleave.VGAM(4*NOS, M = 2)]
- dofvec = (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4*NOS, M = 2)]
- lamvec = 0 * dofvec
+ smopar <- (c(spar1, spar2))[interleave.VGAM(4*NOS, M = 2)]
+ dofvec <- (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4*NOS, M = 2)]
+ lamvec <- 0 * dofvec
stop("20100414; havent got Rank = 2 going yet")
} else {
- smopar = c(spar1, spar2)
- dofvec = c(df1.nl, df2.nl) + 1.0
- lamvec = 0 * dofvec
+ smopar <- c(spar1, spar2)
+ dofvec <- c(df1.nl, df2.nl) + 1.0
+ lamvec <- 0 * dofvec
}
ans1 <- dotC(name = "vcao6",
- numat=as.double(numat), ymat=as.double(ymat), wvec=as.double(wvec),
- etamat=as.double(usethiseta), fv=double(NOS*n), zedd=double(n*M),
- wz=double(n*M), U=double(M*n), # bnumat=as.double(bnumat),
- qr=double(nstar*pstar.), qraux=double(pstar.), qpivot=integer(pstar.),
- n=as.integer(n), M=as.integer(M), NOS=as.integer(NOS),
- nstar=as.integer(nstar), dim1U=as.integer( M ), # for U, not U.
- errcode=integer(1), othint=as.integer(othint),
- deviance=double(1 + NOS), # NOS more elts added 20100413
- beta=as.double(usethisbeta),
- othdbl=as.double(othdbl),
+ numat = as.double(numat), ymat = as.double(ymat), wvec = as.double(wvec),
+ etamat = as.double(usethiseta), fv = double(NOS*n), zedd = double(n*M),
+ wz = double(n*M), U = double(M*n), # bnumat = as.double(bnumat),
+ qr = double(nstar*pstar.), qraux = double(pstar.), qpivot = integer(pstar.),
+ n = as.integer(n), M = as.integer(M), NOS = as.integer(NOS),
+ nstar = as.integer(nstar), dim1U = as.integer( M ), # for U, not U.
+ errcode = integer(1), othint = as.integer(othint),
+ deviance = double(1 + NOS), # NOS more elts added 20100413
+ beta = as.double(usethisbeta),
+ othdbl = as.double(othdbl),
npetc = as.integer(npetc), M. = as.integer( M. ),
dofvec = as.double(dofvec),
lamvec = as.double(lamvec),
smopar = as.double(smopar),
- match=as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
- which=as.integer(which),
+ match = as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
+ which = as.integer(which),
smomat = as.double(matrix(0, n, qbig. )),
- nu1mat=as.double(nu1mat),
- blist=as.double(unlist( Blist. )),
+ nu1mat = as.double(nu1mat),
+ blist = as.double(unlist( Blist. )),
as.integer(ncbvec),
- smap=as.integer(1:(Rank+1)), #
+ smap = as.integer(1:(Rank+1)), #
trivc = as.integer(trivc),
levmat = as.double(matrix(0, n, qbig. )),
bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
@@ -661,42 +682,42 @@ flush.console()
rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
}
- returnans = if (alldump) {
- bindex = ans1$bindex
- ncolBlist = ncbvec
+ returnans <- if (alldump) {
+ bindex <- ans1$bindex
+ ncolBlist <- ncbvec
Bspline2 <- vector("list", NOS)
names(Bspline2) <- dimnames(ymat)[[2]]
Bspline <- vector("list", length(nwhich))
names(Bspline) <- nwhich
- ind9 = 0 # moving index
+ ind9 <- 0 # moving index
for(sppno in 1:NOS) {
for(ii in 1:length(nwhich)) {
- ind7 = (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
- ans = ans1$bcoeff[ind9+ind7]
- ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
+ ind7 <- (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
+ ans <- ans1$bcoeff[ind9+ind7]
+ ans <- matrix(ans, ncol=ncolBlist[nwhich[ii]])
Bspline[[ii]] = new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
"xmax" = smooth.frame$xmax[ii],
"xmin" = smooth.frame$xmin[ii],
"knots" = as.vector(smooth.frame$knots[[ii]]))
}
- ind9 = ind9 + smooth.frame$bindex[length(nwhich)+1]-1
- Bspline2[[sppno]] = Bspline
+ ind9 <- ind9 + smooth.frame$bindex[length(nwhich)+1]-1
+ Bspline2[[sppno]] <- Bspline
}
- qrank = npetc[7] # Assume all species have the same qrank value
- dim(ans1$etamat) = c(M,n) # was c(n,M) prior to 22/8/06
+ qrank <- npetc[7] # Assume all species have the same qrank value
+ dim(ans1$etamat) <- c(M,n) # was c(n,M) prior to 22/8/06
- df1.nl = ans1$dofvec[1:NOS] - 1.0
- lambda1 = ans1$lamvec[1:NOS]
- spar1 = ans1$smopar[1:NOS]
+ df1.nl <- ans1$dofvec[1:NOS] - 1.0
+ lambda1 <- ans1$lamvec[1:NOS]
+ spar1 <- ans1$smopar[1:NOS]
if (Rank == 2) {
stop("20100414; this isnt working yet")
- df2.nl = ans1$dofvec[NOS + (1:NOS)] - 1.0
- lambda2 = ans1$lamvec[NOS + (1:NOS)]
- spar2 = ans1$smopar[NOS + (1:NOS)]
+ df2.nl <- ans1$dofvec[NOS + (1:NOS)] - 1.0
+ lambda2 <- ans1$lamvec[NOS + (1:NOS)]
+ spar2 <- ans1$smopar[NOS + (1:NOS)]
}
list(deviance = ans1$deviance[1],
@@ -726,7 +747,7 @@ flush.console()
-calldcaoc = function(cmatrix,
+calldcaoc <- function(cmatrix,
etamat, xmat, ymat, wvec, modelno,
Control, Nice21 = TRUE,
p1star. = if (modelno %in% c(3, 5)) 2 else 1,
@@ -736,69 +757,81 @@ calldcaoc = function(cmatrix,
alldump = FALSE) {
- if (alldump) stop("really used?")
- flush.console()
+ if (alldump)
+ stop("really used?")
+ flush.console()
- if (!Nice21) stop("'Nice21' must be TRUE")
- control = Control
- Rank = control$Rank
- p2 = length(control$colx2.index)
- yn = dimnames(ymat)[[2]]
- if (!length( yn )) yn = paste("Y", 1:ncol(ymat), sep = "")
- cmatrix = scale(cmatrix)
- xmat2 <- xmat[,control$colx2.index,drop = FALSE] #ccc
- numat <- xmat2 %*% matrix(cmatrix, p2, Rank)
- dim(numat) <- c(nrow(xmat), Rank)
- temp.smooth.frame = vector("list", 1+Rank) # Temporary makeshift frame
- mynames5 = if (Rank == 1) "lv" else paste("lv",1:Rank,sep = "")
- names(temp.smooth.frame) = c("(Intercept)", mynames5)
- temp.smooth.frame[[1]] = rep(1, len=n)
- for(uu in 1:Rank) {
- temp.smooth.frame[[uu+1]] = numat[,uu]
- }
- temp.smooth.frame = data.frame(temp.smooth.frame)
- for(uu in 1:Rank) {
- attr(temp.smooth.frame[,uu+1], "spar") = 0 # any old value
- attr(temp.smooth.frame[,uu+1], "df") = 4 # any old value
- }
- pstar. = p1star. + p2star.
- nstar = if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
- NOS = ifelse(modelno %in% c(3, 5), M / 2, M)
- lenbeta = pstar. * ifelse(Nice21, NOS, 1)
+ U <- NULL
+
+
+
+
+
+ if (!Nice21)
+ stop("'Nice21' must be TRUE")
+ control <- Control
+ Rank <- control$Rank
+ p2 <- length(control$colx2.index)
+ yn <- dimnames(ymat)[[2]]
+ if (!length( yn ))
+ yn <- paste("Y", 1:ncol(ymat), sep = "")
+
+
+ cmatrix <- scale(cmatrix)
+
+ xmat2 <- xmat[,control$colx2.index, drop = FALSE] #ccc
+ numat <- xmat2 %*% matrix(cmatrix, p2, Rank)
+ dim(numat) <- c(nrow(xmat), Rank)
+ temp.smooth.frame <- vector("list", 1+Rank) # Temporary makeshift frame
+ mynames5 <- if (Rank == 1) "lv" else paste("lv",1:Rank,sep = "")
+ names(temp.smooth.frame) <- c("(Intercept)", mynames5)
+ temp.smooth.frame[[1]] <- rep(1, len=n)
+ for(uu in 1:Rank) {
+ temp.smooth.frame[[uu+1]] <- numat[,uu]
+ }
+ temp.smooth.frame <- data.frame(temp.smooth.frame)
+ for(uu in 1:Rank) {
+ attr(temp.smooth.frame[,uu+1], "spar") <- 0 # any old value
+ attr(temp.smooth.frame[,uu+1], "df") <- 4 # any old value
+ }
+ pstar. <- p1star. + p2star.
+ nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
+ NOS <- ifelse(modelno %in% c(3, 5), M / 2, M)
+ lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
if (TRUE) {
- inited = if (exists(".VGAM.CAO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
- usethiseta = if (inited == 1) get(".VGAM.CAO.etamat",
+ inited <- if (exists(".VGAM.CAO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
+ usethiseta <- if (inited == 1) get(".VGAM.CAO.etamat",
envir = VGAM:::VGAMenv) else t(etamat)
}
- usethisbeta = if (inited == 2) get(".VGAM.CAO.beta",
+ usethisbeta <- if (inited == 2) get(".VGAM.CAO.beta",
envir = VGAM:::VGAMenv) else double(lenbeta)
- pstar = NOS * pstar.
- bnumat = if (Nice21) matrix(0,nstar,pstar) else stop("need 'Nice21'")
+ pstar <- NOS * pstar.
+ bnumat <- if (Nice21) matrix(0,nstar,pstar) else stop("need 'Nice21'")
- M. = MSratio = M / NOS # 1 or 2 usually
+ M. <- MSratio <- M / NOS # 1 or 2 usually
- p1 = 1
+ p1 <- 1
- which = p1 + (1:Rank) # The first 1 is the intercept term
- nwhich = names(which) = mynames5
+ which <- p1 + (1:Rank) # The first 1 is the intercept term
+ nwhich <- names(which) <- mynames5
- origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
- which=which, p1 = p1) # For 1 species
+ origBlist <- Blist. <- create.cms(Rank = Rank, M = M., MSratio = MSratio,
+ which = which, p1 = p1) # For 1 species
ncolBlist. <- unlist(lapply(Blist. , ncol))
- nu1mat = cbind("(Intercept)" = 1, lv=numat)
- dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)","lv"))
+ nu1mat <- cbind("(Intercept)" = 1, lv = numat)
+ dimnames(nu1mat) <- list(dimnames(xmat)[[1]], c("(Intercept)","lv"))
- smooth.frame = s.vam(x=nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
+ smooth.frame <- s.vam(x = nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
which = which,
smooth.frame = temp.smooth.frame,
bf.maxit = control$bf.maxit,
@@ -807,6 +840,7 @@ calldcaoc = function(cmatrix,
X_vlm_save = bnumat, Blist = Blist.,
ncolBlist = ncolBlist. ,
M = M. , qbig = NULL,
+
Umat = U, # NULL value ==> not needed
all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
@@ -816,8 +850,8 @@ calldcaoc = function(cmatrix,
- wz. = matrix(0, n, M. ) # not sure
- dimw. = if (is.matrix( wz. )) ncol( wz. ) else 1
+ wz. <- matrix(0, n, M. ) # not sure
+ dimw. <- if (is.matrix( wz. )) ncol( wz. ) else 1
dim1U. <- M. # 20100410
@@ -825,32 +859,32 @@ calldcaoc = function(cmatrix,
- queue = qbig = Rank # 19/10/05; number of smooths per species
+ queue <- qbig <- Rank # 19/10/05; number of smooths per species
Blist.[[1]] <- NULL
- trivc = rep(2 - M. , len=queue) # All of queue smooths are basic smooths
+ trivc <- rep(2 - M. , len = queue) # All of queue smooths are basic smooths
ncbvec <- ncolBlist.[nwhich]
ncolb <- max(ncbvec)
- qbig. = NOS * qbig # == NOS * Rank
- pbig = pstar. # Not sure
+ qbig. <- NOS * qbig # == NOS * Rank
+ pbig <- pstar. # Not sure
if (FALSE) {
- df1.nl = rep(control$df1.nl, len=NOS) # This is used
- df2.nl = rep(control$df2.nl, len=NOS) # This is used
- spar1 = rep(control$spar1, len=NOS) # This is used
- spar2 = rep(control$spar2, len=NOS) # This is used
+ df1.nl <- rep(control$df1.nl, len = NOS) # This is used
+ df2.nl <- rep(control$df2.nl, len = NOS) # This is used
+ spar1 <- rep(control$spar1, len = NOS) # This is used
+ spar2 <- rep(control$spar2, len = NOS) # This is used
} else {
# This is used
- df1.nl = procVec(control$df1.nl, yn = yn , Default = control$DF1)
- df2.nl = df1.nl # 20100417; stopgap
- spar1 = procVec(control$spar1, yn = yn , Default = control$SPAR1)
- spar2 = spar1 # 20100417; stopgap
- dofvec = c(df1.nl, df2.nl)
- lamvec = 0 * dofvec
- smopar = c(spar1, spar2)
+ df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
+ df2.nl <- df1.nl # 20100417; stopgap
+ spar1 <- procVec(control$spar1, yn = yn , Default = control$SPAR1)
+ spar2 <- spar1 # 20100417; stopgap
+ dofvec <- c(df1.nl, df2.nl)
+ lamvec <- 0 * dofvec
+ smopar <- c(spar1, spar2)
}
@@ -872,46 +906,46 @@ calldcaoc = function(cmatrix,
warning("20100405; this is old:")
- npetc = c(n=n, p = 1+Rank, length(which), se.fit=control$se.fit, 0,
- maxitl=control$maxitl, qrank = 0, M= M. , n.M = n* M. ,
- pbig=sum( ncolBlist.),
- qbig=qbig, dimw= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk)
+ npetc <- c(n = n, p = 1+Rank, length(which), se.fit = control$se.fit, 0,
+ maxitl = control$maxitl, qrank = 0, M = M. , n.M = n* M. ,
+ pbig = sum( ncolBlist.),
+ qbig = qbig, dimw = dimw. , dim1U = dim1U. , ierror = 0, ldk = ldk)
warning("20100405; this is new:")
- npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat),
- q=length(which),
- se.fit=control$se.fit, 0,
- control$bf.maxit, qrank = 0, M= M. , nbig=nstar, pbig=pbig,
- qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk,
+ npetc <- c(n = nrow(nu1mat), p. = ncol(nu1mat),
+ q = length(which),
+ se.fit = control$se.fit, 0,
+ control$bf.maxit, qrank = 0, M = M. , nbig = nstar, pbig = pbig,
+ qbig = qbig, dim2wz = dimw. , dim1U = dim1U. , ierror = 0, ldk = ldk,
contr.sp$maxit, iinfo = 0)
flush.console()
ans1 <-
dotC(name = if (Nice21) "vdcao6" else stop("need 'Nice21'"),
- numat=as.double(numat), as.double(ymat), as.double(wvec),
- etamat=as.double(usethiseta), fv=double(NOS*n), zedd=double(n*M),
- wz=double(n*M), U=double(M*n), # bnumat=as.double(bnumat),
- qr=double(nstar*pstar.), qraux=double(pstar.), qpivot=integer(pstar.),
- as.integer(n), as.integer(M), NOS=as.integer(NOS),
- as.integer(nstar), dim1U=as.integer(M),
- errcode=integer(1), othint=as.integer(othint),
- deviance = double(1 + NOS), beta=as.double(usethisbeta),
- othdbl=as.double(othdbl),
+ numat = as.double(numat), as.double(ymat), as.double(wvec),
+ etamat = as.double(usethiseta), fv = double(NOS*n), zedd = double(n*M),
+ wz = double(n*M), U = double(M*n), # bnumat = as.double(bnumat),
+ qr = double(nstar*pstar.), qraux = double(pstar.), qpivot = integer(pstar.),
+ as.integer(n), as.integer(M), NOS = as.integer(NOS),
+ as.integer(nstar), dim1U = as.integer(M),
+ errcode = integer(1), othint = as.integer(othint),
+ deviance = double(1 + NOS), beta = as.double(usethisbeta),
+ othdbl = as.double(othdbl),
as.double(xmat2),
- cmat=as.double(cmatrix),
- p2=as.integer(p2), deriv=double(p2*Rank),
- betasave=double(lenbeta),
+ cmat = as.double(cmatrix),
+ p2 = as.integer(p2), deriv = double(p2*Rank),
+ betasave = double(lenbeta),
npetc = as.integer(npetc), M. = as.integer( M. ),
dofvec = as.double(dofvec + 1.0),
lamvec = as.double(0 * dofvec),
smopar = as.double(smopar),
- match=as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
+ match = as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
as.integer(which),
smomat = as.double(matrix(0, n, qbig. )),
- nu1mat=as.double(nu1mat),
+ nu1mat = as.double(nu1mat),
as.double(unlist( Blist. )),
- as.integer(ncbvec), smap=as.integer(1:(Rank+1)),
+ as.integer(ncbvec), smap = as.integer(1:(Rank+1)),
trivc = as.integer(trivc),
levmat = as.double(matrix(0, n, qbig. )),
bcoefficients = double(NOS * sum(smooth.frame$nknots*ncbvec)),
@@ -922,38 +956,38 @@ warning("20100405; this is new:")
flush.console()
assign(".VGAM.CAO.etamat", ans1$etamat, envir = VGAM:::VGAMenv)
- assign(".VGAM.CAO.z", ans1$zedd, envir=VGAM:::VGAMenv) # z; minus any offset
- assign(".VGAM.CAO.U", ans1$U, envir=VGAM:::VGAMenv) # U
+ assign(".VGAM.CAO.z", ans1$zedd, envir = VGAM:::VGAMenv) # z; minus any offset
+ assign(".VGAM.CAO.U", ans1$U, envir = VGAM:::VGAMenv) # U
if (ans1$errcode == 0) {
} else {
cat("warning in calldcaoc: error code = ", ans1$errcode, "\n")
flush.console()
}
- returnans = if (alldump) {
- bindex = ans1$bindex
- ncolBlist = ncbvec
+ returnans <- if (alldump) {
+ bindex <- ans1$bindex
+ ncolBlist <- ncbvec
Bspline2 <- vector("list", NOS)
names(Bspline2) <- dimnames(ymat)[[2]]
Bspline <- vector("list", length(nwhich))
names(Bspline) <- nwhich
- ind9 = 0 # moving index
+ ind9 <- 0 # moving index
for(jay in 1:NOS) {
for(ii in 1:length(nwhich)) {
- ind9 = ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
- ans = ans1$bcoeff[ind9]
- ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
- Bspline[[ii]] = new(Class = "vsmooth.spline.fit",
+ ind9 <- ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
+ ans <- ans1$bcoeff[ind9]
+ ans <- matrix(ans, ncol = ncolBlist[nwhich[ii]])
+ Bspline[[ii]] <- new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
"xmax" = smooth.frame$xmax[ii],
"xmin" = smooth.frame$xmin[ii],
"knots" = as.vector(smooth.frame$knots[[ii]]))
}
- Bspline2[[jay]] = Bspline
+ Bspline2[[jay]] <- Bspline
}
- qrank = npetc[7] # Assume all species have the same qrank value
- dim(ans1$etamat) = c(M,n) # bug: was c(n,M) prior to 22/8/06
+ qrank <- npetc[7] # Assume all species have the same qrank value
+ dim(ans1$etamat) <- c(M,n) # bug: was c(n,M) prior to 22/8/06
list(deviance = ans1$deviance[1],
alldeviance = ans1$deviance[-1],
bcoefficients = ans1$bcoefficients,
@@ -1005,84 +1039,84 @@ setClass(Class = "Coef.cao", representation(
"spar2" = "numeric"))
-Coef.cao = function(object,
+Coef.cao <- function(object,
epsOptimum = 0.00001, # determines how accurately Optimum is estimated
- gridlen = 40, # Number of points on the grid (one level at a time)
+ gridlen = 40, # Number of points on the grid (one level at a time)
maxgriditer = 10, # Maximum number of iterations allowed for grid search
smallno = 0.05,
...) {
- if (!is.Numeric(epsOptimum, positive = TRUE, allowable.length = 1))
- stop("bad input for argument 'epsOptimum'")
- if (!is.Numeric(gridlen, positive = TRUE, integer.valued = TRUE) ||
- gridlen < 5)
- stop("bad input for argument 'gridlen'")
- if (!is.Numeric(maxgriditer, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE) ||
- maxgriditer < 3)
- stop("bad input for argument 'maxgriditer'")
-  if (!is.logical(ConstrainedO <- object@control$ConstrainedO))
- stop("cannot determine whether the model is constrained or not")
- if (!is.Numeric(smallno, positive = TRUE, allowable.length = 1) ||
- smallno > 0.5 || smallno < 0.0001)
- stop("bad input for argument 'smallno'")
-
-  ocontrol = object@control
-  if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2")
-  gridlen = rep(gridlen, length=Rank)
-  M = if (any(slotNames(object) == "predictors") &&
-      is.matrix(object@predictors)) ncol(object@predictors) else
-      object@misc$M
-  NOS = if (length(object@y)) ncol(object@y) else M
- MSratio = M / NOS # 1 or 2; First value is g(mean)=quadratic form in lv
- nice21 = (length(ocontrol$colx1.index) == 1) &&
+ if (!is.Numeric(epsOptimum, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'epsOptimum'")
+ if (!is.Numeric(gridlen, positive = TRUE, integer.valued = TRUE) ||
+ gridlen < 5)
+ stop("bad input for argument 'gridlen'")
+ if (!is.Numeric(maxgriditer, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE) ||
+ maxgriditer < 3)
+ stop("bad input for argument 'maxgriditer'")
+  if (!is.logical(ConstrainedO <- object@control$ConstrainedO))
+ stop("cannot determine whether the model is constrained or not")
+ if (!is.Numeric(smallno, positive = TRUE, allowable.length = 1) ||
+ smallno > 0.5 || smallno < 0.0001)
+ stop("bad input for argument 'smallno'")
+
+  ocontrol <- object@control
+  if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2")
+  gridlen <- rep(gridlen, length=Rank)
+  M <- if (any(slotNames(object) == "predictors") &&
+      is.matrix(object@predictors)) ncol(object@predictors) else
+      object@misc$M
+  NOS <- if (length(object@y)) ncol(object@y) else M
+ MSratio <- M / NOS # 1 or 2; First value is g(mean)=quadratic form in lv
+ nice21 <- (length(ocontrol$colx1.index) == 1) &&
(names(ocontrol$colx1.index) == "(Intercept)")
- if (!nice21) stop("Can only handle 'Norrr = ~ 1'")
-
- p1 = length(ocontrol$colx1.index)
- p2 = length(ocontrol$colx2.index)
-  modelno = object@control$modelno # 1,2,3,... or 0
-  ynames = object@misc$ynames
-  if (!length(ynames)) ynames = object@misc$predictors.names
-  if (!length(ynames)) ynames = object@misc$ynames
-  if (!length(ynames)) ynames = paste("Y", 1:NOS, sep = "")
-  lp.names = object@misc$predictors.names
-  if (!length(lp.names)) lp.names = NULL
-
-  lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
-  Cmat = object@extra$Cmat # p2 x Rank (provided maxitl > 1)
+ if (!nice21) stop("Can only handle 'noRRR = ~ 1'")
+
+ p1 <- length(ocontrol$colx1.index)
+ p2 <- length(ocontrol$colx2.index)
+  modelno <- object@control$modelno # 1,2,3,... or 0
+  ynames <- object@misc$ynames
+  if (!length(ynames)) ynames <- object@misc$predictors.names
+  if (!length(ynames)) ynames <- object@misc$ynames
+  if (!length(ynames)) ynames <- paste("Y", 1:NOS, sep = "")
+  lp.names <- object@misc$predictors.names
+  if (!length(lp.names)) lp.names <- NULL
+
+  lv.names <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
+  Cmat <- object@extra$Cmat # p2 x Rank (provided maxitl > 1)
if (ConstrainedO)
- dimnames(Cmat) = list(names(ocontrol$colx2.index), lv.names)
- lv.mat = if (ConstrainedO) {
-    object@x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+ dimnames(Cmat) <- list(names(ocontrol$colx2.index), lv.names)
+ lv.mat <- if (ConstrainedO) {
+    object@x[,ocontrol$colx2.index, drop = FALSE] %*% Cmat
} else {
    object@lv
}
- optimum = matrix(as.numeric(NA), Rank, NOS,
- dimnames=list(lv.names, ynames))
- extents = apply(lv.mat, 2, range) # 2 by R
+ optimum <- matrix(as.numeric(NA), Rank, NOS,
+ dimnames = list(lv.names, ynames))
+ extents <- apply(lv.mat, 2, range) # 2 by R
- maximum = rep(as.numeric(NA), len=NOS)
+ maximum <- rep(as.numeric(NA), len=NOS)
- whichSpecies = 1:NOS # Do it for all species
+ whichSpecies <- 1:NOS # Do it for all species
if (Rank == 1) {
- gridd = cbind(seq(extents[1,1], extents[2,1], len=gridlen))
+ gridd <- cbind(seq(extents[1,1], extents[2,1], len=gridlen))
} else {
- gridd = expand.grid(seq(extents[1,1], extents[2,1], len=gridlen[1]),
- seq(extents[1,2], extents[2,2], len=gridlen[2]))
- eta2matrix = matrix(0, NOS, 1)
+ gridd <- expand.grid(seq(extents[1,1], extents[2,1], len=gridlen[1]),
+ seq(extents[1,2], extents[2,2], len=gridlen[2]))
+ eta2matrix <- matrix(0, NOS, 1)
}
- gridd.orig = gridd
+ gridd.orig <- gridd
# if (Rank == 2) then this is for initial values
for(sppno in 1:length(whichSpecies)) {
- gridd = gridd.orig
- gridres1 = gridd[2,1] - gridd[1,1]
- gridres2 = if (Rank == 2) gridd[2,2] - gridd[1,2] else 0
- griditer = 1
+ gridd <- gridd.orig
+ gridres1 <- gridd[2,1] - gridd[1,1]
+ gridres2 <- if (Rank == 2) gridd[2,2] - gridd[1,2] else 0
+ griditer <- 1
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], ynames) else whichSpecies[sppno]
if (is.na(indexSpecies))
@@ -1090,75 +1124,76 @@ Coef.cao = function(object,
while(griditer == 1 ||
((griditer <= maxgriditer) &&
- ((gridres1 > epsOptimum) || (gridres2 > epsOptimum)))) {
- temp = predictcao(object, grid=gridd, sppno=thisSpecies,
+ ((gridres1 > epsOptimum) ||
+ (gridres2 > epsOptimum)))) {
+ temp <- predictcao(object, grid=gridd, sppno=thisSpecies,
Rank=Rank, deriv = 0, MSratio=MSratio)
- yvals = temp$yvals # gridlen-vector
- xvals = temp$xvals # gridlen x Rank; gridd
- if (length(temp$eta2)) eta2matrix[sppno,1] = temp$eta2
+ yvals <- temp$yvals # gridlen-vector
+ xvals <- temp$xvals # gridlen x Rank; gridd
+ if (length(temp$eta2)) eta2matrix[sppno,1] <- temp$eta2
- nnn = length(yvals)
- index = (1:nnn)[yvals == max(yvals)]
+ nnn <- length(yvals)
+ index <- (1:nnn)[yvals == max(yvals)]
if (length(index) != 1) warning("could not find a single maximum")
if (Rank == 2) {
- initvalue = rep(xvals[index,], length=Rank) # for optim()
+ initvalue <- rep(xvals[index,], length=Rank) # for optim()
# Make sure initvalue is in the interior
if (abs(initvalue[1] - extents[1,1]) < smallno)
- initvalue[1] = extents[1,1] + smallno
+ initvalue[1] <- extents[1,1] + smallno
if (abs(initvalue[1] - extents[2,1]) < smallno)
- initvalue[1] = extents[2,1] - smallno
+ initvalue[1] <- extents[2,1] - smallno
if (abs(initvalue[2] - extents[1,2]) < smallno)
- initvalue[2] = extents[1,2] + smallno
+ initvalue[2] <- extents[1,2] + smallno
if (abs(initvalue[2] - extents[2,2]) < smallno)
- initvalue[2] = extents[2,2] - smallno
+ initvalue[2] <- extents[2,2] - smallno
break
}
if (index == 1 || index == nnn) {
- maximum[sppno] = optimum[1,sppno] = NA
- gridres1 = epsOptimum + 1 # equivalent to a break
+ maximum[sppno] <- optimum[1,sppno] <- NA
+ gridres1 <- epsOptimum + 1 # equivalent to a break
break # just in case
} else {
- maximum[sppno] = yvals[index] # on the eta scale
- optimum[1,sppno] = xvals[index,1]
- gridd[,1] = seq(
+ maximum[sppno] <- yvals[index] # on the eta scale
+ optimum[1,sppno] <- xvals[index,1]
+ gridd[,1] <- seq(
max(extents[1,1], optimum[1,sppno]-gridres1),
min(extents[2,1], optimum[1,sppno]+gridres1),
len=gridlen)
- gridres1 = gridd[2,1] - gridd[1,1]
- griditer = griditer + 1
+ gridres1 <- gridd[2,1] - gridd[1,1]
+ griditer <- griditer + 1
}
} # of while
if (Rank == 2) {
# Rank = 2, so use optim(). The above was to get initial values.
- myfun = function(x, object, sppno, Rank = 1, deriv = 0, MSratio = 1) {
+ myfun <- function(x, object, sppno, Rank = 1, deriv = 0, MSratio = 1) {
# x is a 2-vector
- x = matrix(x, 1, length(x))
- temp = predictcao(object, grid = x, sppno=sppno,
- Rank=Rank, deriv=deriv, MSratio=MSratio)
+ x <- matrix(x, 1, length(x))
+ temp <- predictcao(object, grid = x, sppno = sppno,
+ Rank = Rank, deriv = deriv, MSratio = MSratio)
temp$yval
}
- answer = optim(initvalue, myfun, gr = NULL, method = "L-BFGS-B",
- lower=extents[1,], upper=extents[2,],
- control=list(fnscale = -1), # maximize!
- object=object, sppno=sppno, Rank=Rank,
- deriv = 0, MSratio=MSratio)
+ answer <- optim(initvalue, myfun, gr = NULL, method = "L-BFGS-B",
+ lower = extents[1,], upper = extents[2,],
+ control = list(fnscale = -1), # maximize!
+ object = object, sppno = sppno, Rank = Rank,
+ deriv = 0, MSratio = MSratio)
# Check to see if the soln is @ boundary. If not, assign it.
for(rindex in 1:Rank)
if (abs(answer$par[rindex] - extents[1,rindex]) > smallno &&
abs(answer$par[rindex] - extents[2,rindex]) > smallno) {
- optimum[rindex,sppno] = answer$par[rindex]
- maximum[sppno] = answer$value
+ optimum[rindex,sppno] <- answer$par[rindex]
+ maximum[sppno] <- answer$value
}
} # end of Rank = 2
} # end of sppno
- myetamat = rbind(maximum)
- if (MSratio == 2) myetamat = kronecker(myetamat, matrix(1:0, 1, 2))
-  maximum = object@family@linkinv(eta=myetamat, extra=object@extra)
- maximum = c(maximum) # Convert from matrix to vector
- names(maximum) = ynames
+ myetamat <- rbind(maximum)
+ if (MSratio == 2) myetamat <- kronecker(myetamat, matrix(1:0, 1, 2))
+  maximum <- object@family@linkinv(eta = myetamat, extra = object@extra)
+ maximum <- c(maximum) # Convert from matrix to vector
+ names(maximum) <- ynames
- ans = new(Class = "Coef.cao",
+ ans <- new(Class = "Coef.cao",
            Bspline = object@Bspline,
Constrained=ConstrainedO,
            df1.nl = object@extra$df1.nl,
@@ -1173,65 +1208,66 @@ Coef.cao = function(object,
            spar1 = object@extra$spar1)
  if (ConstrainedO) {ans@C = Cmat} else {Cmat = NULL}
if (Rank == 2) {
- dimnames(eta2matrix) = list(
+ dimnames(eta2matrix) <- list(
        object@misc$predictors.names[c(FALSE,TRUE)], " ")
-    ans@eta2 = eta2matrix
-    ans@df2.nl = object@extra$df2.nl
-    ans@spar2 = object@extra$spar2
+    ans@eta2 <- eta2matrix
+    ans@df2.nl <- object@extra$df2.nl
+    ans@spar2 <- object@extra$spar2
}
for(rindex in 1:Rank) {
-    ans@OptimumOrder[rindex,] = order(ans@Optimum[rindex,])
-    ans@lvOrder[,rindex] = order(ans@lv[,rindex])
+    ans@OptimumOrder[rindex,] <- order(ans@Optimum[rindex,])
+    ans@lvOrder[,rindex] <- order(ans@lv[,rindex])
}
  if (length(object@misc$estimated.dispersion) &&
      object@misc$estimated.dispersion) {
-    p = length(object@coefficients)
-    n = object@misc$n
-    M = object@misc$M
-    NOS = if (length(object@y)) ncol(object@y) else M
-    pstar = p + length(Cmat) # Adjustment
-    adjusted.dispersion = object@misc$dispersion *
+    p <- length(object@coefficients)
+    n <- object@misc$n
+    M <- object@misc$M
+    NOS <- if (length(object@y)) ncol(object@y) else M
+    pstar <- p + length(Cmat) # Adjustment
+    adjusted.dispersion <- object@misc$dispersion *
(n*M - p) / (n*M - pstar)
-    ans@dispersion = adjusted.dispersion
+    ans@dispersion <- adjusted.dispersion
}
if (MSratio == 2) {
-    lcoef = object@coefficients
-    temp = lcoef[((1:NOS)-1)*(2+Rank)+2]
-    names(temp) = object@misc$predictors.names[2*(1:NOS)]
-    ans@dispersion = temp
+    lcoef <- object@coefficients
+    temp <- lcoef[((1:NOS)-1)*(2+Rank)+2]
+    names(temp) <- object@misc$predictors.names[2*(1:NOS)]
+    ans@dispersion <- temp
}
-  dimnames(ans@Optimum) = list(lv.names, ynames)
+  dimnames(ans@Optimum) <- list(lv.names, ynames)
ans
}
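For orientation: the rewritten Coef.cao() above estimates each species' optimum by evaluating the fitted curve on a coarse grid and then, for rank-2 fits, refining the grid maximiser with optim() run as a maximiser over the observed latent-variable range. A minimal stand-alone sketch of that grid-then-optim idea, using a hypothetical unimodal response f() rather than any VGAM internals:

    f <- function(u) exp(-(u - 0.3)^2)          # toy species response curve
    grid <- seq(-2, 2, length = 40)             # coarse grid, cf. gridlen = 40
    start <- grid[which.max(f(grid))]           # grid maximiser as the initial value
    fit <- optim(start, f, method = "L-BFGS-B",
                 lower = -2, upper = 2,
                 control = list(fnscale = -1))  # fnscale = -1 makes optim() maximise
    fit$par     # estimated optimum
    fit$value   # estimated maximum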
-show.Coef.cao = function(object, digits = max(2, options()$digits-2), ...) {
-  Rank = object@Rank
-  NOS = object@NOS
-  M = object@M
+show.Coef.cao <-
+  function(object, digits = max(2, options()$digits-2), ...) {
+  Rank <- object@Rank
+  NOS <- object@NOS
+  M <- object@M
-  Maximum = if (length(object@Maximum))
-            cbind(Maximum=object@Maximum) else NULL
-  optmat = cbind(t(object@Optimum))
- dimnames(optmat) = list(dimnames(optmat)[[1]],
+  Maximum <- if (length(object@Maximum))
+             cbind(Maximum = object@Maximum) else NULL
+  optmat <- cbind(t(object@Optimum))
+ dimnames(optmat) <- list(dimnames(optmat)[[1]],
if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
else "Optimum")
  if ( object@Constrained ) {
cat("\nC matrix (constrained/canonical coefficients)\n")
-    print(object@C, digits=digits, ...)
+    print(object@C, digits = digits, ...)
}
cat("\nOptima and maxima\n")
- print(cbind(Optimum=optmat,
+ print(cbind(Optimum = optmat,
Maximum), digits = max(1, digits-1))
cat("\nNonlinear degrees of freedom\n")
if (Rank == 1) {
-    print(cbind(df1.nl = object@df1.nl), digits=max(2, digits-1), ...)
+    print(cbind(df1.nl = object@df1.nl), digits = max(2, digits-1), ...)
  } else {
    print(cbind(df1.nl = object@df1.nl,
-                df2.nl = object@df2.nl), digits=max(2, digits-1), ...)
+                df2.nl = object@df2.nl), digits = max(2, digits-1), ...)
}
invisible(object)
}
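A hedged usage sketch of the pair above, assuming a previously fitted "cao" object named fit:

    cc <- Coef(fit)   # dispatches to Coef.cao(); returns a "Coef.cao" object
    cc                # printed via show.Coef.cao(): C matrix, optima/maxima, nonlinear df
    cc@Optimum        # Rank x NOS matrix of species optima
    cc@Maximum        # fitted maxima on the response scale
    cc@df1.nl         # nonlinear degrees of freedom for the first latent variable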
@@ -1255,10 +1291,10 @@ setMethod("Coef", "cao", function(object, ...) Coef.cao(object, ...))
-lvplot.cao = function(object,
- add= FALSE, plot.it = TRUE, rugplot = TRUE, y = FALSE,
- type=c("fitted.values", "predictors"),
- xlab=paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
+lvplot.cao <- function(object,
+ add = FALSE, plot.it = TRUE, rugplot = TRUE, y = FALSE,
+ type = c("fitted.values", "predictors"),
+ xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
ylab = if (Rank == 1) switch(type, predictors = "Predictors",
fitted.values = "Fitted values") else "Latent Variable 2",
pcex=par()$cex, pcol=par()$col, pch=par()$pch,
@@ -1267,41 +1303,40 @@ lvplot.cao = function(object,
sites= FALSE, spch = NULL, scol=par()$col, scex=par()$cex,
sfont=par()$font,
whichSpecies = NULL,
- check.ok = TRUE, ...)
-{
+ check.ok = TRUE, ...) {
type <- match.arg(type, c("fitted.values", "predictors"))[1]
  if ((Rank <- object@control$Rank) > 2)
stop("can only handle 'Rank' = 1 or 2 models")
- M = if (any(slotNames(object) == "predictors") &&
+ M <- if (any(slotNames(object) == "predictors") &&
      is.matrix(object@predictors)) ncol(object@predictors) else
      object@misc$M
-  NOS = ncol(object@y)
-  MSratio = M / NOS # First value is g(mean) = quadratic form in lv
-  n = object@misc$n
-  colx2.index = object@control$colx2.index
-  cx1i = object@control$colx1.index
- if (!length(whichSpecies)) whichSpecies = 1:NOS
+  NOS <- ncol(object@y)
+  MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
+  n <- object@misc$n
+  colx2.index <- object@control$colx2.index
+  cx1i <- object@control$colx1.index
+ if (!length(whichSpecies)) whichSpecies <- 1:NOS
if (check.ok)
if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
- stop("latent variable plots allowable only for 'Norrr = ~ 1' models")
+ stop("latent variable plots allowable only for 'noRRR = ~ 1' models")
- Coeflist = Coef(object)
-  Cmat = Coeflist@C
-  lvmat = Coeflist@lv # n x Rank
+  Coeflist <- Coef(object)
+  Cmat <- Coeflist@C
+  lvmat <- Coeflist@lv # n x Rank
if (!plot.it) return(lvmat)
- r.curves = slot(object, type) # n times (M or S) (\boldeta or \boldmu)
+ r.curves <- slot(object, type) # n times (M or S) (\boldeta or \boldmu)
if (MSratio != 1 && type == "predictors")
- stop("can only plot the predictors if M = S")
- MorS = ncol(r.curves) # Actually, here, the value is S always.
+ stop("can only plot the predictors if M == S")
+ MorS <- ncol(r.curves) # Actually, here, the value is S always.
if (!add) {
if (Rank == 1) {
matplot(lvmat,
if ( y && type == "fitted.values")
- object at y[,whichSpecies,drop = FALSE] else
- r.curves[,whichSpecies,drop = FALSE],
+ object at y[,whichSpecies, drop = FALSE] else
+ r.curves[,whichSpecies, drop = FALSE],
type = "n", xlab = xlab, ylab=ylab, ...)
} else { # Rank == 2
matplot(c(Coeflist at Optimum[1,whichSpecies], lvmat[,1]),
@@ -1311,31 +1346,31 @@ lvplot.cao = function(object,
}
- pch <- rep(pch, leng=length(whichSpecies))
- pcol <- rep(pcol, leng=length(whichSpecies))
- pcex <- rep(pcex, leng=length(whichSpecies))
- llty <- rep(llty, leng=length(whichSpecies))
- lcol <- rep(lcol, leng=length(whichSpecies))
- llwd <- rep(llwd, leng=length(whichSpecies))
- adj.arg <- rep(adj.arg, leng=length(whichSpecies))
+ pch <- rep(pch, leng = length(whichSpecies))
+ pcol <- rep(pcol, leng = length(whichSpecies))
+ pcex <- rep(pcex, leng = length(whichSpecies))
+ llty <- rep(llty, leng = length(whichSpecies))
+ lcol <- rep(lcol, leng = length(whichSpecies))
+ llwd <- rep(llwd, leng = length(whichSpecies))
+ adj.arg <- rep(adj.arg, leng = length(whichSpecies))
- sppnames = if (type == "predictors") dimnames(r.curves)[[2]] else
+ sppnames <- if (type == "predictors") dimnames(r.curves)[[2]] else
dimnames(object at y)[[2]]
if (Rank == 1) {
for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if (is.na(indexSpecies))
stop("mismatch found in 'whichSpecies'")
- xx = lvmat
- yy = r.curves[,indexSpecies]
- o = sort.list(xx)
- xx = xx[ o ]
- yy = yy[ o ]
+ xx <- lvmat
+ yy <- r.curves[,indexSpecies]
+ o <- sort.list(xx)
+ xx <- xx[ o ]
+ yy <- yy[ o ]
lines(xx, yy, col=lcol[sppno], lwd=llwd[sppno], lty=llty[sppno])
if ( y && type == "fitted.values") {
- ypts = object at y
+ ypts <- object at y
if (ncol(as.matrix(ypts)) == ncol(r.curves))
points(xx, ypts[o,sppno], col=pcol[sppno],
cex=pcex[sppno], pch=pch[sppno])
@@ -1349,8 +1384,8 @@ lvplot.cao = function(object,
rep(spch, length=nrow(lvmat)), col=scol, cex=scex, font=sfont)
}
for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else
whichSpecies[sppno]
if (is.na(indexSpecies))
@@ -1361,8 +1396,8 @@ lvplot.cao = function(object,
}
if (label.arg) {
for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else
whichSpecies[sppno]
text(Coeflist at Optimum[1,indexSpecies],
@@ -1388,12 +1423,12 @@ predict.cao <- function (object, newdata = NULL,
type <- match.arg(type, c("link", "response", "terms"))[1]
if (type != "link" && deriv != 0)
stop("Setting deriv = <positive integer> requires type='link'")
-  na.act = object@na.action
-  object@na.action = list()
-  ocontrol = object@control
-  nice21 = (length(ocontrol$colx1.index) == 1) &&
+  na.act <- object@na.action
+  object@na.action <- list()
+  ocontrol <- object@control
+ nice21 <- (length(ocontrol$colx1.index) == 1) &&
(names(ocontrol$colx1.index) == "(Intercept)")
- if (!nice21) stop("Can only handle 'Norrr = ~ 1'")
+ if (!nice21) stop("Can only handle 'noRRR = ~ 1'")
if (!length(newdata) && type == "response" &&
length(object at fitted.values)) {
@@ -1421,13 +1456,13 @@ predict.cao <- function (object, newdata = NULL,
xlev = object at xlevels)
if (nice21 && nrow(X)!=nrow(newdata)) {
- as.save = attr(X, "assign")
- X = X[rep(1, nrow(newdata)),,drop = FALSE]
- dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
- attr(X, "assign") = as.save # Restored
+ as.save <- attr(X, "assign")
+ X <- X[rep(1, nrow(newdata)),, drop = FALSE]
+ dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") <- as.save # Restored
}
- offset = if (!is.null(off.num <- attr(tt, "offset"))) {
+ offset <- if (!is.null(off.num <- attr(tt, "offset"))) {
eval(attr(tt, "variables")[[off.num+1]], newdata)
} else if (!is.null(object at offset))
eval(object at call$offset, newdata)
@@ -1439,65 +1474,65 @@ predict.cao <- function (object, newdata = NULL,
attr(X, "assign") <- attrassigndefault(X, tt)
}
- cancoefs = ccoef(object)
+ cancoefs <- ccoef(object)
- lvmat = X[,ocontrol$colx2.index,drop = FALSE] %*% cancoefs # n x Rank
+ lvmat <- X[,ocontrol$colx2.index, drop = FALSE] %*% cancoefs # n x Rank
- Rank = ocontrol$Rank
-  NOS = ncol(object@y)
-  sppnames = dimnames(object@y)[[2]]
- modelno = ocontrol$modelno # 1,2,3,5 or 0
- M = if (any(slotNames(object) == "predictors") &&
+ Rank <- ocontrol$Rank
+  NOS <- ncol(object@y)
+  sppnames <- dimnames(object@y)[[2]]
+ modelno <- ocontrol$modelno # 1,2,3,5 or 0
+ M <- if (any(slotNames(object) == "predictors") &&
      is.matrix(object@predictors)) ncol(object@predictors) else
      object@misc$M
- MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
if (type == "terms") {
terms.mat = matrix(0,nrow(X),Rank*NOS) # 1st R cols for spp.1, etc.
- interceptvector = rep(0, len=NOS)
+ interceptvector <- rep(0, len=NOS)
} else {
- etamat = matrix(0, nrow(X), M) # Could contain derivatives
+ etamat <- matrix(0, nrow(X), M) # Could contain derivatives
}
- ind8 = 1:Rank
- whichSpecies = 1:NOS # Do it all for all species
+ ind8 <- 1:Rank
+ whichSpecies <- 1:NOS # Do it all for all species
for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if (is.na(indexSpecies))
stop("mismatch found in 'whichSpecies'")
- temp345 = predictcao(object, grid=lvmat, sppno=thisSpecies,
+ temp345 <- predictcao(object, grid=lvmat, sppno=thisSpecies,
Rank=Rank, deriv=deriv, MSratio=MSratio,
type=ifelse(type == "response", "link", type))
if (MSratio == 2) {
if (any(type == c("link", "response"))) {
- etamat[,2*sppno-1] = temp345$yvals
- etamat[,2*sppno ] = temp345$eta2
+ etamat[,2*sppno-1] <- temp345$yvals
+ etamat[,2*sppno ] <- temp345$eta2
} else {
- terms.mat[,ind8] = temp345
- interceptvector[sppno] = attr(temp345, "constant")
+ terms.mat[,ind8] <- temp345
+ interceptvector[sppno] <- attr(temp345, "constant")
}
} else {
if (any(type == c("link", "response"))) {
- etamat[,sppno] = temp345$yvals
+ etamat[,sppno] <- temp345$yvals
} else {
- terms.mat[,ind8] = temp345
- interceptvector[sppno] = attr(temp345, "constant")
+ terms.mat[,ind8] <- temp345
+ interceptvector[sppno] <- attr(temp345, "constant")
}
}
- ind8 = ind8 + Rank
+ ind8 <- ind8 + Rank
}
if (length(offset) && any(offset != 0))
etamat <- etamat + offset
if (type == "link") {
- dimnames(etamat) = list(dimnames(X)[[1]], if (deriv == 0)
+ dimnames(etamat) <- list(dimnames(X)[[1]], if (deriv == 0)
object at misc$predictors.names else NULL)
return(etamat)
} else if (type == "response") {
    fv <- object@family@linkinv(etamat, extra=object@extra)
- dimnames(fv) = list(dimnames(fv)[[1]],
+ dimnames(fv) <- list(dimnames(fv)[[1]],
dimnames(object at fitted.values)[[2]])
return(fv)
} else {
@@ -1524,45 +1559,45 @@ predictcao <- function(object, grid, sppno, Rank = 1,
if (type == "terms" && deriv != 0)
stop("'deriv' must be 0 when type=\"terms\"")
-  temp.b = object@Bspline[[sppno]]
+  temp.b <- object@Bspline[[sppno]]
if (type == "terms") {
- meanlv = colMeans(grid)
- answer = matrix(0, nrow(grid), Rank)
+ meanlv <- colMeans(grid)
+ answer <- matrix(0, nrow(grid), Rank)
} else {
- nlfunvalues = 0
+ nlfunvalues <- 0
}
for(rindex in 1:Rank) {
- temp = temp.b[[rindex]] # temp is of class "vsmooth.spline.fit"
- nlpart = predict(temp, grid[,rindex], deriv = deriv)
- yvals = nlpart$y
+ temp <- temp.b[[rindex]] # temp is of class "vsmooth.spline.fit"
+ nlpart <- predict(temp, grid[,rindex], deriv = deriv)
+ yvals <- nlpart$y
if (type == "terms") {
answer[,rindex] = yvals
} else {
- nlfunvalues = nlfunvalues + yvals
+ nlfunvalues <- nlfunvalues + yvals
}
}
# Get the linear part of the additive predictor (intercept and slopes)
- lcoef = object at coefficients # linear coefs; dont use coef() (== Coef)
- llcoef = lcoef[(1+(sppno-1)*(MSratio+Rank)):(sppno*(MSratio+Rank))]
+ lcoef <- object at coefficients # linear coefs; dont use coef() (== Coef)
+ llcoef <- lcoef[(1+(sppno-1)*(MSratio+Rank)):(sppno*(MSratio+Rank))]
if (type == "terms") {
- interceptvector = llcoef[1]
+ interceptvector <- llcoef[1]
for(rindex in 1:Rank) {
- answer[,rindex] = answer[,rindex] + (grid[,rindex] -
+ answer[,rindex] <- answer[,rindex] + (grid[,rindex] -
meanlv[rindex]) * llcoef[MSratio+rindex]
- interceptvector = interceptvector +
+ interceptvector <- interceptvector +
meanlv[rindex] * llcoef[MSratio+rindex]
}
} else {
- linpar = if (deriv == 0) {
+ linpar <- if (deriv == 0) {
llcoef[1]+grid %*% llcoef[-(1:MSratio)]
} else {
if(deriv == 1) llcoef[MSratio+rindex] else 0
}
- nlfunvalues = nlfunvalues + linpar # Now complete
+ nlfunvalues <- nlfunvalues + linpar # Now complete
}
if (type == "terms") {
- attr(answer, "constant") = interceptvector
+ attr(answer, "constant") <- interceptvector
answer
} else {
list(xvals = grid,
@@ -1574,7 +1609,7 @@ predictcao <- function(object, grid, sppno, Rank = 1,
-plot.cao = function(x,
+plot.cao <- function(x,
xlab = if (Rank == 1) "Latent Variable" else
paste("Latent Variable", 1:Rank),
ylab = NULL, residuals.arg = FALSE,
@@ -1589,54 +1624,54 @@ plot.cao = function(x,
scale = 0, ylim = NULL,
overlay = FALSE, ...)
{
-  Rank = x@control$Rank
+  Rank <- x@control$Rank
if (!is.logical(center.cf) || length(center.cf) != 1)
stop("bad input for argument 'center.cf'")
if (Rank > 1 && !center.cf)
stop("center.cf = TRUE is needed for models with Rank > 1")
-  NOS = ncol(x@y)
-  sppnames = dimnames(x@y)[[2]]
-  modelno = x@control$modelno # 1,2,3, or 0
-  M = if (any(slotNames(x) == "predictors") &&
+  NOS <- ncol(x@y)
+  sppnames <- dimnames(x@y)[[2]]
+  modelno <- x@control$modelno # 1,2,3, or 0
+  M <- if (any(slotNames(x) == "predictors") &&
      is.matrix(x@predictors)) ncol(x@predictors) else x@misc$M
if (all((MSratio <- M / NOS) != c(1,2))) stop("bad value for 'MSratio'")
- pcol = rep(pcol, length = Rank*NOS)
- pcex = rep(pcex, length = Rank*NOS)
- pch = rep(pch, length = Rank*NOS)
- lcol = rep(lcol, length = Rank*NOS)
- lwd = rep(lwd, length = Rank*NOS)
- lty = rep(lty, length = Rank*NOS)
- xlab = rep(xlab, length = Rank)
- if (!length(whichSpecies)) whichSpecies = 1:NOS
+ pcol <- rep(pcol, length = Rank*NOS)
+ pcex <- rep(pcex, length = Rank*NOS)
+ pch <- rep(pch, length = Rank*NOS)
+ lcol <- rep(lcol, length = Rank*NOS)
+ lwd <- rep(lwd, length = Rank*NOS)
+ lty <- rep(lty, length = Rank*NOS)
+ xlab <- rep(xlab, length = Rank)
+ if (!length(whichSpecies)) whichSpecies <- 1:NOS
if (length(ylab))
- ylab = rep(ylab, len=length(whichSpecies)) # Too long if overlay
+ ylab <- rep(ylab, len=length(whichSpecies)) # Too long if overlay
if (length(main))
- main = rep(main, len=length(whichSpecies)) # Too long if overlay
- lvmat = lv(x)
-  nice21 = length(x@control$colx1.index) == 1 &&
+    main <- rep(main, len=length(whichSpecies)) # Too long if overlay
+  lvmat <- lv(x)
+  nice21 <- length(x@control$colx1.index) == 1 &&
           names(x@control$colx1.index) == "(Intercept)"
if (!nice21)
stop("can only handle intercept-only models")
- counter = 0
+ counter <- 0
for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- indexSpecies = if (is.character(whichSpecies))
+ thisSpecies <- whichSpecies[sppno]
+ indexSpecies <- if (is.character(whichSpecies))
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if (is.na(indexSpecies))
stop("mismatch found in 'whichSpecies'")
- terms.mat = predictcao(object = x, grid=lvmat, type = "terms",
+ terms.mat <- predictcao(object = x, grid=lvmat, type = "terms",
sppno=indexSpecies, Rank=Rank,
deriv=deriv, MSratio=MSratio)
for(rindex in WhichRank) {
- xvals = lvmat[,rindex]
- yvals = terms.mat[,rindex]
- o = sort.list(xvals)
- xvals = xvals[ o ]
- yvals = yvals[ o ]
- if (!center.cf) yvals = yvals + attr(terms.mat, "constant")
+ xvals <- lvmat[,rindex]
+ yvals <- terms.mat[,rindex]
+ o <- sort.list(xvals)
+ xvals <- xvals[ o ]
+ yvals <- yvals[ o ]
+ if (!center.cf) yvals <- yvals + attr(terms.mat, "constant")
if (!add)
if (sppno == 1 || !overlay) {
- ylim.use = if (length(ylim)) ylim else
+ ylim.use <- if (length(ylim)) ylim else
ylim.scale(range(yvals), scale)
matplot(xvals, yvals, type = "n",
xlab = xlab[rindex],
@@ -1650,7 +1685,7 @@ plot.cao = function(x,
if (residuals.arg) {
stop("cannot handle residuals = TRUE yet")
}
- counter = counter + 1
+ counter <- counter + 1
lines(xvals, yvals,
col=lcol[counter], lwd=lwd[counter], lty=lty[counter])
if (rugplot) rug(xvals)
@@ -1669,7 +1704,7 @@ setMethod("plot", "cao",
-persp.cao = function(x,
+persp.cao <- function(x,
plot.it = TRUE,
xlim = NULL, ylim = NULL, zlim = NULL, # zlim ignored if Rank == 1
gridlength = if (Rank == 1) 301 else c(51,51),
@@ -1686,70 +1721,70 @@ persp.cao = function(x,
lwd=par()$lwd,
rugplot = FALSE,
...) {
- object = x # don't like x as the primary argument
- coefobj = Coef(object)
+ object <- x # don't like x as the primary argument
+ coefobj <- Coef(object)
  if ((Rank <- coefobj@Rank) > 2)
stop("object must be a rank-1 or rank-2 model")
- fvmat = fitted(object)
- NOS = ncol(fvmat) # Number of species
- M = if (any(slotNames(object) == "predictors") &&
+ fvmat <- fitted(object)
+ NOS <- ncol(fvmat) # Number of species
+ M <- if (any(slotNames(object) == "predictors") &&
      is.matrix(object@predictors)) ncol(object@predictors) else
      object@misc$M
- MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
- xlim = if (length(xlim)) xlim else range(coefobj at lv[,1])
+ xlim <- if (length(xlim)) xlim else range(coefobj at lv[,1])
if (!length(ylim.orig <- ylim)) {
- ylim = if (Rank == 1) c(0, max(fvmat)*stretch) else
+ ylim <- if (Rank == 1) c(0, max(fvmat)*stretch) else
range(coefobj at lv[,2])
}
- xlim = rep(xlim, length = 2)
- ylim = rep(ylim, length = 2)
- gridlength = rep(gridlength, length=Rank)
- lv1 = seq(xlim[1], xlim[2], length=gridlength[1])
- lv2 = if (Rank == 2) seq(ylim[1], ylim[2], len=gridlength[2]) else NULL
- lvmat = if (Rank == 2) expand.grid(lv1, lv2) else cbind(lv1)
-
- sppNames = dimnames(object at y)[[2]]
+ xlim <- rep(xlim, length = 2)
+ ylim <- rep(ylim, length = 2)
+ gridlength <- rep(gridlength, length=Rank)
+ lv1 <- seq(xlim[1], xlim[2], length=gridlength[1])
+ lv2 <- if (Rank == 2) seq(ylim[1], ylim[2], len=gridlength[2]) else NULL
+ lvmat <- if (Rank == 2) expand.grid(lv1, lv2) else cbind(lv1)
+
+ sppNames <- dimnames(object at y)[[2]]
if (!length(whichSpecies)) {
- whichSpecies = sppNames[1:NOS]
- whichSpecies.numer = 1:NOS
+ whichSpecies <- sppNames[1:NOS]
+ whichSpecies.numer <- 1:NOS
} else
if (is.numeric(whichSpecies)) {
- whichSpecies.numer = whichSpecies
- whichSpecies = sppNames[whichSpecies.numer] # Convert to character
+ whichSpecies.numer <- whichSpecies
+ whichSpecies <- sppNames[whichSpecies.numer] # Convert to character
} else
- whichSpecies.numer = match(whichSpecies, sppNames)
+ whichSpecies.numer <- match(whichSpecies, sppNames)
- LP = matrix(as.numeric(NA),nrow(lvmat),NOS) # For 1st eta for each spp.
+ LP <- matrix(as.numeric(NA),nrow(lvmat),NOS) # For 1st eta for each spp.
for(sppno in 1:NOS) {
- temp = predictcao(object=object, grid=lvmat, sppno=sppno,
+ temp <- predictcao(object=object, grid=lvmat, sppno=sppno,
Rank=Rank, deriv = 0, MSratio=MSratio)
- LP[,sppno] = temp$yval
+ LP[,sppno] <- temp$yval
}
if (MSratio == 2) {
- LP = kronecker(LP, matrix(1:0, 1, 2)) # n x M
+ LP <- kronecker(LP, matrix(1:0, 1, 2)) # n x M
}
-  fitvals = object@family@linkinv(LP, extra=object@extra) # n by NOS
-  dimnames(fitvals) = list(NULL, dimnames(fvmat)[[2]])
+  fitvals <- object@family@linkinv(LP, extra=object@extra) # n by NOS
+ dimnames(fitvals) <- list(NULL, dimnames(fvmat)[[2]])
if (Rank == 1) {
if (plot.it) {
if (!length(ylim.orig))
- ylim = c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
- col = rep(col, len=length(whichSpecies.numer))
- lty = rep(lty, len=length(whichSpecies.numer))
- lwd = rep(lwd, len=length(whichSpecies.numer))
+ ylim <- c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
+ col <- rep(col, len=length(whichSpecies.numer))
+ lty <- rep(lty, len=length(whichSpecies.numer))
+ lwd <- rep(lwd, len=length(whichSpecies.numer))
matplot(lv1, fitvals, xlab = xlab, ylab=ylab,
type = "n", main=main, xlim = xlim, ylim=ylim, ...)
if (rugplot) rug(lv(object))
for(sppno in 1:length(whichSpecies.numer)) {
- ptr2 = whichSpecies.numer[sppno] # points to species column
+ ptr2 <- whichSpecies.numer[sppno] # points to species column
lines(lv1, fitvals[,ptr2], col=col[sppno],
lty=lty[sppno], lwd=lwd [sppno], ...)
if (labelSpecies) {
- ptr1 = (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
+ ptr1 <- (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
fitvals[,ptr2]]
- ptr1 = ptr1[1]
+ ptr1 <- ptr1[1]
text(lv1[ptr1], fitvals[ptr1,ptr2]+(stretch-1) *
diff(range(ylim)), label=sppNames[sppno],
col=col[sppno], ...)
@@ -1757,15 +1792,15 @@ persp.cao = function(x,
}
}
} else {
- maxfitted = matrix(fitvals[,whichSpecies[1]], length(lv1),
+ maxfitted <- matrix(fitvals[,whichSpecies[1]], length(lv1),
length(lv2))
if (length(whichSpecies) > 1)
for(sppno in whichSpecies[-1]) {
- maxfitted = pmax(maxfitted, matrix(fitvals[,sppno],
+ maxfitted <- pmax(maxfitted, matrix(fitvals[,sppno],
length(lv1), length(lv2)))
}
if (!length(zlim))
- zlim = range(maxfitted, na.rm = TRUE)
+ zlim <- range(maxfitted, na.rm = TRUE)
if (plot.it)
graphics:::persp.default(lv1, lv2, maxfitted,
zlim=zlim,
@@ -1786,7 +1821,7 @@ setMethod("persp", "cao", function(x, ...) persp.cao(x = x, ...))
-lv.cao = function(object, ...) {
+lv.cao <- function(object, ...) {
Coef(object, ...)@lv
}
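lv.cao() simply extracts the site scores from the Coef object; a hedged sketch, again assuming a fitted "cao" object fit and that the lv() generic dispatches here:

    site.scores <- lv(fit)                            # n x Rank matrix of latent variable values
    head(site.scores)
    stopifnot(all.equal(site.scores, Coef(fit)@lv))   # the same thing, by construction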
@@ -1811,24 +1846,25 @@ setClass(Class = "summary.cao",
-summary.cao = function(object, ...) {
- answer = Coef(object, ...)
+summary.cao <- function(object, ...) {
+ answer <- Coef(object, ...)
- answer = as(answer, "summary.cao")
+ answer <- as(answer, "summary.cao")
-  answer@misc = object@misc
-  answer@call = object@call
-  answer
+  answer@misc <- object@misc
+  answer@call <- object@call
+ answer
}
+
setMethod("summary", "cao", function(object, ...)
- summary.cao(object, ...))
+ summary.cao(object, ...))
-show.summary.cao = function(x, ...) {
+show.summary.cao <- function(x, ...) {
cat("\nCall:\n")
dput(x at call)
@@ -1856,11 +1892,12 @@ setMethod("show", "summary.cao",
-ccoef.cao = function(object, ...) {
+ccoef.cao <- function(object, ...) {
Coef(object, ...)@C
}
-ccoef.Coef.cao = function(object, ...) {
+
+ccoef.Coef.cao <- function(object, ...) {
if (length(list(...)))
warning("Too late! Ignoring the extra arguments")
object at C
@@ -1868,9 +1905,10 @@ ccoef.Coef.cao = function(object, ...) {
if(!isGeneric("ccoef"))
- setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
+ setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"))
-setMethod("ccoef", "cao", function(object, ...) ccoef.cao(object, ...))
+setMethod("ccoef", "cao", function(object, ...)
+ ccoef.cao(object, ...))
setMethod("ccoef", "Coef.cao", function(object, ...)
ccoef.Coef.cao(object, ...))
@@ -1878,6 +1916,8 @@ setMethod("ccoef", "Coef.cao", function(object, ...)
if(!isGeneric("calibrate"))
setGeneric("calibrate", function(object, ...)
standardGeneric("calibrate"))
+
+
setMethod("calibrate", "cao", function(object, ...)
calibrate.qrrvglm(object, ...))
@@ -1886,7 +1926,7 @@ setMethod("calibrate", "qrrvglm", function(object, ...)
calibrate.qrrvglm(object, ...))
-Tol.cao = function(object, ...) {
+Tol.cao <- function(object, ...) {
stop("The tolerance for a 'cao' object is undefined")
}
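For context, a hedged end-to-end sketch of the workflow the methods in this file support, assuming VGAM's hunua data set (species counts such as agaaus and kniexc, with altitude as the gradient):

    library(VGAM)
    set.seed(123)
    fit <- cao(cbind(agaaus, kniexc) ~ altitude,
               family = poissonff, data = hunua, Rank = 1)
    Coef(fit)     # optima, maxima, nonlinear df (printed by show.Coef.cao)
    lvplot(fit)   # fitted curves against the latent variable (lvplot.cao)
    persp(fit)    # response curves along the gradient (persp.cao)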
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 7d57ebd..57863be 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -81,21 +81,30 @@ Coef.vlm <- function(object, ...) {
  LL <- length(object@family@vfamily)
  funname <- paste("Coef.", object@family@vfamily[LL], sep = "")
+
  if (exists(funname)) {
    newcall <- paste("Coef.", object@family@vfamily[LL],
"(object, ...)", sep = "")
newcall <- parse(text = newcall)[[1]]
- eval(newcall)
- } else
+ return(eval(newcall))
+ }
+
+
+ answer <-
    if (length(tmp2 <- object@misc$link) &&
        object@misc$intercept.only &&
        trivial.constraints(object@constraints)) {
+
+
+      if (!is.list(use.earg <- object@misc$earg))
+        use.earg <- list()
+
answer <-
eta2theta(rbind(coefvlm(object)),
                  link = object@misc$link,
-                 earg = object@misc$earg)
+ earg = use.earg)
answer <- c(answer)
@@ -105,6 +114,20 @@ Coef.vlm <- function(object, ...) {
} else {
coefvlm(object, ... )
}
+
+
+
+  if (length(tmp3 <- object@misc$parameter.names) &&
+      object@misc$intercept.only &&
+      trivial.constraints(object@constraints)) {
+    answer <- c(answer)
+    if (length(tmp3) == object@misc$M &&
+ is.character(tmp3))
+ names(answer) <- tmp3
+ }
+
+
+ answer
}
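The new intercept-only branch back-transforms the fitted intercepts through their links (eta2theta) and, when object@misc$parameter.names is available, labels the result. In base-R terms the back-transformation is simply the inverse link; a hedged one-liner for a logit link:

    eta <- -0.847   # a fitted intercept on the logit scale
    plogis(eta)     # inverse logit, about 0.30: the value Coef() would now report and name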
diff --git a/R/cqo.R b/R/cqo.R
index 89ce1f7..f708ab5 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 83e9aae..cbfaa14 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,77 +1,77 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
+callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
X_vlm_1save, modelno, Control,
- n, M, p1star, p2star, nice31, allofit=FALSE) {
- ocmatrix = cmatrix
- control = Control
- Rank = control$Rank
- p1 = length(control$colx1.index); p2 = length(control$colx2.index)
- dim(cmatrix) = c(p2, Rank) # for crow1C
- pstar = p1star + p2star
- maxMr = max(M, Rank)
- nstar = if (nice31) ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
- NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
- lenbeta = pstar * ifelse(nice31, NOS, 1)
+ n, M, p1star, p2star, nice31, allofit = FALSE) {
+ ocmatrix <- cmatrix
+ control <- Control
+ Rank <- control$Rank
+ p1 <- length(control$colx1.index); p2 <- length(control$colx2.index)
+ dim(cmatrix) <- c(p2, Rank) # for crow1C
+ pstar <- p1star + p2star
+ maxMr <- max(M, Rank)
+ nstar <- if (nice31) ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
+ NOS <- ifelse(modelno == 3 || modelno==5, M/2, M)
+ lenbeta <- pstar * ifelse(nice31, NOS, 1)
if (itol <- control$ITolerances) {
if (Rank > 1) {
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
- evnu = eigen(var(numat))
- cmatrix = cmatrix %*% evnu$vector
+ numat <- xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
+ evnu <- eigen(var(numat))
+ cmatrix <- cmatrix %*% evnu$vector
}
- cmatrix = crow1C(cmatrix, control$Crow1positive)
- numat = xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
- sdnumat = apply(numat, 2, sd)
+ cmatrix <- crow1C(cmatrix, control$Crow1positive)
+ numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+ sdnumat <- apply(numat, 2, sd)
for(lookat in 1:Rank)
if (sdnumat[lookat] >
control$MUXfactor[lookat] * control$isdlv[lookat]) {
- muxer = control$isdlv[lookat] *
+ muxer <- control$isdlv[lookat] *
control$MUXfactor[lookat] / sdnumat[lookat]
- numat[,lookat] = numat[,lookat] * muxer
- cmatrix[,lookat] = cmatrix[,lookat] * muxer
+ numat[,lookat] <- numat[,lookat] * muxer
+ cmatrix[,lookat] <- cmatrix[,lookat] * muxer
if (control$trace) {
cat(paste("Taking evasive action for latent variable ",
- lookat, ".\n", sep=""))
+ lookat, ".\n", sep = ""))
flush.console()
}
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance",
"cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
}
} else {
- numat = xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
- evnu = eigen(var(numat))
- temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+ evnu <- eigen(var(numat))
+ temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
- cmatrix = cmatrix %*% temp7
- cmatrix = crow1C(cmatrix, control$Crow1positive)
- numat = xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+ cmatrix <- cmatrix %*% temp7
+ cmatrix <- crow1C(cmatrix, control$Crow1positive)
+ numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
}
- inited = if (is.R()) {
+ inited <- if (is.R()) {
if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0
- usethiseta = if (inited == 1)
+ usethiseta <- if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
- usethisbeta = if (inited == 2)
+ usethisbeta <- if (inited == 2)
getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
- othint = c(Rank = Rank, control$EqualTol, pstar = pstar,
+ othint <- c(Rank = Rank, control$EqualTol, pstar = pstar,
dimw = 1, inited = inited, modelno = modelno,
maxitl = control$maxitl, actnits = 0, twice = 0,
p1star = p1star, p2star = p2star, nice31 = nice31,
lenbeta = lenbeta, itol = itol, control$trace,
p1 = p1, p2 = p2, control$imethod)
- bnumat = if (nice31) matrix(0,nstar,pstar) else
+ bnumat <- if (nice31) matrix(0,nstar,pstar) else
cbind(matrix(0,nstar,p2star), X_vlm_1save)
@@ -103,8 +103,6 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
assign(".VGAM.CQO.cmatrix", cmatrix, envir = VGAM:::VGAMenv)
assign(".VGAM.CQO.ocmatrix", ocmatrix, envir = VGAM:::VGAMenv)
} else {
- .VGAM.CQO.cmatrix <<- cmatrix
- .VGAM.CQO.ocmatrix <<- ocmatrix
}
} else {
print("hi 88 20100402; all the species did not converge in callcqo")
@@ -125,38 +123,39 @@ callcqoc = function(cmatrix, etamat, xmat, ymat, wvec,
-calldcqo = function(cmatrix, etamat, xmat, ymat, wvec,
+calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
X_vlm_1save, modelno, Control,
n, M, p1star, p2star, nice31, allofit = FALSE) {
- control = Control
- Rank = control$Rank
- p1 = length(control$colx1.index); p2 = length(control$colx2.index)
- dim(cmatrix) = c(p2, Rank) # for crow1C
+ control <- Control
+ Rank <- control$Rank
+ p1 <- length(control$colx1.index); p2 <- length(control$colx2.index)
+ dim(cmatrix) <- c(p2, Rank) # for crow1C
xmat2 <- xmat[, control$colx2.index, drop = FALSE] #ccc
numat <- double(n*Rank) #ccc
- pstar = p1star + p2star
- maxMr = max(M, Rank)
- nstar = if (nice31) ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
- NOS = ifelse(modelno == 3 || modelno == 5, M/2, M)
- lenbeta = pstar * ifelse(nice31, NOS, 1)
+ pstar <- p1star + p2star
+ maxMr <- max(M, Rank)
+ nstar <- if (nice31)
+ ifelse(modelno == 3 || modelno == 5,n*2,n) else n*M
+ NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
+ lenbeta <- pstar * ifelse(nice31, NOS, 1)
if (itol <- control$ITolerances) {
if (Rank > 1) {
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
- evnu = eigen(var(numat))
- cmatrix = cmatrix %*% evnu$vector
+ numat <- xmat[, control$colx2.index, drop=FALSE] %*% cmatrix
+ evnu <- eigen(var(numat))
+ cmatrix <- cmatrix %*% evnu$vector
}
- cmatrix = crow1C(cmatrix, control$Crow1positive)
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
- sdnumat = apply(numat, 2, sd)
+ cmatrix <- crow1C(cmatrix, control$Crow1positive)
+ numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ sdnumat <- apply(numat, 2, sd)
for(lookat in 1:Rank)
if (sdnumat[lookat] > control$MUXfactor[lookat] *
control$isdlv[lookat]) {
- muxer = control$isdlv[lookat] *
+ muxer <- control$isdlv[lookat] *
control$MUXfactor[lookat] / sdnumat[lookat]
- cmatrix[,lookat] = cmatrix[,lookat] * muxer
+ cmatrix[,lookat] <- cmatrix[,lookat] * muxer
if (control$trace) {
cat(paste("Taking evasive action for latent variable ",
lookat, ".\n", sep=""))
@@ -166,54 +165,54 @@ calldcqo = function(cmatrix, etamat, xmat, ymat, wvec,
"cmatrix", "ocmatrix"), prefix = ".VGAM.CQO.")
}
} else {
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
- evnu = eigen(var(numat))
- temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ evnu <- eigen(var(numat))
+ temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
- cmatrix = cmatrix %*% temp7
- cmatrix = crow1C(cmatrix, control$Crow1positive)
- numat = xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
+ cmatrix <- cmatrix %*% temp7
+ cmatrix <- crow1C(cmatrix, control$Crow1positive)
+ numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
}
- inited = if (is.R()) {
+ inited <- if (is.R()) {
if (exists(".VGAM.CQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0
- usethiseta = if (inited == 1)
+ usethiseta <- if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
- usethisbeta = if (inited == 2)
+ usethisbeta <- if (inited == 2)
getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
- othint = c(Rank, control$EqualTol, pstar, dimw = 1, inited = inited,
+ othint <- c(Rank, control$EqualTol, pstar, dimw = 1, inited = inited,
modelno, maxitl = control$maxitl, actnits = 0, twice = 0,
p1star = p1star, p2star = p2star, nice31 = nice31, lenbeta,
itol = itol, control$trace,
p1, p2, control$imethod) # other ints
- bnumat = if (nice31) matrix(0,nstar,pstar) else
+ bnumat <- if (nice31) matrix(0,nstar,pstar) else
cbind(matrix(0,nstar,p2star), X_vlm_1save)
flush.console()
ans1 <-
- dotC(name="dcqo1", numat=as.double(numat), as.double(ymat),
+ dotC(name = "dcqo1", numat = as.double(numat), as.double(ymat),
as.double(if (p1) xmat[,control$colx1.index] else 999),
- as.double(wvec), etamat=as.double(usethiseta),
- moff=double(if (itol) n else 1),
+ as.double(wvec), etamat = as.double(usethiseta),
+ moff = double(if (itol) n else 1),
fv = double(NOS*n), z = double(n*M), wz = double(n*M),
U = double(M*n), bnumat = as.double(bnumat),
- qr=double(nstar * pstar), qraux=double(pstar), qpivot=integer(pstar),
- as.integer(n), as.integer(M), NOS=as.integer(NOS),
- as.integer(nstar), dim1U=as.integer(M),
+ qr = double(nstar * pstar), qraux = double(pstar), qpivot = integer(pstar),
+ as.integer(n), as.integer(M), NOS = as.integer(NOS),
+ as.integer(nstar), dim1U = as.integer(M),
errcode = integer(1 + NOS), othint = as.integer(othint),
- deviance=double(1 + NOS), beta=as.double(usethisbeta),
- othdbl = as.double(c(small=control$SmallNo,
- epsilon=control$epsilon, .Machine$double.eps,
- iKvector=rep(control$iKvector, len=NOS),
- iShape=rep(control$iShape, len=NOS))),
+ deviance = double(1 + NOS), beta = as.double(usethisbeta),
+ othdbl = as.double(c(small = control$SmallNo,
+ epsilon = control$epsilon, .Machine$double.eps,
+ iKvector = rep(control$iKvector, len = NOS),
+ iShape = rep(control$iShape, len = NOS))),
xmat2 = as.double(xmat2),
- cmat=as.double(cmatrix),
- p2=as.integer(p2), deriv=double(p2*Rank),
- hstep=as.double(control$Hstep))
+ cmat = as.double(cmatrix),
+ p2 = as.integer(p2), deriv = double(p2*Rank),
+ hstep = as.double(control$Hstep))
if (ans1$errcode[1] != 0) {
warning("error code in calldcqo = ", ans1$errcode[1])
@@ -229,72 +228,82 @@ calldcqo = function(cmatrix, etamat, xmat, ymat, wvec,
checkCMCO <- function(Blist, control, modelno) {
- p1 = length(colx1.index <- control$colx1.index)
- p2 = length(colx2.index <- control$colx2.index)
- if (p1 + p2 != length(Blist))
- stop("'Blist' is the wrong length")
- if (p1 == 0 || p2 == 0)
- stop("Some variables are needed in Norrr and non-Norrr arguments")
- if (all(names(colx1.index) != "(Intercept)"))
- stop("an intercept term must be in the argument 'Norrr' formula")
- Blist1 = vector("list", p1)
- Blist2 = vector("list", p2)
- for(kk in 1:p1)
- Blist1[[kk]] = Blist[[(colx1.index[kk])]]
+ p1 <- length(colx1.index <- control$colx1.index)
+ p2 <- length(colx2.index <- control$colx2.index)
+ if (p1 + p2 != length(Blist))
+ stop("'Blist' is the wrong length")
+ if (p1 == 0 || p2 == 0)
+ stop("Some variables are needed in noRRR and non-noRRR arguments")
+ if (all(names(colx1.index) != "(Intercept)"))
+ stop("an intercept term must be in the argument 'noRRR' formula")
+ Blist1 <- vector("list", p1)
+ Blist2 <- vector("list", p2)
+ for(kk in 1:p1)
+ Blist1[[kk]] <- Blist[[(colx1.index[kk])]]
+ for(kk in 1:p2)
+ Blist2[[kk]] <- Blist[[(colx2.index[kk])]]
+
+ if (modelno == 3 || modelno == 5) {
+ if (p1 > 1)
+ for(kk in 2:p1)
+ Blist1[[kk]] <- (Blist1[[kk]])[c(TRUE,FALSE),,drop = FALSE]
for(kk in 1:p2)
- Blist2[[kk]] = Blist[[(colx2.index[kk])]]
-
- if (modelno == 3 || modelno == 5) {
- if (p1 > 1)
- for(kk in 2:p1)
- Blist1[[kk]] = (Blist1[[kk]])[c(TRUE,FALSE),,drop=FALSE]
- for(kk in 1:p2)
- Blist2[[kk]] = (Blist2[[kk]])[c(TRUE,FALSE),,drop=FALSE]
- }
+ Blist2[[kk]] <- (Blist2[[kk]])[c(TRUE,FALSE),,drop = FALSE]
+ }
- if (!all(trivial.constraints(Blist2) == 1))
- stop("the constraint matrices for the non-Norrr terms ",
- "are not trivial")
+ if (!all(trivial.constraints(Blist2) == 1))
+ stop("the constraint matrices for the non-noRRR terms ",
+ "are not trivial")
if (!trivial.constraints(Blist1[[1]]))
stop("the constraint matrices for intercept term is ",
"not trivial")
if (p1 > 1)
for(kk in 2:p1)
if (!trivial.constraints(list(Blist1[[kk]])))
- stop("the constraint matrices for some 'Norrr' ",
+ stop("the constraint matrices for some 'noRRR' ",
"terms is not trivial")
- nice31 = if (control$Quadratic)
- (!control$EqualTol || control$ITolerances) else TRUE
- as.numeric(nice31)
+ nice31 <- if (control$Quadratic)
+ (!control$EqualTol || control$ITolerances) else TRUE
+ as.numeric(nice31)
}
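Here "trivial" constraints means identity constraint matrices, one per term. A hedged illustration for M = 2 linear predictors (term names are placeholders):

    M <- 2
    Blist.ok  <- list("(Intercept)" = diag(M), x2 = diag(M))          # trivial: accepted
    Blist.bad <- list("(Intercept)" = diag(M), x2 = matrix(1, M, 1))  # non-trivial x2 term: rejected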
-cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
- etastart=NULL, mustart=NULL, coefstart=NULL,
- offset=0, family,
- control=qrrvglm.control(...),
- constraints=NULL,
- extra=NULL,
- Terms=Terms, function.name="cqo", ...)
-{
- if (!all(offset == 0)) stop("cqo.fit() cannot handle offsets")
- specialCM = NULL
- post = list()
- nonparametric <- FALSE
- epsilon <- control$epsilon
- maxitl <- control$maxitl
- save.weight <- control$save.weight
- trace <- control$trace
- orig.stepsize <- control$stepsize
+cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family,
+ control = qrrvglm.control(...),
+ constraints = NULL,
+ extra = NULL,
+ Terms = Terms, function.name = "cqo", ...) {
+
- n <- dim(x)[1]
+ modelno <- quasi.newton <- NOS <- z <- fv <- NULL
- intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- y.names <- predictors.names <- NULL # May be overwritten in @initialize
+
+
+
+
+ if (!all(offset == 0))
+ stop("cqo.fit() cannot handle offsets")
+ specialCM <- NULL
+ post <- list()
+ nonparametric <- FALSE
+ epsilon <- control$epsilon
+ maxitl <- control$maxitl
+ save.weight <- control$save.weight
+ trace <- control$trace
+ orig.stepsize <- control$stepsize
+
+
+ n <- dim(x)[1]
+
+
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
n.save <- n
@@ -334,23 +343,23 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
M <- if (is.matrix(eta)) ncol(eta) else 1
if (is.character(rrcontrol$Dzero)) {
- index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+ index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
if (any(is.na(index)))
stop("Dzero argument didn't fully match y-names")
if (length(index) == M)
stop("all linear predictors are linear in the",
" latent variable(s); so set 'Quadratic=FALSE'")
- rrcontrol$Dzero = control$Dzero = index
+ rrcontrol$Dzero <- control$Dzero <- index
}
  if (length(family@constraints))
    eval(family@constraints)
- special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
- just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
- findex = trivial.constraints(just.testing, special.matrix)
- tc1 = trivial.constraints(constraints)
+ special.matrix <- matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
+ findex <- trivial.constraints(just.testing, special.matrix)
+ tc1 <- trivial.constraints(constraints)
if (!control$Quadratic && sum(!tc1)) {
for(ii in names(tc1))
@@ -361,35 +370,35 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (all(findex == 1))
stop("use vglm(), not rrvglm()!")
- colx1.index = names.colx1.index = NULL
- dx2 = dimnames(x)[[2]]
+ colx1.index <- names.colx1.index <- NULL
+ dx2 <- dimnames(x)[[2]]
if (sum(findex)) {
- asx = attr(x, "assign")
+ asx <- attr(x, "assign")
for(ii in names(findex))
if (findex[ii]) {
- names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
- colx1.index = c(colx1.index, asx[[ii]])
+ names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index <- c(colx1.index, asx[[ii]])
}
- names(colx1.index) = names.colx1.index
+ names(colx1.index) <- names.colx1.index
}
- rrcontrol$colx1.index=control$colx1.index = colx1.index
- colx2.index = 1:ncol(x)
- names(colx2.index) = dx2
- colx2.index = colx2.index[-colx1.index]
- p1 = length(colx1.index); p2 = length(colx2.index)
- rrcontrol$colx2.index=control$colx2.index = colx2.index
+ rrcontrol$colx1.index <- control$colx1.index <- colx1.index
+ colx2.index <- 1:ncol(x)
+ names(colx2.index) <- dx2
+ colx2.index <- colx2.index[-colx1.index]
+ p1 <- length(colx1.index); p2 <- length(colx2.index)
+ rrcontrol$colx2.index <- control$colx2.index <- colx2.index
Amat <- if (length(rrcontrol$Ainit)) rrcontrol$Ainit else
- matrix(rnorm(M * Rank, sd=rrcontrol$SD.Cinit), M, Rank)
+ matrix(rnorm(M * Rank, sd = rrcontrol$SD.Cinit), M, Rank)
- Cmat = if (length(rrcontrol$Cinit)) {
+ Cmat <- if (length(rrcontrol$Cinit)) {
matrix(rrcontrol$Cinit, p2, Rank)
} else {
if (!rrcontrol$Use.Init.Poisson.QO) {
- matrix(rnorm(p2 * Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
+ matrix(rnorm(p2 * Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
} else
.Init.Poisson.QO(ymat = as.matrix(y),
X1 = x[, colx1.index, drop = FALSE],
@@ -404,8 +413,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
if (rrcontrol$ITolerances) {
- lvmat = x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
- lvmatmeans = t(lvmat) %*% matrix(1/n, n, 1)
+ lvmat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
+ lvmatmeans <- t(lvmat) %*% matrix(1/n, n, 1)
if (!all(abs(lvmatmeans) < 4))
warning("ITolerances=TRUE but the variables making up the ",
"latent variable(s) do not appear to be centered.")
@@ -414,34 +423,34 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
Amat[c(FALSE,TRUE),] <- 0 # Intercept only for log(k)
if (length(control$szero))
- Amat[control$szero,] = 0
+ Amat[control$szero,] <- 0
- rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
- rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+ rrcontrol$Ainit <- control$Ainit <- Amat # Good for valt()
+ rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
- nice31 = checkCMCO(Blist, control=control, modelno=modelno)
+ Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
+ nice31 <- checkCMCO(Blist, control = control, modelno = modelno)
ncolBlist <- unlist(lapply(Blist, ncol))
dimB <- sum(ncolBlist)
X_vlm_save <- if (nice31) {
NULL
} else {
- tmp500 = lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+ tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
C = Cmat, control = control)
- xsmall.qrr = tmp500$new.lv.model.matrix
- B.list = tmp500$constraints
- lv.mat = tmp500$lv.mat
+ xsmall.qrr <- tmp500$new.lv.model.matrix
+ B.list <- tmp500$constraints
+ lv.mat <- tmp500$lv.mat
if (length(tmp500$offset)) {
- offset = tmp500$offset
+ offset <- tmp500$offset
}
- lm2vlm.model.matrix(xsmall.qrr, B.list, xij=control$xij)
+ lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
}
if (length(coefstart) && length(X_vlm_save)) {
eta <- if (ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
offset else X_vlm_save * coefstart + offset
- eta <- if (M > 1) matrix(eta, ncol=M, byrow=TRUE) else c(eta)
+ eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
mu <- family@linkinv(eta, extra)
}
@@ -460,12 +469,12 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(family@fini)
asgn <- attr(x, "assign")
- coefs = getfromVGAMenv("beta", prefix = ".VGAM.CQO.")
+ coefs <- getfromVGAMenv("beta", prefix = ".VGAM.CQO.")
if (control$ITolerances) {
if (NOS == M) {
- coefs = c(t(matrix(coefs, ncol=M))) # Get into right order
+ coefs <- c(t(matrix(coefs, ncol = M))) # Get into right order
} else {
- coefs = coefs
+ coefs <- coefs
}
}
@@ -494,15 +503,15 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
df.residual <- 55 - 8 - Rank*p2
- fit <- list(assign=asgn,
- coefficients=coefs,
- constraints=Blist,
- df.residual=df.residual,
- df.total=n*M,
- fitted.values=mu,
- offset=offset,
- residuals=residuals,
- terms=Terms) # terms: This used to be done in vglm()
+ fit <- list(assign = asgn,
+ coefficients = coefs,
+ constraints = Blist,
+ df.residual = df.residual,
+ df.total = n*M,
+ fitted.values = mu,
+ offset = offset,
+ residuals = residuals,
+ terms = Terms) # terms: This used to be done in vglm()
if (M == 1) {
wz <- as.vector(wz) # Convert wz into a vector
@@ -528,8 +537,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (length(family@last))
eval(family@last)
- edeviance = getfromVGAMenv("deviance", prefix = ".VGAM.CQO.")
- crit.list = list(deviance = edeviance[1], alldeviance = edeviance[-1])
+ edeviance <- getfromVGAMenv("deviance", prefix = ".VGAM.CQO.")
+ crit.list <- list(deviance = edeviance[1], alldeviance = edeviance[-1])
if (is.character(y.names) &&
length(y.names) == length(crit.list$alldeviance))
names(crit.list$alldeviance) = y.names
@@ -550,119 +559,123 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
-.Init.Poisson.QO = function(ymat, X1, X2, Rank=1, epsilon=1/32,
- max.ncol.etamat = 10,
- trace=FALSE, Crow1positive=rep(TRUE, len=Rank),
- isdlv = rep(1, lengt=Rank),
- constwt=FALSE, takelog=TRUE) {
+.Init.Poisson.QO <-
+ function(ymat, X1, X2, Rank = 1, epsilon = 1/32,
+ max.ncol.etamat = 10,
+ trace = FALSE, Crow1positive = rep(TRUE, len = Rank),
+ isdlv = rep(1, len = Rank),
+ constwt = FALSE, takelog = TRUE) {
print.CQO.expression = expression({
if (trace && length(X2)) {
cat("\nUsing initial values\n")
- dimnames(ans) = list(dimnames(X2)[[2]],
+ dimnames(ans) <- list(dimnames(X2)[[2]],
if (Rank == 1) "lv" else
- paste("lv", 1:Rank, sep=""))
- if (p2 > 5) print(ans, dig=3) else print(t(ans), dig=3)
+ paste("lv", 1:Rank, sep = ""))
+ if (p2 > 5) print(ans, dig = 3) else print(t(ans), dig = 3)
}
flush.console()
})
- sd.scale.X2.expression = expression({
+ sd.scale.X2.expression <- expression({
if (length(isdlv)) {
- actualSD = c( sqrt(diag(var(X2 %*% ans))) )
+ actualSD <- c( sqrt(diag(var(X2 %*% ans))) )
for(ii in 1:Rank)
- ans[,ii] = ans[,ii] * isdlv[ii] / actualSD[ii]
+ ans[,ii] <- ans[,ii] * isdlv[ii] / actualSD[ii]
}
})
- Crow1positive = if (length(Crow1positive))
- rep(Crow1positive, len=Rank) else
- rep(TRUE, len=Rank)
+ Crow1positive <- if (length(Crow1positive))
+ rep(Crow1positive, len = Rank) else
+ rep(TRUE, len = Rank)
if (epsilon <= 0)
stop("epsilon > 0 is required")
- ymat = cbind(ymat) + epsilon # ymat == 0 cause problems
- NOS = ncol(ymat)
- p2 = ncol(X2)
+ ymat <- cbind(ymat) + epsilon # ymat == 0 cause problems
+ NOS <- ncol(ymat)
+ p2 <- ncol(X2)
if (NOS < 2*Rank) {
- ans=crow1C(matrix(rnorm(p2 * Rank, sd = 0.02), p2, Rank),
- Crow1positive)
- eval(sd.scale.X2.expression)
- if (NOS == 1) {
- eval(print.CQO.expression)
- return(ans)
- } else ans.save = ans; # ans.save contains scaled guesses
+ ans <- crow1C(matrix(rnorm(p2 * Rank, sd = 0.02), p2, Rank),
+ Crow1positive)
+ eval(sd.scale.X2.expression)
+ if (NOS == 1) {
+ eval(print.CQO.expression)
+ return(ans)
+ } else {
+ ans.save <- ans; # ans.save contains scaled guesses
+ }
}
- calS = 1:NOS # Set of all species available for the approximation
- effrank = min(Rank, floor(NOS/2)) # effective rank
- ncol.etamat = min(if (length(X2)) floor(NOS/2) else effrank,
+ calS <- 1:NOS # Set of all species available for the approximation
+ effrank <- min(Rank, floor(NOS/2)) # effective rank
+ ncol.etamat <- min(if (length(X2)) floor(NOS/2) else effrank,
max.ncol.etamat)
- etamat =
- wts = matrix(0, nrow=nrow(ymat), ncol=ncol.etamat) # has >=1 coln
- rr = 1
+ etamat <-
+ wts <- matrix(0, nrow = nrow(ymat), ncol = ncol.etamat) # has >= 1 column
+ rr <- 1
for(ii in 1:floor(NOS/2)) {
if (length(calS) < 2) break
- index = sample(calS, size=2) # Randomness here
- etamat[,rr] = etamat[,rr] + (if (takelog)
+ index <- sample(calS, size = 2) # Randomness here
+ etamat[,rr] <- etamat[,rr] + (if (takelog)
log(ymat[,index[1]] / ymat[,index[2]]) else
ymat[,index[1]] - ymat[,index[2]])
- wts[,rr] = wts[,rr] +
+ wts[,rr] <- wts[,rr] +
(if (constwt) 1 else ymat[,index[1]] + ymat[,index[2]])
- calS = setdiff(calS, index)
- rr = (rr %% ncol.etamat) + 1
+ calS <- setdiff(calS, index)
+ rr <- (rr %% ncol.etamat) + 1
}
if (trace)
cat("\nObtaining initial values\n")
if (length(X2)) {
- alt = valt(x=cbind(X1, X2), z=etamat, U=sqrt(t(wts)), Rank=effrank,
- Blist=NULL, Cinit=NULL, trace=FALSE,
- colx1.index=1:ncol(X1), Criterion="rss")
- temp.control = list(Rank=effrank, colx1.index = 1:ncol(X1),
- Alpha=0.5,
+ alt <- valt(x = cbind(X1, X2), z = etamat,
+ U = sqrt(t(wts)), Rank = effrank,
+ Blist = NULL, Cinit = NULL, trace = FALSE,
+ colx1.index = 1:ncol(X1), Criterion = "rss")
+ temp.control <- list(Rank = effrank, colx1.index = 1:ncol(X1),
+ Alpha = 0.5,
colx2.index = (ncol(X1)+1):(ncol(X1) + ncol(X2)),
- Corner=FALSE, Svd.arg=TRUE,
- Uncorrelated.lv=TRUE, Quadratic=FALSE)
+ Corner = FALSE, Svd.arg = TRUE,
+ Uncorrelated.lv = TRUE, Quadratic = FALSE)
- ans2 = if (Rank > 1)
- rrr.normalize(rrcontrol=temp.control, A=alt$A,
- C=alt$C, x=cbind(X1, X2)) else alt
- ans = crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
+ ans2 <- if (Rank > 1)
+ rrr.normalize(rrcontrol = temp.control, A = alt$A,
+ C = alt$C, x = cbind(X1, X2)) else alt
+ ans <- crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
- Rank.save = Rank
- Rank = effrank
+ Rank.save <- Rank
+ Rank <- effrank
eval(sd.scale.X2.expression)
- Rank = Rank.save
+ Rank <- Rank.save
if (effrank < Rank) {
- ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
+ ans <- cbind(ans, ans.save[,-(1:effrank)]) # ans is better
}
eval(print.CQO.expression)
} else {
- xij = NULL # temporary measure
- U = t(sqrt(wts))
- tmp = vlm.wfit(xmat=X1, zmat=etamat, Blist=NULL, U=U,
- matrix.out=TRUE,
- is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=xij)
- ans = crow1C(as.matrix(tmp$resid),
+ xij <- NULL # temporary measure
+ U <- t(sqrt(wts))
+ tmp <- vlm.wfit(xmat = X1, zmat = etamat, Blist = NULL, U = U,
+ matrix.out = TRUE,
+ is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = xij)
+ ans <- crow1C(as.matrix(tmp$resid),
rep(Crow1positive, length.out = effrank))
if (effrank < Rank) {
- ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
+ ans <- cbind(ans, ans.save[,-(1:effrank)]) # ans is better
}
if (Rank > 1) {
- evnu = eigen(var(ans))
- ans = ans %*% evnu$vector
+ evnu <- eigen(var(ans))
+ ans <- ans %*% evnu$vector
}
if (length(isdlv)) {
- actualSD = apply(cbind(ans), 2, sd)
+ actualSD <- apply(cbind(ans), 2, sd)
for(ii in 1:Rank)
- ans[,ii] = ans[,ii] * isdlv[ii] / actualSD[ii]
+ ans[,ii] <- ans[,ii] * isdlv[ii] / actualSD[ii]
}
- ans = crow1C(ans, rep(Crow1positive, length.out = Rank))
- dimnames(ans) = list(dimnames(X1)[[1]],
- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep=""))
+ ans <- crow1C(ans, rep(Crow1positive, length.out = Rank))
+ dimnames(ans) <- list(dimnames(X1)[[1]],
+ if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = ""))
if (trace) {
if (nrow(ans) > 10) print(t(ans), dig = 3) else
print(ans, dig = 3)
@@ -674,7 +687,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
cqo.init.derivative.expression <- expression({
- which.optimizer = if (is.R()) {
+ which.optimizer <- if (is.R()) {
if (control$Quadratic && control$FastAlgorithm) {
"BFGS"
} else {
@@ -688,27 +701,27 @@ cqo.init.derivative.expression <- expression({
if (FALSE) {
- constraints = replace.constraints(constraints, diag(M),
+ constraints <- replace.constraints(constraints, diag(M),
rrcontrol$colx2.index)
- nice31 = (!control$EqualTol || control$ITolerances) &&
+ nice31 <- (!control$EqualTol || control$ITolerances) &&
all(trivial.constraints(constraints) == 1)
}
- NOS = ifelse(modelno == 3 || modelno == 5, M/2, M)
- canfitok = if (is.R())
+ NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
+ canfitok <- if (is.R())
(exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
(exists("CQO.FastAlgorithm",inherits=TRUE) && CQO.FastAlgorithm)
if (!canfitok)
stop("cannot fit this model using fast algorithm")
- p2star = if (nice31)
+ p2star <- if (nice31)
ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
(NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
- p1star = if (nice31) ifelse(modelno == 3 || modelno == 5,1+p1,p1) else
+ p1star <- if (nice31) ifelse(modelno == 3 || modelno == 5,1+p1,p1) else
(ncol(X_vlm_save)-p2star)
- X_vlm_1save = if (p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
+ X_vlm_1save <- if (p1star > 0) X_vlm_save[,-(1:p2star)] else NULL
})
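# Editor's sketch, not part of the patch: a worked count for the p2star formula above.
# Assuming NOS = 4 species, Rank = 2, EqualTol = TRUE and nice31 = FALSE, the second
# branch gives NOS*Rank latent-variable coefficients plus Rank*(Rank+1)/2 distinct
# quadratic (tolerance) terms shared across species:
NOS <- 4; Rank <- 2
NOS * Rank + Rank * (Rank + 1) / 2 * 1   # p2star = 8 + 3 = 11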
@@ -718,40 +731,42 @@ cqo.derivative.expression <- expression({
if (iter == 1 || quasi.newton$convergence) {
- quasi.newton = optim(par=Cmat, fn=callcqoc,
+ quasi.newton <- optim(par = Cmat, fn = callcqoc,
gr = if (control$GradientFunction) calldcqo else NULL,
- method=which.optimizer,
- control=list(fnscale=1,trace=as.integer(control$trace),
- parscale=rep(control$Parscale, len=length(Cmat)),
- maxit=control$Maxit.optim),
- etamat=eta, xmat=x, ymat=y, wvec=w,
- X_vlm_1save = X_vlm_1save,
- modelno=modelno, Control=control,
- n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
-
- z = matrix(getfromVGAMenv("z", prefix = ".VGAM.CQO."), n, M)
- U = matrix(getfromVGAMenv("U", prefix = ".VGAM.CQO."), M, n)
+ method = which.optimizer,
+ control = list(fnscale = 1, trace = as.integer(control$trace),
+ parscale = rep(control$Parscale, len = length(Cmat)),
+ maxit = control$Maxit.optim),
+ etamat = eta, xmat = x, ymat = y, wvec = w,
+ X_vlm_1save = X_vlm_1save,
+ modelno = modelno, Control = control,
+ n = n, M = M, p1star = p1star,
+ p2star = p2star, nice31 = nice31)
+
+ z <- matrix(getfromVGAMenv("z", prefix = ".VGAM.CQO."), n, M)
+ U <- matrix(getfromVGAMenv("U", prefix = ".VGAM.CQO."), M, n)
}
- ocmatrix = getfromVGAMenv("ocmatrix", prefix = ".VGAM.CQO.")
- maxdiff = max(abs(c(ocmatrix) - c(quasi.newton$par)) / (1 +
+ ocmatrix <- getfromVGAMenv("ocmatrix", prefix = ".VGAM.CQO.")
+ maxdiff <- max(abs(c(ocmatrix) - c(quasi.newton$par)) / (1 +
abs(c(ocmatrix))))
if (maxdiff < 1.0e-4) {
- Cmat = getfromVGAMenv("cmatrix", prefix = ".VGAM.CQO.")
+ Cmat <- getfromVGAMenv("cmatrix", prefix = ".VGAM.CQO.")
} else {
warning("solution does not correspond to .VGAM.CQO.cmatrix")
}
- alt = valt.1iter(x=x, z=z, U=U, Blist=Blist, C=Cmat, nice31=nice31,
- control=rrcontrol, lp.names=predictors.names,
- MSratio=M/NOS)
+ alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
+ C = Cmat, nice31 = nice31,
+ control = rrcontrol, lp.names = predictors.names,
+ MSratio = M/NOS)
if (length(alt$offset))
- offset = alt$offset
+ offset <- alt$offset
- B1.save = alt$B1 # Put later into extra
- tmp.fitted = alt$fitted # contains \bI_{Rank} \bnu if Corner
+ B1.save <- alt$B1 # Put later into extra
+ tmp.fitted <- alt$fitted # contains \bI_{Rank} \bnu if Corner
if (trace && control$OptimizeWrtC) {
cat("\n")
@@ -775,28 +790,28 @@ cqo.derivative.expression <- expression({
flush.console()
}
- Amat = alt$Amat #
- Cmat = alt$Cmat #
- Dmat = alt$Dmat #
+ Amat <- alt$Amat #
+ Cmat <- alt$Cmat #
+ Dmat <- alt$Dmat #
eval(cqo.end.expression) #
})
-cqo.end.expression = expression({
+cqo.end.expression <- expression({
rmfromVGAMenv(c("etamat"), prefix = ".VGAM.CQO.")
if (control$Quadratic) {
- if (!length(extra)) extra=list()
- extra$Amat = Amat # Not the latest iteration ??
- extra$Cmat = Cmat # Saves the latest iteration
- extra$Dmat = Dmat # Not the latest iteration
- extra$B1 = B1.save # Not the latest iteration (not good)
+ if (!length(extra)) extra <- list()
+ extra$Amat <- Amat # Not the latest iteration ??
+ extra$Cmat <- Cmat # Saves the latest iteration
+ extra$Dmat <- Dmat # Not the latest iteration
+ extra$B1 <- B1.save # Not the latest iteration (not good)
} else {
- Blist = replace.constraints(Blist.save, Amat, colx2.index)
+ Blist <- replace.constraints(Blist.save, Amat, colx2.index)
}
@@ -810,17 +825,17 @@ cqo.end.expression = expression({
deriv.mu <- eval(family@deriv)
wz <- eval(family@weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset # Contains \bI \bnu
+ wz <- checkwz(wz, M = M, trace = trace, wzeps = control$wzepsilon)
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset # Contains \bI \bnu
})
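# Editor's sketch, not part of the patch: for M = 1 the vchol()/vforsub()/vbacksub()
# calls above reduce to the usual IRLS adjusted dependent variable, i.e. dividing the
# score returned by family@deriv by the scalar working weight:
z.scalar <- function(eta, deriv.mu, wz, offset = 0)
  eta + deriv.mu / wz - offset   # z = eta + W^{-1} (dl/deta) - offset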
-crow1C = function(cmat,
+crow1C <- function(cmat,
crow1positive = rep(TRUE, length.out = ncol(cmat)),
amat = NULL) {
if (!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
@@ -829,10 +844,10 @@ crow1C = function(cmat,
for(LV in 1:ncol(cmat))
if (( crow1positive[LV] && cmat[1,LV] < 0) ||
(!crow1positive[LV] && cmat[1,LV] > 0)) {
- cmat[,LV] = -cmat[,LV]
- if (length(amat)) amat[,LV] = -amat[,LV]
+ cmat[,LV] <- -cmat[,LV]
+ if (length(amat)) amat[,LV] <- -amat[,LV]
}
- if (length(amat)) list(cmat=cmat, amat=amat) else cmat
+ if (length(amat)) list(cmat = cmat, amat = amat) else cmat
}
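# Editor's illustration, not part of the patch: crow1C() negates any column of cmat
# whose first element violates crow1positive (and the matching column of amat, if
# supplied), so the fitted latent variables keep a fixed orientation.
cc <- matrix(c(-0.4, 0.2,  0.7, 0.1), 2, 2)   # first row: -0.4 and 0.7
crow1C(cc, crow1positive = c(TRUE, TRUE))     # column 1 is flipped; column 2 untouched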
@@ -861,10 +876,10 @@ printqrrvglm <- function(x, ...)
cat("Residual deviance:", format(deviance(x)), "\n")
if (FALSE && length(x@criterion)) {
- ncrit <- names(x@criterion)
- for(ii in ncrit)
- if (ii != "loglikelihood" && ii != "deviance")
- cat(paste(ii, ":", sep=""), format(x@criterion[[ii]]), "\n")
+ ncrit <- names(x@criterion)
+ for(ii in ncrit)
+ if (ii != "loglikelihood" && ii != "deviance")
+ cat(paste(ii, ":", sep=""), format(x@criterion[[ii]]), "\n")
}
invisible(x)
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index 5a4b7b5..c68f626 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -53,21 +53,21 @@ setMethod("df.residual", "vlm", function(object, ...)
nvar_vlm <- function(object, ...) {
- M = npred(object)
- allH = matrix(unlist(constraints(object, type = "lm")), nrow = M)
- checkNonZero = function(m) sum(as.logical(m))
- numPars = apply(allH, 1, checkNonZero)
+ M <- npred(object)
+ allH <- matrix(unlist(constraints(object, type = "lm")), nrow = M)
+ checkNonZero <- function(m) sum(as.logical(m))
+ numPars <- apply(allH, 1, checkNonZero)
if (length(object@misc$predictors.names) == M)
- names(numPars) = object@misc$predictors.names
+ names(numPars) <- object@misc$predictors.names
- NumPars = rep(0, length = M)
+ NumPars <- rep(0, length = M)
for (jay in 1:M) {
- X_lm_jay = model.matrix(object, type = "lm", lapred.index = jay)
- NumPars[jay] = ncol(X_lm_jay)
+ X_lm_jay <- model.matrix(object, type = "lm", lapred.index = jay)
+ NumPars[jay] <- ncol(X_lm_jay)
}
if (length(object@misc$predictors.names) == M)
- names(NumPars) = object@misc$predictors.names
+ names(NumPars) <- object@misc$predictors.names
if (!all(NumPars == numPars)) {
print(NumPars - numPars) # Should be all 0s
stop("something wrong in nvar_vlm()")
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index 9f00083..afe953c 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,26 +1,25 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-effects.vlm <- function(object, ...)
-{
- cat("Sorry, this function has not been written yet. Returning a NULL.\n")
- invisible(NULL)
+effects.vlm <- function(object, ...) {
+ cat("Sorry, this function has not been written yet. Returning a NULL.\n")
+ invisible(NULL)
}
if(!isGeneric("effects"))
- setGeneric("effects", function(object, ...) standardGeneric("effects"))
+ setGeneric("effects", function(object, ...) standardGeneric("effects"))
if(is.R()) {
- setMethod("effects", "vlm", function(object, ...)
- effects.vlm(object, ...))
+ setMethod("effects", "vlm", function(object, ...)
+ effects.vlm(object, ...))
} else {
- setMethod("effects", "vlm", function(object, ...)
- effects.vlm(object, ...))
+ setMethod("effects", "vlm", function(object, ...)
+ effects.vlm(object, ...))
}
diff --git a/R/family.actuary.R b/R/family.actuary.R
index bc700b2..00bf114 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -104,12 +104,12 @@ rgumbelII <- function(n, shape, scale = 1) {
lshape <- as.list(substitute(lshape))
- e.shape <- link2list(lshape)
- l.shape <- attr(e.shape, "function.name")
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
lscale <- as.list(substitute(lscale))
- e.scale <- link2list(lscale)
- l.scale <- attr(e.scale, "function.name")
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
if (length(zero) &&
@@ -140,8 +140,8 @@ rgumbelII <- function(n, shape, scale = 1) {
new("vglmff",
blurb = c("Gumbel Type II distribution\n\n",
"Links: ",
- namesof("shape", l.shape, e.shape), ", ",
- namesof("scale", l.scale, e.scale), "\n",
+ namesof("shape", lshape, eshape), ", ",
+ namesof("scale", lscale, escale), "\n",
"Mean: scale^(1/shape) * gamma(1 - 1 / shape)\n",
"Variance: scale^(2/shape) * (gamma(1 - 2/shape) - ",
"gamma(1 + 1/shape)^2)"),
@@ -184,8 +184,8 @@ rgumbelII <- function(n, shape, scale = 1) {
predictors.names <-
- c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
- namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
+ c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
+ namesof(mynames2, .lscale , .escale , tag = FALSE))[
interleave.VGAM(M, M = Musual)]
@@ -216,20 +216,20 @@ rgumbelII <- function(n, shape, scale = 1) {
} # ilocal
etastart <-
- cbind(theta2eta(Shape.init, .l.shape , .e.shape ),
- theta2eta(Scale.init, .l.scale , .e.scale ))[,
+ cbind(theta2eta(Shape.init, .lshape , .eshape ),
+ theta2eta(Scale.init, .lscale , .escale ))[,
interleave.VGAM(M, M = Musual)]
}
}
}), list(
- .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape,
+ .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
.iscale = iscale, .ishape = ishape,
.probs.y = probs.y,
.imethod = imethod ) )),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
Shape <- as.matrix(Shape)
if (length( .perc.out ) > 1 && ncol(Shape) > 1)
@@ -250,24 +250,24 @@ rgumbelII <- function(n, shape, scale = 1) {
colnames(ans) <- paste(as.character( .perc.out ), "%", sep = "")
ans
}, list(
- .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape,
+ .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
.perc.out = perc.out ) )),
last = eval(substitute(expression({
Musual <- extra$Musual
misc$link <-
- c(rep( .l.shape , length = ncoly),
- rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ c(rep( .lshape , length = ncoly),
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .e.shape
- misc$earg[[Musual*ii ]] <- .e.scale
+ misc$earg[[Musual*ii-1]] <- .eshape
+ misc$earg[[Musual*ii ]] <- .escale
}
misc$Musual <- Musual
@@ -279,40 +279,40 @@ rgumbelII <- function(n, shape, scale = 1) {
}), list(
- .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape,
+ .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
.perc.out = perc.out,
.imethod = imethod ) )),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * dgumbelII(x = y, shape = Shape,
scale = Scale, log = TRUE))
- }, list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape
) )),
vfamily = c("gumbelII"),
deriv = eval(substitute(expression({
Musual <- 2
- Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
dl.dshape <- 1 / Shape + log(Scale / y) -
log(Scale / y) * (Scale / y)^Shape
dl.dscale <- Shape / Scale - (Shape / y) * (Scale / y)^(Shape - 1)
- dshape.deta <- dtheta.deta(Shape, .l.shape , .e.shape )
- dscale.deta <- dtheta.deta(Scale, .l.scale , .e.scale )
+ dshape.deta <- dtheta.deta(Shape, .lshape , .eshape )
+ dscale.deta <- dtheta.deta(Scale, .lscale , .escale )
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
cbind(dshape.deta, dscale.deta)
myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape
) )),
weight = eval(substitute(expression({
EulerM <- -digamma(1.0)
@@ -322,20 +322,14 @@ rgumbelII <- function(n, shape, scale = 1) {
ned2l.dscale2 <- (Shape / Scale)^2
ned2l.dshapescale <- digamma(2) / Scale
- wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
- ind11 <- ind22 <- ind12 <- NULL
- for (ii in 1:(M / Musual)) {
- ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
- ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
- ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
- }
- wz[, ind11] <- ned2l.dshape2 * dshape.deta^2
- wz[, ind22] <- ned2l.dscale2 * dscale.deta^2
- wz[, ind12] <- ned2l.dshapescale * dscale.deta * dshape.deta
-
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
- }), list( .l.scale = l.scale, .l.shape = l.shape ))))
+ wz <- array(c(c(w) * ned2l.dshape2 * dshape.deta^2,
+ c(w) * ned2l.dscale2 * dscale.deta^2,
+ c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
+ wz
+ }), list( .lscale = lscale, .lshape = lshape ))))
}
@@ -358,7 +352,7 @@ dmbeard <- function(x, shape, scale = 1, rho, epsilon, log = FALSE) {
if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
ans <- log(epsilon * exp(-x * scale) + shape) +
(-epsilon * x -
@@ -421,7 +415,7 @@ dmperks <- function(x, shape, scale = 1, epsilon, log = FALSE) {
if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
ans <- log(epsilon * exp(-x * scale) + shape) +
(-epsilon * x -
((epsilon - 1) / scale) *
@@ -487,7 +481,7 @@ dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
if (length(rho) != LLL) rho <- rep(rho, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
ans <- log(shape) - x * scale * (rho^(-1 / scale)) +
log(rho) + log(scale) +
(rho^(-1 / scale)) * log1p(shape * rho) -
@@ -513,7 +507,7 @@ dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
-alpha=shape; beta=scale;
+alpha = shape; beta = scale;
warning("does not integrate to unity")
@@ -525,8 +519,9 @@ alpha=shape; beta=scale;
-qbeard=function(x,u=0.5,alpha=1,beta=1,rho=1) {
- ret = ifelse(x<=0 | u<=0 | u>=1 | length(x)!=length(u) | beta<=0,
+qbeard <- function(x, u = 0.5, alpha = 1, beta = 1, rho = 1) {
+ ret <- ifelse(x<=0 | u<=0 | u>=1 |
+ length(x)!=length(u) | beta<=0,
NaN, (1/beta)*
(log((u**(-beta*exp(rho)))*
(1+exp(alpha+rho+beta*x))-1)-alpha-rho)-x)
@@ -554,7 +549,7 @@ dperks <- function(x, shape, scale = 1, log = FALSE) {
if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
ans <- log(shape) - x +
log1p(shape) / scale -
(1 + 1 / scale) * log(shape + exp(-x * scale))
@@ -618,27 +613,26 @@ rperks <- function(n, shape, scale = 1) {
-perks.control <- function(save.weight = TRUE, ...)
-{
+perks.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
perks <-
- function(lshape = "loge", lscale = "loge",
- ishape = NULL, iscale = NULL,
+ function(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ gshape = exp(-5:5), gscale = exp(-5:5),
nsimEIM = 500,
oim.mean = FALSE,
- zero = NULL)
-{
+ zero = NULL) {
lshape <- as.list(substitute(lshape))
- e.shape <- link2list(lshape)
- l.shape <- attr(e.shape, "function.name")
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
lscale <- as.list(substitute(lscale))
- e.scale <- link2list(lscale)
- l.scale <- attr(e.scale, "function.name")
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
if (!is.Numeric(nsimEIM, allowable.length = 1,
@@ -667,8 +661,8 @@ perks.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Perks' distribution\n\n",
"Links: ",
- namesof("shape", l.shape, e.shape), ", ",
- namesof("scale", l.scale, e.scale), "\n",
+ namesof("shape", lshape, eshape), ", ",
+ namesof("scale", lscale, escale), "\n",
"Median: qperks(p = 0.5, shape, scale)"),
constraints = eval(substitute(expression({
@@ -708,8 +702,8 @@ perks.control <- function(save.weight = TRUE, ...)
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
- namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
+ c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
+ namesof(mynames2, .lscale , .escale , tag = FALSE))[
interleave.VGAM(M, M = Musual)]
@@ -721,10 +715,8 @@ perks.control <- function(save.weight = TRUE, ...)
matC <- matrix(if (length( .iscale )) .iscale else 0 + NA,
n, ncoly, byrow = TRUE)
- shape.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
- exp( seq(0.1, 4, len = 07)))
- scale.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
- exp( seq(0.1, 4, len = 07)))
+ shape.grid <- .gshape
+ scale.grid <- .gscale
for (spp. in 1:ncoly) {
yvec <- y[, spp.]
@@ -755,35 +747,36 @@ perks.control <- function(save.weight = TRUE, ...)
} # spp.
etastart <-
- cbind(theta2eta(matH, .l.shape , .e.shape ),
- theta2eta(matC, .l.scale , .e.scale ))[,
+ cbind(theta2eta(matH, .lshape , .eshape ),
+ theta2eta(matC, .lscale , .escale ))[,
interleave.VGAM(M, M = Musual)]
} # End of !length(etastart)
- }), list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape,
+ }), list( .lscale = lscale, .lshape = lshape,
+ .eshape = eshape, .escale = escale,
+ .gshape = gshape, .gscale = gscale,
.ishape = ishape, .iscale = iscale
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
qperks(p = 0.5, shape = Shape, scale = Scale)
- }, list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape ))),
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
misc$link <-
- c(rep( .l.shape , length = ncoly),
- rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ c(rep( .lshape , length = ncoly),
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .e.shape
- misc$earg[[Musual*ii ]] <- .e.scale
+ misc$earg[[Musual*ii-1]] <- .eshape
+ misc$earg[[Musual*ii ]] <- .escale
}
@@ -791,28 +784,28 @@ perks.control <- function(save.weight = TRUE, ...)
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
- }), list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape,
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dperks(x = y, shape = Shape,
scale = Scale, log = TRUE))
}
- }, list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape ))),
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
vfamily = c("perks"),
deriv = eval(substitute(expression({
Musual <- 2
shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
- .l.shape , .e.shape )
+ .lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
- .l.scale , .e.scale )
+ .lscale , .escale )
temp2 <- exp(y * scale)
@@ -823,14 +816,14 @@ perks.control <- function(save.weight = TRUE, ...)
log1p(shape * temp2) / scale^2 -
(1 + 1 / scale) * shape * y * temp2 / temp3
- dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
- dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )
+ dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
+ dscale.deta <- dtheta.deta(scale, .lscale , .escale )
dthetas.detas <- cbind(dshape.deta, dscale.deta)
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .l.scale = l.scale, .l.shape = l.shape,
- .e.scale = e.scale, .e.shape = e.shape ))),
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
weight = eval(substitute(expression({
@@ -917,8 +910,8 @@ if (ii < 3) {
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
- }), list( .l.scale = l.scale,
- .e.scale = e.scale,
+ }), list( .lscale = lscale,
+ .escale = escale,
.nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
} # perks()
@@ -941,7 +934,7 @@ dmakeham <- function(x, shape, scale = 1, epsilon = 0, log = FALSE) {
if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
ans <- log(epsilon * exp(-x * scale) + shape) +
x * (scale - epsilon) -
(shape / scale) * expm1(x * scale)
@@ -1009,19 +1002,20 @@ rmakeham <- function(n, shape, scale = 1, epsilon = 0) {
-makeham.control <- function(save.weight = TRUE, ...)
-{
+makeham.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
makeham <-
function(lshape = "loge", lscale = "loge", lepsilon = "loge",
- ishape = NULL, iscale = NULL, iepsilon = 0.3,
+ ishape = NULL, iscale = NULL, iepsilon = NULL, # 0.3,
+ gshape = exp(-5:5),
+ gscale = exp(-5:5),
+ gepsilon = exp(-4:1),
nsimEIM = 500,
oim.mean = TRUE,
- zero = NULL)
-{
+ zero = NULL) {
@@ -1032,16 +1026,16 @@ makeham.control <- function(save.weight = TRUE, ...)
lshape <- as.list(substitute(lshape))
- e.shape <- link2list(lshape)
- l.shape <- attr(e.shape, "function.name")
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
lscale <- as.list(substitute(lscale))
- e.scale <- link2list(lscale)
- l.scale <- attr(e.scale, "function.name")
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
lepsil <- as.list(substitute(lepsil))
- e.epsil <- link2list(lepsil)
- l.epsil <- attr(e.epsil, "function.name")
+ eepsil <- link2list(lepsil)
+ lepsil <- attr(eepsil, "function.name")
if (!is.Numeric(nsimEIM, allowable.length = 1,
integer.valued = TRUE))
@@ -1074,9 +1068,9 @@ makeham.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Makeham distribution\n\n",
"Links: ",
- namesof("shape", l.shape, e.shape), ", ",
- namesof("scale", l.scale, e.scale), ", ",
- namesof("epsilon", l.epsil, e.epsil), "\n",
+ namesof("shape", lshape, eshape), ", ",
+ namesof("scale", lscale, escale), ", ",
+ namesof("epsilon", lepsil, eepsil), "\n",
"Median: qmakeham(p = 0.5, shape, scale, epsilon)"),
constraints = eval(substitute(expression({
@@ -1118,9 +1112,9 @@ makeham.control <- function(save.weight = TRUE, ...)
mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
mynames3 <- paste("epsilon", if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
- namesof(mynames2, .l.scale , .e.scale , tag = FALSE),
- namesof(mynames3, .l.epsil , .e.epsil , tag = FALSE))[
+ c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
+ namesof(mynames2, .lscale , .escale , tag = FALSE),
+ namesof(mynames3, .lepsil , .eepsil , tag = FALSE))[
interleave.VGAM(M, M = Musual)]
@@ -1135,10 +1129,8 @@ makeham.control <- function(save.weight = TRUE, ...)
n, ncoly, byrow = TRUE)
- shape.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
- exp( seq(0.1, 4, len = 05)))
- scale.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
- exp( seq(0.1, 4, len = 05)))
+ shape.grid <- unique(sort(c( .gshape )))
+ scale.grid <- unique(sort(c( .gscale )))
@@ -1176,8 +1168,7 @@ makeham.control <- function(save.weight = TRUE, ...)
- epsil.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
- exp( seq(0.1, 1, len = 05)))
+ epsil.grid <- c( .gepsil )
for (spp. in 1:ncoly) {
yvec <- y[, spp.]
wvec <- w[, spp.]
@@ -1200,32 +1191,33 @@ makeham.control <- function(save.weight = TRUE, ...)
} # spp.
- etastart <- cbind(theta2eta(matH, .l.shape , .e.shape ),
- theta2eta(matC, .l.scale , .e.scale ),
- theta2eta(matE, .l.epsil , .e.epsil ))[,
+ etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
+ theta2eta(matC, .lscale , .escale ),
+ theta2eta(matE, .lepsil , .eepsil ))[,
interleave.VGAM(M, M = Musual)]
} # End of !length(etastart)
}), list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil,
+ .gshape = gshape, .gscale = gscale, .gepsil = gepsilon,
.ishape = ishape, .iscale = iscale, .iepsil = iepsil
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .l.shape , .e.shape )
- scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .l.scale , .e.scale )
- epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .l.epsil , .e.epsil )
+ shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lshape , .eshape )
+ scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+ epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lepsil , .eepsil )
qmakeham(p = 0.5, shape = shape, scale = scale, epsil = epsil)
}, list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil
))),
last = eval(substitute(expression({
Musual <- extra$Musual
misc$link <-
- c(rep( .l.shape , length = ncoly),
- rep( .l.scale , length = ncoly),
- rep( .l.epsil , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ c(rep( .lshape , length = ncoly),
+ rep( .lscale , length = ncoly),
+ rep( .lepsil , length = ncoly))[interleave.VGAM(M, M = Musual)]
temp.names <- c(mynames1, mynames2, mynames3)[
interleave.VGAM(M, M = Musual)]
names(misc$link) <- temp.names
@@ -1233,9 +1225,9 @@ makeham.control <- function(save.weight = TRUE, ...)
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-2]] <- .e.shape
- misc$earg[[Musual*ii-1]] <- .e.scale
- misc$earg[[Musual*ii ]] <- .e.epsil
+ misc$earg[[Musual*ii-2]] <- .eshape
+ misc$earg[[Musual*ii-1]] <- .escale
+ misc$earg[[Musual*ii ]] <- .eepsil
}
misc$Musual <- Musual
@@ -1243,33 +1235,33 @@ makeham.control <- function(save.weight = TRUE, ...)
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
}), list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .l.shape , .e.shape )
- scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .l.scale , .e.scale )
- epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .l.epsil , .e.epsil )
+ shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lshape , .eshape )
+ scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+ epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lepsil , .eepsil )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dmakeham(x = y, shape = shape, scale = scale,
epsil = epsil, log = TRUE))
}
}, list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil
))),
vfamily = c("makeham"),
deriv = eval(substitute(expression({
Musual <- 3
shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
- .l.shape , .e.shape )
+ .lshape , .eshape )
scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
- .l.scale , .e.scale )
+ .lscale , .escale )
epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
- .l.epsil , .e.epsil )
+ .lepsil , .eepsil )
temp2 <- exp(y * scale)
@@ -1281,9 +1273,9 @@ makeham.control <- function(save.weight = TRUE, ...)
dl.depsil <- 1 / temp3 - y
- dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
- dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )
- depsil.deta <- dtheta.deta(epsil, .l.epsil , .e.epsil )
+ dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
+ dscale.deta <- dtheta.deta(scale, .lscale , .escale )
+ depsil.deta <- dtheta.deta(epsil, .lepsil , .eepsil )
dthetas.detas <- cbind(dshape.deta, dscale.deta, depsil.deta)
myderiv <- c(w) * cbind(dl.dshape,
@@ -1291,8 +1283,8 @@ makeham.control <- function(save.weight = TRUE, ...)
dl.depsil) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
}), list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil
))),
@@ -1396,8 +1388,8 @@ if (ii < 3) {
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list(
- .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
- .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
+ .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+ .eshape = eshape, .escale = escale, .eepsil = eepsil,
.nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
} # makeham()
@@ -1482,8 +1474,7 @@ rgompertz <- function(n, shape, scale = 1) {
-gompertz.control <- function(save.weight = TRUE, ...)
-{
+gompertz.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -1492,18 +1483,17 @@ gompertz.control <- function(save.weight = TRUE, ...)
function(lshape = "loge", lscale = "loge",
ishape = NULL, iscale = NULL,
nsimEIM = 500,
- zero = NULL)
-{
+ zero = NULL) {
lshape <- as.list(substitute(lshape))
- e.shape <- link2list(lshape)
- l.shape <- attr(e.shape, "function.name")
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
lscale <- as.list(substitute(lscale))
- e.scale <- link2list(lscale)
- l.scale <- attr(e.scale, "function.name")
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
@@ -1529,8 +1519,8 @@ gompertz.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Gompertz distribution\n\n",
"Links: ",
- namesof("shape", l.shape, e.shape ), ", ",
- namesof("scale", l.scale, e.scale ), "\n",
+ namesof("shape", lshape, eshape ), ", ",
+ namesof("scale", lscale, escale ), "\n",
"Median: scale * log(2 - 1 / shape)"),
constraints = eval(substitute(expression({
@@ -1570,8 +1560,8 @@ gompertz.control <- function(save.weight = TRUE, ...)
mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
- namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
+ c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
+ namesof(mynames2, .lscale , .escale , tag = FALSE))[
interleave.VGAM(M, M = Musual)]
@@ -1617,62 +1607,62 @@ gompertz.control <- function(save.weight = TRUE, ...)
matC[, spp.] <- mymat[index.shape, 1]
} # spp.
- etastart <- cbind(theta2eta(matH, .l.shape , .e.shape ),
- theta2eta(matC, .l.scale , .e.scale ))[,
+ etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
+ theta2eta(matC, .lscale , .escale ))[,
interleave.VGAM(M, M = Musual)]
} # End of !length(etastart)
- }), list( .l.shape = l.shape, .l.scale = l.scale,
- .e.shape = e.shape, .e.scale = e.scale,
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale,
.ishape = ishape, .iscale = iscale
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
log1p((scale / shape) * log(2)) / scale
- }, list( .l.shape = l.shape, .l.scale = l.scale,
- .e.shape = e.shape, .e.scale = e.scale ))),
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
Musual <- extra$Musual
misc$link <-
- c(rep( .l.shape , length = ncoly),
- rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ c(rep( .lshape , length = ncoly),
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .e.shape
- misc$earg[[Musual*ii ]] <- .e.scale
+ misc$earg[[Musual*ii-1]] <- .eshape
+ misc$earg[[Musual*ii ]] <- .escale
}
misc$Musual <- Musual
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$nsimEIM <- .nsimEIM
- }), list( .l.shape = l.shape, .l.scale = l.scale,
- .e.shape = e.shape, .e.scale = e.scale,
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
- scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
+ scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dgompertz(x = y, shape = shape,
scale = scale, log = TRUE))
}
- }, list( .l.shape = l.shape, .l.scale = l.scale,
- .e.shape = e.shape, .e.scale = e.scale ))),
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
vfamily = c("gompertz"),
deriv = eval(substitute(expression({
Musual <- 2
- shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .l.shape ,
- .e.shape )
- scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .l.scale ,
- .e.scale )
+ shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lshape ,
+ .eshape )
+ scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lscale ,
+ .escale )
temp2 <- exp(y * scale)
@@ -1681,14 +1671,14 @@ gompertz.control <- function(save.weight = TRUE, ...)
dl.dscale <- y * (1 - shape * temp2 / scale) -
shape * temp4 / scale^2
- dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
- dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )
+ dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
+ dscale.deta <- dtheta.deta(scale, .lscale , .escale )
dthetas.detas <- cbind(dshape.deta, dscale.deta)
myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .l.shape = l.shape, .l.scale = l.scale,
- .e.shape = e.shape, .e.scale = e.scale ))),
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
weight = eval(substitute(expression({
@@ -1746,8 +1736,8 @@ if (ii < 3) {
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
- }), list( .l.scale = l.scale,
- .e.scale = e.scale,
+ }), list( .lscale = lscale,
+ .escale = escale,
.nsimEIM = nsimEIM ))))
} # gompertz()
@@ -1766,7 +1756,7 @@ dmoe <- function (x, alpha = 1, lambda = 1, log = FALSE) {
if (length(alpha) != LLL) alpha <- rep(alpha, length.out = LLL)
if (length(lambda) != LLL) lambda <- rep(lambda, length.out = LLL)
- index0 = (x < 0)
+ index0 <- (x < 0)
if (log.arg) {
ans <- log(lambda) + (lambda * x) -
2 * log(expm1(lambda * x) + alpha)
@@ -1797,18 +1787,15 @@ qmoe <- function (p, alpha = 1, lambda = 1) {
-rmoe <- function (n, alpha = 1, lambda = 1)
-{
-
+rmoe <- function (n, alpha = 1, lambda = 1) {
qmoe(p = runif(n), alpha = alpha, lambda = lambda)
}
-exponential.mo.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+exponential.mo.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
@@ -1820,8 +1807,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...)
ialpha = 1, ilambda = NULL,
imethod = 1,
nsimEIM = 200,
- zero = NULL)
-{
+ zero = NULL) {
stop("fundamentally unable to estimate the parameters as ",
"the support of the density depends on the parameters")
@@ -1960,8 +1946,8 @@ exponential.mo.control <- function(save.weight = TRUE, ...)
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha0 = eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
- lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
+ alpha0 <- eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
log(3 - alpha0) / lambda
}, list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
@@ -1984,15 +1970,15 @@ exponential.mo.control <- function(save.weight = TRUE, ...)
misc$imethod <- .imethod
misc$expected <- TRUE
misc$multipleResponses <- TRUE
- misc$nsimEIM = .nsimEIM
+ misc$nsimEIM <- .nsimEIM
}), list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda,
.nsimEIM = nsimEIM,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha0 = eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
- lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
+ alpha0 <- eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * log(dmoe(x = y, alpha = alpha0,
@@ -2004,21 +1990,21 @@ exponential.mo.control <- function(save.weight = TRUE, ...)
deriv = eval(substitute(expression({
Musual <- 2
- alpha0 = eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
+ alpha0 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
.ealpha0 )
- lambda = eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+ lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
.elambda )
- temp2 = (expm1(lambda * y) + alpha0)
- dl.dalpha0 = -2 / temp2
- dl.dlambda = 1 / lambda + y - 2 * y * exp(lambda * y) / temp2
+ temp2 <- (expm1(lambda * y) + alpha0)
+ dl.dalpha0 <- -2 / temp2
+ dl.dlambda <- 1 / lambda + y - 2 * y * exp(lambda * y) / temp2
- dalpha0.deta = dtheta.deta(alpha0, .lalpha0 , .ealpha0 )
- dlambda.deta = dtheta.deta(lambda, .llambda , .elambda )
+ dalpha0.deta <- dtheta.deta(alpha0, .lalpha0 , .ealpha0 )
+ dlambda.deta <- dtheta.deta(lambda, .llambda , .elambda )
- dthetas.detas = cbind(dalpha0.deta,
- dlambda.deta)
- myderiv = c(w) * cbind(dl.dalpha0, dl.dlambda) * dthetas.detas
+ dthetas.detas <- cbind(dalpha0.deta,
+ dlambda.deta)
+ myderiv <- c(w) * cbind(dl.dalpha0, dl.dlambda) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
@@ -2026,52 +2012,52 @@ exponential.mo.control <- function(save.weight = TRUE, ...)
weight = eval(substitute(expression({
- NOS = M / Musual
- dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ NOS <- M / Musual
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
- wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
for(spp. in 1:NOS) {
- run.varcov = 0
- Alph = alpha0[, spp.]
- Lamb = lambda[, spp.]
+ run.varcov <- 0
+ Alph <- alpha0[, spp.]
+ Lamb <- lambda[, spp.]
for(ii in 1:( .nsimEIM )) {
- ysim = rmoe(n = n, alpha = Alph, lambda = Lamb)
+ ysim <- rmoe(n = n, alpha = Alph, lambda = Lamb)
if (ii < 3) {
}
- temp2 = (expm1(lambda * ysim) + alpha0)
- dl.dalpha0 = -2 / temp2
- dl.dlambda = 1 / lambda + ysim -
- 2 * ysim * exp(lambda * ysim) / temp2
+ temp2 <- (expm1(lambda * ysim) + alpha0)
+ dl.dalpha0 <- -2 / temp2
+ dl.dlambda <- 1 / lambda + ysim -
+ 2 * ysim * exp(lambda * ysim) / temp2
- temp3 = cbind(dl.dalpha0, dl.dlambda)
- run.varcov = run.varcov +
- temp3[, ind1$row.index] *
- temp3[, ind1$col.index]
+ temp3 <- cbind(dl.dalpha0, dl.dlambda)
+ run.varcov <- run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]
}
- run.varcov = cbind(run.varcov / .nsimEIM)
+ run.varcov <- cbind(run.varcov / .nsimEIM)
- wz1 = if (intercept.only)
+ wz1 <- if (intercept.only)
matrix(colMeans(run.varcov),
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 = wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+ dThetas.detas[, Musual * (spp. - 1) + ind1$col]
for(jay in 1:Musual)
for(kay in jay:Musual) {
- cptr = iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
- M = M)
- wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
+ cptr <- iam((spp. - 1) * Musual + jay,
+ (spp. - 1) * Musual + kay,
+ M = M)
+ wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
}
} # End of for(spp.) loop
@@ -2088,3 +2074,1968 @@ if (ii < 3) {
+
+
+
+
+ genbetaII <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ lshape3.q = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
+ ishape3.q = 1.0,
+ zero = NULL) {
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Generalized Beta II distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
+ "gamma(shape3.q - 1/shape1.a) / ",
+ "(gamma(shape2.p) * gamma(shape3.q))"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+
+ if (!length( .ishape1.a ) || !length( .iscale )) {
+ qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
+ xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ aa <- rep(if (length( .ishape1.a )) .ishape1.a else
+ abs(1 / fit0$coef[2]),
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]), length = n)
+ qq <- rep(if (length( .ishape3.q )) .ishape3.q else 1.0,
+ length.out = n)
+ parg <- rep(if (length( .ishape2.p )) .ishape2.p else 1.0,
+ length.out = n)
+
+
+ outOfRange <- (qq - 1/aa <= 0)
+ qq[outOfRange] <- 1 / aa[outOfRange] + 1
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <- 1 / aa[outOfRange] + 1
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape2.p = ishape2.p, .ishape3.q = ishape3.q ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans[parg <= 0] <- NA
+ ans[qq <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1.a = .lshape1.a , scale = .lscale ,
+ shape2.p = .lshape2.p , shape3.q = .lshape3.q )
+
+ misc$earg <- list(shape1.a = .eshape1.a , scale = .escale ,
+ shape2.p = .eshape2.p , shape3.q = .eshape3.q )
+
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * (log(aa) + (aa * parg - 1) * log(y) -
+ aa * parg * log(scale) +
+ - lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa)))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ vfamily = c("genbetaII"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3 <- digamma(parg + qq)
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+ temp4 <- log1p(temp2)
+
+ dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp <- aa * temp1 + temp3 - temp3a - temp4
+ dl.dq <- temp3 - temp3b - temp4
+
+ da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta,
+ dl.dp * dp.deta,
+ dl.dq * dq.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ weight = eval(substitute(expression({
+ temp5 <- trigamma(parg + qq)
+ temp5a <- trigamma(parg)
+ temp5b <- trigamma(qq)
+
+ ned2l.da <- (1 + parg+qq + parg * qq * (temp5a + temp5b +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ned2l.dp <- temp5a - temp5
+ ned2l.dq <- temp5b - temp5
+ ned2l.dascale <- (parg - qq - parg * qq *
+ (temp3a -temp3b)) / (scale*(1 + parg+qq))
+ ned2l.dap <- -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
+ ned2l.daq <- -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
+ ned2l.dscalep <- aa * qq / (scale*(parg+qq))
+ ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
+ ned2l.dpq <- -temp5
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # M==4 means 10=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dp * dp.deta^2
+ wz[, iam(4, 4, M)] <- ned2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] <- ned2l.dap * da.deta * dp.deta
+ wz[, iam(1, 4, M)] <- ned2l.daq * da.deta * dq.deta
+ wz[, iam(2, 3, M)] <- ned2l.dscalep * dscale.deta * dp.deta
+ wz[, iam(2, 4, M)] <- ned2l.dscaleq * dscale.deta * dq.deta
+ wz[, iam(3, 4, M)] <- ned2l.dpq * dp.deta * dq.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))))
+}
+
+
+rsinmad <- function(n, shape1.a, scale = 1, shape3.q)
+ qsinmad(runif(n), shape1.a, scale = scale, shape3.q)
+
+
+rlomax <- function(n, scale = 1, shape3.q)
+ rsinmad(n, shape1.a = 1, scale = scale, shape3.q)
+
+
+rfisk <- function(n, shape1.a, scale = 1)
+ rsinmad(n, shape1.a, scale = scale, shape3.q = 1)
+
+
+rparalogistic <- function(n, shape1.a, scale = 1)
+ rsinmad(n, shape1.a, scale = scale, shape1.a)
+
+
+rdagum <- function(n, shape1.a, scale = 1, shape2.p)
+ qdagum(runif(n), shape1.a = shape1.a, scale = scale,
+ shape2.p = shape2.p)
+
+
+rinvlomax <- function(n, scale = 1, shape2.p)
+ rdagum(n, shape1.a = 1, scale = scale, shape2.p)
+
+
+rinvparalogistic <- function(n, shape1.a, scale = 1)
+ rdagum(n, shape1.a, scale = scale, shape1.a)
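+
+
+# A minimal consistency sketch (assuming only the definitions above): the
+# r*() wrappers are thin reparameterisations of rsinmad()/rdagum(); for
+# example rfisk() is rsinmad() with shape3.q = 1, so the two generators
+# return identical draws under a common random seed.
+if (FALSE) {
+  set.seed(123); x1 <- rfisk(5, shape1.a = 2, scale = 3)
+  set.seed(123); x2 <- rsinmad(5, shape1.a = 2, scale = 3, shape3.q = 1)
+  stopifnot(all.equal(x1, x2))
+}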
+
+
+
+
+qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
+
+ LLL <- max(length(p), length(shape1.a), length(scale),
+ length(shape3.q))
+ if (length(p) != LLL)
+ p <- rep(p, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape3.q) != LLL)
+ shape3.q <- rep(shape3.q, length.out = LLL)
+
+ bad <- (p < 0) | (p > 1) | is.na(p) # After recycling, so lengths match
+ ans <- NA * p
+ ans[is.nan(p)] <- NaN
+
+ Shape1.a <- shape1.a[!bad]
+ Scale <- scale[!bad]
+ Shape3.q <- shape3.q[!bad]
+
+ QQ <- p[!bad]
+ ans[!bad] <- Scale * ((1 - QQ)^(-1/Shape3.q) - 1)^(1/Shape1.a)
+ ans
+}
+
+
+qlomax <- function(p, scale = 1, shape3.q)
+ qsinmad(p, shape1.a = 1, scale = scale, shape3.q)
+
+
+qfisk <- function(p, shape1.a, scale = 1)
+ qsinmad(p, shape1.a, scale = scale, shape3.q = 1)
+
+
+qparalogistic <- function(p, shape1.a, scale = 1)
+ qsinmad(p, shape1.a, scale = scale, shape1.a)
+
+
+
+qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
+
+ LLL <- max(length(p), length(shape1.a), length(scale),
+ length(shape2.p))
+ if (length(p) != LLL)
+ p <- rep(p, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape2.p) != LLL)
+ shape2.p <- rep(shape2.p, length.out = LLL)
+
+
+ bad <- (p < 0) | (p > 1) | (scale <= 0) | is.na(p)
+
+ ans <- NA * p
+ ans[is.nan(p)] <- NaN
+ ans[!bad] <- scale[!bad] *
+ (p[!bad]^(-1/shape2.p[!bad]) - 1)^(-1/shape1.a[!bad])
+ ans
+}
+
+
+
+qinvlomax <- function(p, scale = 1, shape2.p)
+ qdagum(p, shape1.a = 1, scale = scale, shape2.p)
+
+
+qinvparalogistic <- function(p, shape1.a, scale = 1)
+ qdagum(p, shape1.a, scale = scale, shape1.a)
+
+
+
+
+
+
+psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
+
+
+ LLL <- max(length(q), length(shape1.a), length(scale),
+ length(shape3.q))
+ if (length(q) != LLL)
+ q <- rep(q, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape3.q) != LLL)
+ shape3.q <- rep(shape3.q, length.out = LLL)
+
+
+ notpos <- (q <= 0) & !is.na(q)
+ Shape1.a <- shape1.a[!notpos]
+ Scale <- scale[!notpos]
+ Shape3.q <- shape3.q[!notpos]
+ QQ <- q[!notpos]
+
+ ans <- 0 * q # rep(0.0, len = LLL)
+ ans[!notpos] <- 1 - (1 + (QQ / Scale)^Shape1.a)^(-Shape3.q)
+
+ ans[scale <= 0] <- NaN
+ ans[shape1.a <= 0] <- NaN
+ ans[shape3.q <= 0] <- NaN
+
+ ans[q == -Inf] <- 0
+
+ ans
+}
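+
+
+# A minimal consistency sketch (assuming only the definitions above):
+# qsinmad() is the exact inverse of psinmad() on (0, 1).
+if (FALSE) {
+  pvals <- c(0.1, 0.5, 0.9)
+  qvals <- qsinmad(pvals, shape1.a = 2, scale = 3, shape3.q = 4)
+  stopifnot(all.equal(psinmad(qvals, shape1.a = 2, scale = 3,
+                              shape3.q = 4), pvals))
+}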
+
+
+plomax <- function(q, scale = 1, shape3.q)
+ psinmad(q, shape1.a = 1, scale = scale, shape3.q)
+
+
+pfisk <- function(q, shape1.a, scale = 1)
+ psinmad(q, shape1.a, scale = scale, shape3.q = 1)
+
+
+pparalogistic <- function(q, shape1.a, scale = 1)
+ psinmad(q, shape1.a, scale = scale, shape1.a)
+
+
+
+pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
+
+
+ LLL <- max(length(q), length(shape1.a), length(scale),
+ length(shape2.p))
+ if (length(q) != LLL)
+ q <- rep(q, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape2.p) != LLL)
+ shape2.p <- rep(shape2.p, length.out = LLL)
+
+ notpos <- (q <= 0) & !is.na(q)
+ Shape1.a <- shape1.a[!notpos]
+ Scale <- scale[!notpos]
+ Shape2.p <- shape2.p[!notpos]
+ QQ <- q[!notpos]
+
+ ans <- 0 * q
+ ans[!notpos] <- (1 + (QQ/Scale)^(-Shape1.a))^(-Shape2.p)
+
+ ans[scale <= 0] <- NaN
+ ans[shape1.a <= 0] <- NaN
+ ans[shape2.p <= 0] <- NaN
+ ans[q == -Inf] <- 0
+
+ ans
+}
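+
+
+# A minimal consistency sketch (assuming only the definitions above): the
+# Dagum and Singh-Maddala families are related by inversion, i.e. if X has a
+# Dagum(a, scale = 1/s, p) law then 1/X has a Singh-Maddala(a, scale = s,
+# shape3.q = p) law, so pdagum(1/t, a, 1/s, p) == 1 - psinmad(t, a, s, p).
+if (FALSE) {
+  tt <- c(0.5, 1, 2)
+  stopifnot(all.equal(pdagum(1 / tt, shape1.a = 2, scale = 1/3, shape2.p = 4),
+                      1 - psinmad(tt, shape1.a = 2, scale = 3, shape3.q = 4)))
+}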
+
+
+
+
+
+pinvlomax <- function(q, scale = 1, shape2.p)
+ pdagum(q, shape1.a = 1, scale = scale, shape2.p)
+
+
+pinvparalogistic <- function(q, shape1.a, scale = 1)
+ pdagum(q, shape1.a, scale = scale, shape1.a)
+
+
+
+dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(shape1.a),
+ length(scale), length(shape3.q))
+ x <- rep(x, length.out = LLL);
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ scale <- rep(scale, length.out = LLL);
+ shape3.q <- rep(shape3.q, length.out = LLL)
+
+ Loglik <- rep(log(0), length.out = LLL)
+ xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
+ Loglik[xok] <- log(shape1.a[xok]) + log(shape3.q[xok]) +
+ (shape1.a[xok]-1) * log(x[xok]) -
+ shape1.a[xok] * log(scale[xok]) -
+ (1 + shape3.q[xok]) * log1p((x[xok]/scale[xok])^shape1.a[xok])
+ x.eq.0 <- (x == 0) & !is.na(x)
+ Loglik[x.eq.0] <- log(shape1.a[x.eq.0]) + log(shape3.q[x.eq.0]) -
+ shape1.a[x.eq.0] * log(scale[x.eq.0])
+ Loglik[is.na(x)] <- NA
+ Loglik[is.nan(x)] <- NaN
+ Loglik[x == Inf] <- log(0)
+
+ if (log.arg) Loglik else exp(Loglik)
+}
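+
+
+# A minimal numeric sketch (assuming only the definitions above): dsinmad()
+# is a proper density on (0, Inf), and its log = TRUE form agrees with
+# taking log() of the density.
+if (FALSE) {
+  integrate(dsinmad, 0, Inf, shape1.a = 2, scale = 3, shape3.q = 4)$value # ~1
+  xx <- c(0.5, 1, 2)
+  stopifnot(all.equal(dsinmad(xx, 2, 3, 4, log = TRUE),
+                      log(dsinmad(xx, 2, 3, 4))))
+}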
+
+
+dlomax <- function(x, scale = 1, shape3.q, log = FALSE)
+ dsinmad(x, shape1.a = 1, scale = scale, shape3.q, log = log)
+
+
+dfisk <- function(x, shape1.a, scale = 1, log = FALSE)
+ dsinmad(x, shape1.a, scale = scale, shape3.q = 1, log = log)
+
+
+dparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
+ dsinmad(x, shape1.a, scale = scale, shape1.a, log = log)
+
+
+
+ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(shape1.a), length(scale),
+ length(shape2.p))
+ x <- rep(x, length.out = LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ scale <- rep(scale, length.out = LLL)
+ shape2.p <- rep(shape2.p, length.out = LLL)
+
+ Loglik <- rep(log(0), length.out = LLL)
+ xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
+ Loglik[xok] <- log(shape1.a[xok]) +
+ log(shape2.p[xok]) +
+ (shape1.a[xok] * shape2.p[xok]-1) * log( x[xok]) -
+ shape1.a[xok] * shape2.p[xok] * log(scale[xok]) -
+ (1 + shape2.p[xok]) *
+ log1p((x[xok]/scale[xok])^shape1.a[xok])
+ Loglik[shape2.p <= 0] <- NaN
+
+ x.eq.0 <- (x == 0) & !is.na(x)
+ Loglik[x.eq.0] <- log(shape1.a[x.eq.0]) +
+ log(shape2.p[x.eq.0]) -
+ shape1.a[x.eq.0] * shape2.p[x.eq.0] * log(scale[x.eq.0])
+ Loglik[is.na(x)] <- NA
+ Loglik[is.nan(x)] <- NaN
+ Loglik[x == Inf] <- log(0)
+
+ if (log.arg) Loglik else exp(Loglik)
+}
+
+
+dinvlomax <- function(x, scale = 1, shape2.p, log = FALSE)
+ ddagum(x, shape1.a = 1, scale = scale, shape2.p, log = log)
+
+
+dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
+ ddagum(x, shape1.a, scale = scale, shape1.a, log = log)
+
+
+
+ sinmad <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape3.q = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape3.q = 1.0,
+ zero = NULL) {
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Singh-Maddala distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(shape3.q - 1/shape1.a) / ",
+ "gamma(shape3.q)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+ parg <- 1
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
+ xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+
+
+ if (!length(etastart)) {
+ aa <- rep(if (length( .ishape1.a)) .ishape1.a else 1/fit0$coef[2],
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else exp(fit0$coef[1]),
+ length.out = n)
+ qq <- rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
+ length.out = n)
+
+
+ outOfRange <- (aa * qq <= 1)
+ qq[outOfRange] <- 1 / aa[outOfRange] + 1
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape3.q = ishape3.q ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans[qq <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+
+ last = eval(substitute(expression({
+ misc$link <-
+ c(shape1.a = .lshape1.a, scale = .lscale , shape3.q = .lshape3.q)
+
+ misc$earg <-
+ list(shape1.a = .eshape1.a, scale = .escale , shape3.q = .eshape3.q)
+
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dsinmad(x = y, shape1.a = aa, scale = scale,
+ shape3.q = qq, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q ))),
+ vfamily = c("sinmad"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+
+ dl.da <- 1 / aa + parg * temp1 - (parg + qq) * temp1 / (1 + 1 / temp2)
+ dl.dscale <- (aa / scale) * (-parg + (parg + qq) / (1 + 1 / temp2))
+ dl.dq <- digamma(parg + qq) - temp3b - log1p(temp2)
+
+ da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind(dl.da * da.deta,
+ dl.dscale * dscale.deta,
+ dl.dq * dq.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+
+ weight = eval(substitute(expression({
+ ned2l.da <- (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1 + parg + qq))
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ned2l.dq <- 1/qq^2
+ ned2l.dascale <- (parg - qq - parg*qq *
+ (temp3a -temp3b)) / (scale*(1 + parg+qq))
+ ned2l.daq <- -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
+ ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] <- ned2l.daq * da.deta * dq.deta
+ wz[, iam(2, 3, M)] <- ned2l.dscaleq * dscale.deta * dq.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))))
+}
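+
+
+# A minimal fitting sketch (illustrative; assumes VGAM's vglm() and Coef()
+# interfaces): simulate from rsinmad() and recover the parameters with the
+# sinmad() family above.
+if (FALSE) {
+  set.seed(1)
+  sdat <- data.frame(y = rsinmad(1000, shape1.a = 2, scale = 3, shape3.q = 4))
+  fit <- vglm(y ~ 1, sinmad, data = sdat, trace = TRUE)
+  Coef(fit)  # back-transformed shape1.a, scale and shape3.q estimates
+}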
+
+
+ dagum <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Dagum distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
+ "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a) / ",
+ "gamma(shape2.p)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p <- if (length( .ishape2.p)) .ishape2.p else 1
+ xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ parg <- rep(if (length( .ishape2.p )) .ishape2.p else 1.0,
+ length.out = n)
+ aa <- rep(if (length( .ishape1.a )) .ishape1.a else
+ -1/fit0$coef[2],
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+
+
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <- 1 / aa[outOfRange] + 1
+
+
+
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape2.p = ishape2.p ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- 1
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans[parg <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1.a = .lshape1.a, scale = .lscale ,
+ p = .lshape2.p )
+
+ misc$earg <- list(shape1.a = .eshape1.a, scale = .escale ,
+ p = .eshape2.p )
+
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * ddagum(x = y, shape1.a = aa, scale = Scale,
+ shape2.p = parg, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p ))),
+ vfamily = c("dagum"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq <- 1
+
+ temp1 <- log(y / Scale)
+ temp2 <- (y / Scale)^aa
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+
+ dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp <- aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
+
+ da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta,
+ dl.dp * dp.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ weight = eval(substitute(expression({
+ ned2l.da <- (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ned2l.dscale <- aa^2 * parg * qq / (Scale^2 * (1+parg+qq))
+ ned2l.dp <- 1 / parg^2
+ ned2l.dascale <- (parg - qq - parg * qq *(temp3a -temp3b)
+ ) / (Scale * (1 + parg+qq))
+ ned2l.dap <- -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
+ ned2l.dscalep <- aa * qq / (Scale * (parg + qq))
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dp * dp.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] <- ned2l.dap * da.deta * dp.deta
+ wz[, iam(2, 3, M)] <- ned2l.dscalep * dscale.deta * dp.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))))
+}
+
+
+
+ betaII <-
+ function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+ iscale = NULL, ishape2.p = 2, ishape3.q = 2,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Beta II distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(shape2.p + 1) * ",
+ "gamma(shape3.q - 1) / ",
+ "(gamma(shape2.p) * gamma(shape3.q))"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+
+ if (!length( .iscale )) {
+ qvec <- c(0.25, .5, .75) # Arbitrary; could be made an argument
+ ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
+ xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+ qq <- rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
+ length.out = n)
+ parg <- rep(if (length( .ishape2.p)) .ishape2.p else 1.0,
+ length.out = n)
+
+
+
+ aa <- 1
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <- 1 / aa[outOfRange] + 1
+ outOfRange <- (qq - 1/aa <= 0)
+ qq[outOfRange] <- 1 / aa + 1
+
+
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .iscale = iscale,
+ .ishape2.p = ishape2.p,
+ .ishape3.q = ishape3.q ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- 1
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans[parg <= 0] <- NA
+ ans[qq <= 0] <- NA
+ ans
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ last = eval(substitute(expression({
+ misc$link <- c(scale = .lscale ,
+ shape2.p = .lshape2.p,
+ shape3.q = .lshape3.q)
+
+ misc$earg <- list(scale = .escale ,
+ shape2.p = .eshape2.p,
+ shape3.q = .eshape3.q)
+
+ misc$expected <- TRUE
+ }), list(
+ .lscale = lscale, .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape2.p = eshape2.p, .eshape3.q = eshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
+ }
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ vfamily = c("betaII"),
+ deriv = eval(substitute(expression({
+ aa <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq <- eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3 <- digamma(parg + qq)
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+ temp4 <- log1p(temp2)
+
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp <- aa * temp1 + temp3 - temp3a - temp4
+ dl.dq <- temp3 - temp3b - temp4
+
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind( dl.dscale * dscale.deta,
+ dl.dp * dp.deta,
+ dl.dq * dq.deta )
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ weight = eval(substitute(expression({
+ temp5 <- trigamma(parg + qq)
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ned2l.dp <- trigamma(parg) - temp5
+ ned2l.dq <- trigamma(qq) - temp5
+ ned2l.dscalep <- aa * qq / (scale*(parg+qq))
+ ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
+ ned2l.dpq <- -temp5
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dp * dp.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dscalep * dscale.deta * dp.deta
+ wz[, iam(1, 3, M)] <- ned2l.dscaleq * dscale.deta * dq.deta
+ wz[, iam(2, 3, M)] <- ned2l.dpq * dp.deta * dq.deta
+ wz <- c(w) * wz
+ wz
+ }), list(
+ .lscale = lscale, .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape2.p = eshape2.p, .eshape3.q = eshape3.q ))))
+}
+
+
+
+ lomax <- function(lscale = "loge", lshape3.q = "loge",
+ iscale = NULL, ishape3.q = NULL, # 2.0,
+ gshape3.q = exp(-5:5),
+ zero = NULL) {
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale / (shape3.q - 1)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ mynames1 <- "scale"
+ mynames2 <- "shape3.q"
+ predictors.names <-
+ c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
+ namesof(mynames2, .lshape3.q , earg = .eshape3.q , tag = FALSE))
+
+ aa <- parg <- 1
+
+
+
+
+ lomax.Loglikfun <- function(shape3.q, y, x, w, extraargs) {
+ qvec <- c(0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+ xvec <- log( (1-qvec)^(-1/ shape3.q ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ init.scale <- exp(fit0$coef[1])
+
+ ans <- sum(c(w) * dlomax(x = y, shape3.q = shape3.q,
+ scale = init.scale, log = TRUE))
+ ans
+ }
+
+ shape3.q.grid <- .gshape3.q
+ yvec <- y
+ wvec <- w
+ Init.shape3.q <-
+ getMaxMin(shape3.q.grid,
+ objfun = lomax.Loglikfun,
+ y = yvec, x = x, w = wvec,
+ extraargs = NULL)
+
+
+
+
+ if (!length( .iscale )) {
+ qvec <- c(0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+ ishape3.q <- if (length( .ishape3.q )) .ishape3.q else Init.shape3.q
+ xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ qq <- rep(if (length( .ishape3.q)) .ishape3.q else Init.shape3.q,
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+
+
+ aa <- 1
+ outOfRange <- (qq - 1/aa <= 0)
+ qq[outOfRange] <- 1 / aa + 1
+
+
+
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q,
+ .gshape3.q = gshape3.q,
+ .iscale = iscale, .ishape3.q = ishape3.q ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- 1
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 2], .lshape3.q , earg = .eshape3.q )
+
+
+
+
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans[qq <= 0] <- NA
+ ans
+ }, list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
+ last = eval(substitute(expression({
+ misc$link <- c(scale = .lscale , shape3.q = .lshape3.q)
+
+ misc$earg <- list(scale = .escale , shape3.q = .eshape3.q)
+
+
+ misc$expected <- TRUE
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dlomax(x = y, scale = scale,
+ shape3.q = qq, log = TRUE))
+ }
+ }, list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
+ vfamily = c("lomax"),
+ deriv = eval(substitute(expression({
+ aa <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- 1
+ qq <- eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
+ temp2 <- (y/scale)^aa
+
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dq <- digamma(parg + qq) - digamma(qq) - log1p(temp2)
+
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dq.deta <- dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind( dl.dscale * dscale.deta,
+ dl.dq * dq.deta )
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
+ weight = eval(substitute(expression({
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ned2l.dq <- 1/qq^2
+ ned2l.dscaleq <- -aa * parg / (scale*(parg+qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dscaleq * dscale.deta * dq.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))))
+}
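+
+
+# A minimal usage sketch (illustrative; assumes VGAM's vglm() and Coef()
+# interfaces): gshape3.q is the grid searched by lomax.Loglikfun() above when
+# no ishape3.q is supplied, so a wider or finer grid can help awkward data.
+if (FALSE) {
+  set.seed(1)
+  ldat <- data.frame(y = rlomax(500, scale = 2, shape3.q = 3))
+  fit <- vglm(y ~ 1, lomax(gshape3.q = exp(seq(-3, 3, by = 0.5))),
+              data = ldat)
+  Coef(fit)  # back-transformed scale and shape3.q estimates
+}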
+
+
+
+ fisk <- function(lshape1.a = "loge", lscale = "loge",
+ ishape1.a = NULL, iscale = NULL,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Fisk distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ qq <- parg <- 1
+
+ if (!length( .iscale )) {
+ qvec <- c( 0.25, 0.5, 0.75) # Arbitrary; could be made an argument
+ xvec <- log( 1 / qvec - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ aa <- rep(if (length( .ishape1.a)) .ishape1.a else
+ abs(-1 / fit0$coef[2]),
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+
+
+ parg <- 1
+ qq <- 1
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <- 1 / aa[outOfRange] + 1
+ outOfRange <- (qq - 1/aa <= 0)
+ qq[outOfRange] <- 1 / aa + 1
+
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
+ theta2eta(scale, .lscale , earg = .escale ))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- 1
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1.a = .lshape1.a , scale = .lscale )
+
+ misc$earg <- list(shape1.a = .eshape1.a , scale = .escale )
+
+ misc$expected <- TRUE
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- qq <- 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ vfamily = c("fisk"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- qq <- 1
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+
+ dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+
+ da.deta <- dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ weight = eval(substitute(expression({
+ ned2l.da <- (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1 + parg + qq))
+ ned2l.dascale <- (parg - qq - parg*qq*(temp3a - temp3b)) / (
+ scale * (1 + parg + qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))))
+}
+
+
+ invlomax <- function(lscale = "loge",
+ lshape2.p = "loge",
+ iscale = NULL,
+ ishape2.p = 1.0,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+
+ new("vglmff",
+ blurb = c("Inverse Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
+ "Mean: does not exist"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+
+ qq <- aa <- 1
+
+ if (!length( .iscale )) {
+ qvec <- c(0.25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p <- if (length( .ishape2.p)) .ishape2.p else 1
+ xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+ if (!length(etastart)) {
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+ parg <- rep(if (length( .ishape2.p)) .ishape2.p else 1.0,
+ length.out = n)
+
+
+
+
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ }
+ }), list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p,
+ .iscale = iscale, .ishape2.p = ishape2.p ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+
+ NA * Scale
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ last = eval(substitute(expression({
+ misc$link <- c(scale = .lscale , shape2.p = .lshape2.p )
+
+ misc$earg <- list(scale = .escale , shape2.p = .eshape2.p )
+
+ misc$expected <- TRUE
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq <- 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dinvlomax(x = y, scale = scale,
+ shape2.p = parg, log = TRUE))
+ }
+ }, list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))),
+ vfamily = c("invlomax"),
+ deriv = eval(substitute(expression({
+ aa <- qq <- 1
+ scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p)
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp <- aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
+
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dp.deta <- dtheta.deta(parg, .lshape2.p , earg = .eshape2.p)
+
+ c(w) * cbind( dl.dscale * dscale.deta,
+ dl.dp * dp.deta )
+ }), list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))),
+ weight = eval(substitute(expression({
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1 + parg + qq))
+ ned2l.dp <- 1 / parg^2
+ ned2l.dscalep <- aa * qq / (scale * (parg + qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dp * dp.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dscalep * dscale.deta * dp.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))))
+}
+
+
+ paralogistic <- function(lshape1.a = "loge",
+ lscale = "loge",
+ ishape1.a = 2,
+ iscale = NULL,
+ zero = NULL) {
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Paralogistic distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(shape1.a - 1/shape1.a) / gamma(shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ parg <- 1
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape1.a <- if (length( .ishape1.a)) .ishape1.a else 1
+ xvec <- log( (1-qvec)^(-1/ ishape1.a ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ aa <- rep(if (length( .ishape1.a)) .ishape1.a else
+ abs(1/fit0$coef[2]),
+ length.out = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
+
+
+
+
+ parg <- 1
+ qq <- aa
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <- 1 / aa[outOfRange] + 1
+ outOfRange <- (qq - 1/aa <= 0)
+ aa[outOfRange] <-
+ qq[outOfRange] <- 2 # Need aa > 1, where aa == qq
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale
+ ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- aa
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1.a = .lshape1.a, scale = .lscale)
+
+ misc$earg <- list(shape1.a = .eshape1.a, scale = .escale )
+
+ misc$expected <- TRUE
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- aa
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dparalogistic(x = y, shape1.a = aa,
+ scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ vfamily = c("paralogistic"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- 1
+ qq <- aa
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+
+ dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+
+ da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta)
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ weight = eval(substitute(expression({
+ ned2l.da <- (1 + parg + qq +
+ parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ned2l.dascale <- (parg - qq - parg*qq*(temp3a -temp3b)) /(
+ scale*(1 + parg+qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))))
+}
+
+
+ invparalogistic <- function(lshape1.a = "loge", lscale = "loge",
+ ishape1.a = 2, iscale = NULL,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Inverse paralogistic distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(shape1.a + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a)/gamma(shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec <- c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p <- if (length( .ishape1.a )) .ishape1.a else 1
+ xvec <- log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ qq <- 1
+ if (!length(etastart)) {
+ aa <- rep(if (length( .ishape1.a)) .ishape1.a else -1/fit0$coef[2],
+ length = n)
+ scale <- rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]), length = n)
+
+
+
+
+
+ parg <- aa
+ qq <- 1
+ outOfRange <- (parg + 1/aa <= 0)
+ parg[outOfRange] <-
+ aa[outOfRange] <- 2
+ outOfRange <- (qq - 1/aa <= 0)
+ qq[outOfRange] <- 1 / aa[outOfRange] + 1
+
+
+
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- aa
+ qq <- 1
+
+ ans <- Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] <- NA
+ ans[qq - 1/aa <= 0] <- NA
+ ans[aa <= 0] <- NA
+ ans[Scale <= 0] <- NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1.a = .lshape1.a, scale = .lscale )
+
+ misc$earg <- list(shape1.a = .eshape1.a, scale = .escale )
+
+ misc$expected <- TRUE
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- aa
+ qq <- 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dinvparalogistic(x = y, shape1.a = aa,
+ scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ vfamily = c("invparalogistic"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg <- aa
+ qq <- 1
+
+ temp1 <- log(y/scale)
+ temp2 <- (y/scale)^aa
+ temp3a <- digamma(parg)
+ temp3b <- digamma(qq)
+
+ dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale <- (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+
+ da.deta <- dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+
+ weight = eval(substitute(expression({
+ ned2l.da <- (1 + parg + qq +
+ parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1 + parg + qq))
+ ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1 + parg + qq))
+ ned2l.dascale <- (parg - qq -
+ parg * qq * (temp3a -temp3b)) / (scale *
+ (1 + parg + qq))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.da * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dascale * da.deta * dscale.deta
+ wz <- c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))))
+}
+
+
+
+
+
+
+
+
+
+ if (FALSE)
+ genlognormal <- function(link.sigma = "loge", link.r = "loge",
+ init.sigma = 1, init.r = 1, zero = NULL) {
+warning("2/4/04; doesn't work, possibly because first derivs are ",
+ "not continuous (sign() is used). Certainly, the derivs wrt ",
+ "mymu are problematic (run with maxit=4:9 and look at weight ",
+ "matrices). Possibly fundamentally cannot be estimated by IRLS. ",
+ "Pooling doesn't seem to help")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ link.sigma <- as.list(substitute(link.sigma))
+ esigma <- link2list(link.sigma)
+ link.sigma <- attr(esigma, "function.name")
+
+ link.r <- as.list(substitute(link.r))
+ er <- link2list(link.r)
+ link.r <- attr(er, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Three-parameter generalized lognormal distribution\n\n",
+ "Links: ",
+ "loc; ",
+ namesof("sigma", link.sigma, earg = esigma, tag = TRUE), ", ",
+ namesof("r", link.r, earg = er, tag = TRUE)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names <-
+ c(namesof("loc", "identity", earg = list(), tag = FALSE),
+ namesof("sigma", .link.sigma, earg = .esigma, tag = FALSE),
+ namesof("r", .link.r, earg = .er, tag = FALSE))
+
+ if (!length( .init.sigma) || !length( .init.r)) {
+ init.r <- if (length( .init.r)) .init.r else 1
+ sigma.init <- (0.5 * sum(abs(log(y) -
+ mean(log(y )))^init.r))^(1/init.r)
+ }
+ if (any(y <= 0)) stop("y must be positive")
+
+ if (!length(etastart)) {
+ sigma.init <- rep(if (length( .init.sigma)) .init.sigma else
+ sigma.init, length.out = n)
+ r.init <- if (length( .init.r)) .init.r else init.r
+ etastart <-
+ cbind(mu = rep(log(median(y)), length.out = n),
+ sigma = sigma.init,
+ r = r.init)
+ }
+ }), list( .link.sigma = link.sigma, .link.r = link.r,
+ .init.sigma = init.sigma, .init.r = init.r ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mymu <- eta2theta(eta[, 1], "identity", earg = list())
+ sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+ r <- eta2theta(eta[, 3], .link.r, earg = .er)
+ r
+ }, list( .link.sigma = link.sigma, .link.r = link.r ))),
+
+ last = eval(substitute(expression({
+ misc$link <- c(loc = "identity",
+ sigma = .link.sigma,
+ r = .link.r )
+ misc$expected <- TRUE
+ }), list( .link.sigma = link.sigma, .link.r = link.r ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu <- eta2theta(eta[, 1], "identity", earg = list())
+ sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+ r <- eta2theta(eta[, 3], .link.r, earg = .er)
+ temp89 <- (abs(log(y)-mymu)/sigma)^r
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(c(w) * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
+ }, list( .link.sigma = link.sigma, .link.r = link.r ))),
+ vfamily = c("genlognormal3"),
+ deriv = eval(substitute(expression({
+ mymu <- eta2theta(eta[, 1], "identity", earg = list())
+ sigma <- eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+
+ r <- eta2theta(eta[, 3], .link.r, earg = .er)
+ ss <- 1 + 1/r
+ temp33 <- (abs(log(y)-mymu)/sigma)
+ temp33r1 <- temp33^(r-1)
+
+ dl.dmymu <- temp33r1 * sign(log(y)-mymu) / sigma
+ dl.dsigma <- (temp33*temp33r1 - 1) / sigma
+ dl.dr <- (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
+ temp33r1 * log(temp33r1) / r
+
+ dmymu.deta <- dtheta.deta(mymu, "identity", earg = list())
+ dsigma.deta <- dtheta.deta(sigma, .link.sigma, earg = .esigma)
+ dr.deta <- dtheta.deta(r, .link.r, earg = .er)
+
+ c(w) * cbind(dl.dmymu * dmymu.deta,
+ dl.dsigma * dsigma.deta,
+ dl.dr * dr.deta)
+ }), list( .link.sigma = link.sigma, .link.r = link.r ))),
+ weight = expression({
+ wz <- matrix(0, n, 6) # Using 5 columns would give a small saving of 1 column
+
+ B <- log(r) + digamma(ss)
+ ned2l.dmymu2 <- (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
+ ned2l.dsigma2 <- r / sigma^2
+ ned2l.dr2 <- (ss * trigamma(ss) + B^2 - 1) / r^3
+ ned2l.dsigmar <- -B / (r * sigma)
+
+ wz[, iam(1, 1, M)] <- ned2l.dmymu2 * dmymu.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsigma2 * dsigma.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dr2 * dr.deta^2
+ wz[, iam(2, 3, M)] <- ned2l.dsigmar * dsigma.deta * dr.deta
+ wz <- c(w) * wz
+ wz
+ }))
+}
+
+
+
+
+
+
+
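For reference, a minimal standalone sketch of the 3 x 3 expected information per observation implied by the weight slot above; the helper name is made up, it simply re-assembles the ned2l expressions, and it assumes r > 1 so the gamma() terms are finite:

EIM.genlognormal3 <- function(sigma, r) {
  # Negative expected second derivatives, in the same order as wz above
  ss <- 1 + 1/r
  B  <- log(r) + digamma(ss)
  m11 <- (r - 1) * gamma(1 - 1/r) / (sigma^2 * r^(2/r) * gamma(ss))
  m22 <- r / sigma^2
  m33 <- (ss * trigamma(ss) + B^2 - 1) / r^3
  m23 <- -B / (r * sigma)
  matrix(c(m11,   0,   0,
             0, m22, m23,
             0, m23, m33), 3, 3, byrow = TRUE,
         dimnames = list(c("loc", "sigma", "r"), c("loc", "sigma", "r")))
}
EIM.genlognormal3(sigma = 1, r = 2)  # should be positive definite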
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 677d5b0..06e3b35 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -69,8 +69,7 @@ pkumar <- function(q, shape1, shape2) {
kumar <- function(lshape1 = "loge", lshape2 = "loge",
ishape1 = NULL, ishape2 = NULL,
grid.shape1 = c(0.4, 6.0),
- tol12 = 1.0e-4, zero = NULL)
-{
+ tol12 = 1.0e-4, zero = NULL) {
lshape1 <- as.list(substitute(lshape1))
@@ -272,20 +271,14 @@ pkumar <- function(q, shape1, shape2) {
- wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
-
- ind11 <- ind22 <- ind12 <- NULL
- for (ii in 1:(M / Musual)) {
- ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
- ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
- ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
- }
+ wz <- array(c(c(w) * ned2l.dshape11 * dshape1.deta^2,
+ c(w) * ned2l.dshape22 * dshape2.deta^2,
+ c(w) * ned2l.dshape12 * dshape1.deta * dshape2.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
- wz[, ind11] <- ned2l.dshape11 * dshape1.deta^2
- wz[, ind22] <- ned2l.dshape22 * dshape2.deta^2
- wz[, ind12] <- ned2l.dshape12 * dshape1.deta * dshape2.deta
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ wz
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2,
.tol12 = tol12 ))))
@@ -312,7 +305,7 @@ drice <- function(x, vee, sigma, log = FALSE) {
x.abs <- abs(x[xok] * vee[xok] / sigma[xok]^2)
logdensity[xok] <- log(x[xok]) - 2 * log(sigma[xok]) +
(-(x[xok]^2+vee[xok]^2)/(2*sigma[xok]^2)) +
- log(besselI(x.abs, nu=0, expon.scaled = TRUE)) +
+ log(besselI(x.abs, nu = 0, expon.scaled = TRUE)) +
x.abs
logdensity[sigma <= 0] <- NaN
logdensity[vee < 0] <- NaN
@@ -338,9 +331,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
riceff <- function(lvee = "loge", lsigma = "loge",
ivee = NULL, isigma = NULL,
- nsimEIM = 100, zero = NULL)
-{
-
+ nsimEIM = 100, zero = NULL) {
lvee <- as.list(substitute(lvee))
evee <- link2list(lvee)
lvee <- attr(evee, "function.name")
@@ -396,36 +387,38 @@ riceff.control <- function(save.weight = TRUE, ...) {
if (!length(etastart)) {
- riceff.Loglikfun <- function(vee, y, x, w, extraargs) {
- sigma.init = sd(rep(y, w))
+ riceff.Loglikfun <- function(vee, y, x, w, extraargs) {
+ sigma.init <- sd(rep(y, w))
sum(c(w) * (log(y) - 2*log(sigma.init) +
- log(besselI(y*vee/sigma.init^2, nu=0)) -
+ log(besselI(y*vee/sigma.init^2, nu = 0)) -
(y^2 + vee^2)/(2*sigma.init^2)))
}
- vee.grid =
+ vee.grid <-
seq(quantile(rep(y, w), probs = seq(0, 1, 0.2))["20%"],
quantile(rep(y, w), probs = seq(0, 1, 0.2))["80%"], len=11)
- vee.init = if (length( .ivee )) .ivee else
+ vee.init <- if (length( .ivee )) .ivee else
getMaxMin(vee.grid, objfun = riceff.Loglikfun,
- y = y, x = x, w = w)
- vee.init = rep(vee.init, length = length(y))
- sigma.init = if (length( .isigma )) .isigma else
+ y = y, x = x, w = w)
+ vee.init <- rep(vee.init, length = length(y))
+ sigma.init <- if (length( .isigma )) .isigma else
sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
- sigma.init = rep(sigma.init, length = length(y))
- etastart = cbind(theta2eta(vee.init, .lvee, earg = .evee),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ sigma.init <- rep(sigma.init, length = length(y))
+ etastart <-
+ cbind(theta2eta(vee.init, .lvee, earg = .evee),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
}), list( .lvee = lvee, .lsigma = lsigma,
.ivee = ivee, .isigma = isigma,
.evee = evee, .esigma = esigma ))),
linkinv = eval(substitute(function(eta, extra = NULL){
- vee = eta2theta(eta[, 1], link = .lvee, earg = .evee)
- sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
- temp9 = -vee^2 / (2*sigma^2)
+ vee <- eta2theta(eta[, 1], link = .lvee, earg = .evee)
+ sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ temp9 <- -vee^2 / (2*sigma^2)
- sigma * sqrt(pi/2) * ((1-temp9) * besselI(-temp9/2,nu=0,expon = TRUE) -
- temp9 * besselI(-temp9/2,nu=1,expon = TRUE))
+ sigma * sqrt(pi/2) *
+ ((1-temp9) * besselI(-temp9/2, nu = 0, expon = TRUE) -
+ temp9 * besselI(-temp9/2, nu = 1, expon = TRUE))
}, list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma ))),
last = eval(substitute(expression({
@@ -433,15 +426,15 @@ riceff.control <- function(save.weight = TRUE, ...) {
misc$earg <- list("vee" = .evee, "sigma" = .esigma)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
- vee = eta2theta(eta[, 1], link = .lvee, earg = .evee)
- sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ vee <- eta2theta(eta[, 1], link = .lvee , earg = .evee )
+ sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(c(w) * drice(x = y, vee = vee, sigma = sigma, log = TRUE))
@@ -450,45 +443,47 @@ riceff.control <- function(save.weight = TRUE, ...) {
.evee = evee, .esigma = esigma ))),
vfamily = c("riceff"),
deriv = eval(substitute(expression({
- vee = eta2theta(eta[, 1], link = .lvee, earg = .evee)
- sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ vee <- eta2theta(eta[, 1], link = .lvee, earg = .evee)
+ sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
- dvee.deta = dtheta.deta(vee, link = .lvee, earg = .evee)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ dvee.deta <- dtheta.deta(vee, link = .lvee, earg = .evee)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
- temp8 = y * vee / sigma^2
- dl.dvee = -vee/sigma^2 + (y/sigma^2) *
- besselI(temp8, nu=1) / besselI(temp8, nu=0)
- dl.dsigma = -2/sigma + (y^2 + vee^2)/(sigma^3) - (2 * temp8 / sigma) *
- besselI(temp8, nu=1) / besselI(temp8, nu=0)
+ temp8 <- y * vee / sigma^2
+ dl.dvee <- -vee/sigma^2 + (y/sigma^2) *
+ besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
+ dl.dsigma <- -2/sigma + (y^2 + vee^2)/(sigma^3) -
+ (2 * temp8 / sigma) *
+ besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
c(w) * cbind(dl.dvee * dvee.deta,
dl.dsigma * dsigma.deta)
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
- run.var = run.cov = 0
+ run.var <- run.cov <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = rrice(n, vee = vee, sigma = sigma)
- temp8 = ysim * vee / sigma^2
- dl.dvee = -vee/sigma^2 + (ysim/sigma^2) *
- besselI(temp8, nu=1) / besselI(temp8, nu=0)
- dl.dsigma = -2/sigma + (ysim^2 + vee^2)/(sigma^3) -
- (2 * temp8 / sigma) *
- besselI(temp8, nu=1) / besselI(temp8, nu=0)
+ ysim <- rrice(n, vee = vee, sigma = sigma)
+ temp8 <- ysim * vee / sigma^2
+ dl.dvee <- -vee/sigma^2 + (ysim/sigma^2) *
+ besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
+ dl.dsigma <- -2/sigma + (ysim^2 + vee^2)/(sigma^3) -
+ (2 * temp8 / sigma) *
+ besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
rm(ysim)
- temp3 = cbind(dl.dvee, dl.dsigma)
- run.var = ((ii-1) * run.var + temp3^2) / ii
- run.cov = ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
+ temp3 <- cbind(dl.dvee, dl.dsigma)
+ run.var <- ((ii-1) * run.var + temp3^2) / ii
+ run.cov <- ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(cbind(run.var, run.cov)),
n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
- dtheta.detas = cbind(dvee.deta, dsigma.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[, index0$row] * dtheta.detas[, index0$col]
+ dtheta.detas <- cbind(dvee.deta, dsigma.deta)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
c(w) * wz
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))))
@@ -503,31 +498,32 @@ dskellam <- function(x, mu1, mu2, log = FALSE) {
rm(log)
- L = max(length(x), length(mu1), length(mu2))
- x = rep(x, len = L);
- mu1 = rep(mu1, len = L);
- mu2 = rep(mu2, len = L);
+ L <- max(length(x), length(mu1), length(mu2))
+ x <- rep(x, len = L);
+ mu1 <- rep(mu1, len = L);
+ mu2 <- rep(mu2, len = L);
ok2 <- is.finite(mu1) & is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
ok3 <- (mu1 == 0) & (mu2 > 0)
ok4 <- (mu1 > 0) & (mu2 == 0)
ok5 <- (mu1 == 0) & (mu2 == 0)
if (log.arg) {
- ans = -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
- 0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
- log(besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE))
- ans[ok3] = dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
- ans[ok4] = dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
- ans[ok5] = dpois(x = x[ok5], lambda = 0.0, log = TRUE)
+ ans <- -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
+ 0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
+ log(besselI(2 * sqrt(mu1*mu2), nu = x,
+ expon.scaled = TRUE))
+ ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
+ ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
+ ans[ok5] <- dpois(x = x[ok5], lambda = 0.0, log = TRUE)
ans[x != round(x)] <- log(0.0)
} else {
- ans = (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
- besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE)
- ans[ok3] = dpois(x = -x[ok3], lambda = mu2[ok3])
- ans[ok4] = dpois(x = -x[ok4], lambda = mu1[ok4])
- ans[ok5] = dpois(x = x[ok5], lambda = 0.0)
- ans[x != round(x)] = 0.0
+ ans <- (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
+ besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE)
+ ans[ok3] <- dpois(x = -x[ok3], lambda = mu2[ok3])
+ ans[ok4] <- dpois(x = -x[ok4], lambda = mu1[ok4])
+ ans[ok5] <- dpois(x = x[ok5], lambda = 0.0)
+ ans[x != round(x)] <- 0.0
}
- ans[!ok2] = NaN
+ ans[!ok2] <- NaN
ans
}
@@ -549,8 +545,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
skellam <- function(lmu1 = "loge", lmu2 = "loge",
imu1 = NULL, imu2 = NULL,
- nsimEIM = 100, parallel = FALSE, zero = NULL)
-{
+ nsimEIM = 100, parallel = FALSE, zero = NULL) {
lmu1 <- as.list(substitute(lmu1))
emu1 <- link2list(lmu1)
@@ -583,8 +578,8 @@ skellam.control <- function(save.weight = TRUE, ...) {
"Mean: mu1-mu2", "\n",
"Variance: mu1+mu2"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
- intercept.apply = TRUE)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ apply.int = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -607,98 +602,101 @@ skellam.control <- function(save.weight = TRUE, ...) {
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = c(y), w = c(w))
- var.y.est = sum(c(w) * junk$resid^2) / junk$df.residual
- mean.init = weighted.mean(y, w)
- mu1.init = max((var.y.est + mean.init)/2, 0.01)
- mu2.init = max((var.y.est - mean.init)/2, 0.01)
- mu1.init = rep(if(length( .imu1 )) .imu1 else mu1.init,
- length = n)
- mu2.init = rep(if(length( .imu2 )) .imu2 else mu2.init,
- length = n)
- etastart = cbind(theta2eta(mu1.init, .lmu1, earg = .emu1),
- theta2eta(mu2.init, .lmu2, earg = .emu2))
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
+ var.y.est <- sum(c(w) * junk$resid^2) / junk$df.residual
+ mean.init <- weighted.mean(y, w)
+ mu1.init <- max((var.y.est + mean.init) / 2, 0.01)
+ mu2.init <- max((var.y.est - mean.init) / 2, 0.01)
+ mu1.init <- rep(if(length( .imu1 )) .imu1 else mu1.init,
+ length = n)
+ mu2.init <- rep(if(length( .imu2 )) .imu2 else mu2.init,
+ length = n)
+ etastart <- cbind(theta2eta(mu1.init, .lmu1, earg = .emu1),
+ theta2eta(mu2.init, .lmu2, earg = .emu2))
}
}), list( .lmu1 = lmu1, .lmu2 = lmu2,
.imu1 = imu1, .imu2 = imu2,
.emu1 = emu1, .emu2 = emu2 ))),
linkinv = eval(substitute(function(eta, extra = NULL){
- mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+ mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
mu1 - mu2
}, list( .lmu1 = lmu1, .lmu2 = lmu2,
.emu1 = emu1, .emu2 = emu2 ))),
last = eval(substitute(expression({
misc$link <- c("mu1" = .lmu1, "mu2" = .lmu2)
+
misc$earg <- list("mu1" = .emu1, "mu2" = .emu2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
}), list( .lmu1 = lmu1, .lmu2 = lmu2,
.emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
- mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+ mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- if ( is.logical( .parallel ) && length( .parallel )== 1 &&
- .parallel )
- sum(c(w) * log(besselI(2*mu1, nu=y, expon = TRUE))) else
- sum(c(w) * (-mu1 - mu2 +
- 0.5 * y * log(mu1) -
- 0.5 * y * log(mu2) +
- 2 * sqrt(mu1*mu2) + # Use this when expon = TRUE
- log(besselI(2 * sqrt(mu1*mu2), nu=y, expon = TRUE))))
- }
- }, list( .lmu1 = lmu1, .lmu2 = lmu2,
- .parallel = parallel,
- .emu1 = emu1, .emu2 = emu2 ))),
- vfamily = c("skellam"),
- deriv = eval(substitute(expression({
- mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
- dmu1.deta = dtheta.deta(mu1, link = .lmu1, earg = .emu1)
- dmu2.deta = dtheta.deta(mu2, link = .lmu2, earg = .emu2)
- temp8 = 2 * sqrt(mu1*mu2)
- temp9 = besselI(temp8, nu=y, expon = TRUE)
- temp7 = (besselI(temp8, nu=y-1, expon = TRUE) +
- besselI(temp8, nu=y+1, expon = TRUE)) / 2
- temp6 = temp7 / temp9
- dl.dmu1 = -1 + 0.5 * y / mu1 + sqrt(mu2/mu1) * temp6
- dl.dmu2 = -1 - 0.5 * y / mu2 + sqrt(mu1/mu2) * temp6
- c(w) * cbind(dl.dmu1 * dmu1.deta,
- dl.dmu2 * dmu2.deta)
- }), list( .lmu1 = lmu1, .lmu2 = lmu2,
- .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
- weight = eval(substitute(expression({
- run.var = run.cov = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rskellam(n, mu1=mu1, mu2=mu2)
- temp9 = besselI(temp8, nu=ysim, expon = TRUE)
- temp7 = (besselI(temp8, nu=ysim-1, expon = TRUE) +
- besselI(temp8, nu=ysim+1, expon = TRUE)) / 2
- temp6 = temp7 / temp9
- dl.dmu1 = -1 + 0.5 * ysim/mu1 + sqrt(mu2/mu1) * temp6
- dl.dmu2 = -1 - 0.5 * ysim/mu2 + sqrt(mu1/mu2) * temp6
- rm(ysim)
- temp3 = cbind(dl.dmu1, dl.dmu2)
- run.var = ((ii-1) * run.var + temp3^2) / ii
- run.cov = ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
+ if ( is.logical( .parallel ) && length( .parallel )== 1 &&
+ .parallel )
+ sum(c(w) * log(besselI(2*mu1, nu = y, expon = TRUE))) else
+ sum(c(w) * (-mu1 - mu2 +
+ 0.5 * y * log(mu1) -
+ 0.5 * y * log(mu2) +
+ 2 * sqrt(mu1*mu2) + # Use this when expon = TRUE
+ log(besselI(2 * sqrt(mu1*mu2), nu = y, expon = TRUE))))
}
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var, run.cov)),
- n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
-
- dtheta.detas = cbind(dmu1.deta, dmu2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[, index0$row] * dtheta.detas[, index0$col]
- c(w) * wz
- }), list( .lmu1 = lmu1, .lmu2 = lmu2,
- .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))))
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .parallel = parallel,
+ .emu1 = emu1, .emu2 = emu2 ))),
+ vfamily = c("skellam"),
+ deriv = eval(substitute(expression({
+ mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+ dmu1.deta <- dtheta.deta(mu1, link = .lmu1, earg = .emu1)
+ dmu2.deta <- dtheta.deta(mu2, link = .lmu2, earg = .emu2)
+ temp8 <- 2 * sqrt(mu1*mu2)
+ temp9 <- besselI(temp8, nu = y , expon = TRUE)
+ temp7 <- (besselI(temp8, nu = y-1, expon = TRUE) +
+ besselI(temp8, nu = y+1, expon = TRUE)) / 2
+ temp6 <- temp7 / temp9
+ dl.dmu1 <- -1 + 0.5 * y / mu1 + sqrt(mu2/mu1) * temp6
+ dl.dmu2 <- -1 - 0.5 * y / mu2 + sqrt(mu1/mu2) * temp6
+ c(w) * cbind(dl.dmu1 * dmu1.deta,
+ dl.dmu2 * dmu2.deta)
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
+ weight = eval(substitute(expression({
+ run.var <- run.cov <- 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- rskellam(n, mu1=mu1, mu2=mu2)
+ temp9 <- besselI(temp8, nu = ysim, expon = TRUE)
+ temp7 <- (besselI(temp8, nu = ysim-1, expon = TRUE) +
+ besselI(temp8, nu = ysim+1, expon = TRUE)) / 2
+ temp6 <- temp7 / temp9
+ dl.dmu1 <- -1 + 0.5 * ysim/mu1 + sqrt(mu2/mu1) * temp6
+ dl.dmu2 <- -1 - 0.5 * ysim/mu2 + sqrt(mu1/mu2) * temp6
+ rm(ysim)
+ temp3 <- cbind(dl.dmu1, dl.dmu2)
+ run.var <- ((ii-1) * run.var + temp3^2) / ii
+ run.cov <- ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
+ }
+ wz <- if (intercept.only)
+ matrix(colMeans(cbind(run.var, run.cov)),
+ n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
+
+ dtheta.detas <- cbind(dmu1.deta, dmu2.deta)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
+ c(w) * wz
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))))
}
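The weight slots of riceff and skellam above both estimate the expected information by simulation (nsimEIM), averaging products of simulated scores with a running mean. A toy standalone version of the same update, using a normal log-likelihood where the answer is known (diag(1/sigma^2, 2/sigma^2) with zero covariance):

set.seed(1)
mu <- 2; sigma <- 3; nsim <- 5000
run.var <- run.cov <- 0
for (ii in 1:nsim) {
  ysim <- rnorm(1, mean = mu, sd = sigma)
  dl.dmu    <- (ysim - mu) / sigma^2                 # score for mu
  dl.dsigma <- -1 / sigma + (ysim - mu)^2 / sigma^3  # score for sigma
  temp3   <- cbind(dl.dmu, dl.dsigma)
  run.var <- ((ii - 1) * run.var + temp3^2) / ii
  run.cov <- ((ii - 1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
}
cbind(run.var, run.cov)  # approximately 1/sigma^2, 2/sigma^2, 0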
@@ -714,7 +712,7 @@ dyules <- function(x, rho, log = FALSE) {
ans <- log(rho) + lbeta(abs(x), rho+1)
ans[(x != round(x)) | (x < 1)] <- log(0)
} else {
- ans = rho * beta(x, rho+1)
+ ans <- rho * beta(x, rho+1)
ans[(x != round(x)) | (x < 1)] <- 0
}
ans[!is.finite(rho) | (rho <= 0)] <- NA
@@ -747,8 +745,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
yulesimon <- function(link = "loge",
irho = NULL, nsimEIM = 200,
- zero = NULL)
-{
+ zero = NULL) {
if (length(irho) &&
!is.Numeric(irho, positive = TRUE))
@@ -822,16 +819,16 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- wmeany = colSums(y * w) / colSums(w) + 1/8
+ wmeany <- colSums(y * w) / colSums(w) + 1/8
- rho.init = wmeany / (wmeany - 1)
- rho.init = matrix(if (length( .irho )) .irho else
- rho.init, n, M, byrow = TRUE)
- etastart = theta2eta(rho.init, .link , earg = .earg )
+ rho.init <- wmeany / (wmeany - 1)
+ rho.init <- matrix(if (length( .irho )) .irho else
+ rho.init, n, M, byrow = TRUE)
+ etastart <- theta2eta(rho.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .irho = irho ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- ans = rho = eta2theta(eta, .link , earg = .earg )
+ ans <- rho <- eta2theta(eta, .link , earg = .earg )
ans[rho > 1] <- rho[rho > 1] / (rho[rho > 1] - 1)
ans[rho <= 1] <- NA
ans
@@ -856,7 +853,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
.irho = irho ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
- rho = eta2theta(eta, .link , earg = .earg )
+ rho <- eta2theta(eta, .link , earg = .earg )
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(c(w) * dyules(x = y, rho = rho, log = TRUE))
@@ -872,10 +869,10 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- run.var = 0
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
- ysim <- ryules(n, rho = rho)
- dl.drho = 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
+ ysim <- ryules(n, rho = rho)
+ dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
rm(ysim)
temp3 <- dl.drho
run.var <- ((ii-1) * run.var + temp3^2) / ii
@@ -931,8 +928,6 @@ plind <- function(q, theta) {
rlind <- function(n, theta) {
-
-
ifelse(runif(n) < theta / (1 + theta),
rexp(n, theta),
rgamma(n, shape = 2, scale = 1 / theta))
@@ -1246,7 +1241,7 @@ if (FALSE)
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- run.var = 0
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
ysim <- rpoislindley(n, theta = theta)
dl.dtheta <- 2 / theta + 1 / (ysim + 2 + theta) -
@@ -1274,19 +1269,19 @@ if (FALSE)
dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
- smallno =.Machine$double.eps*1000){
+ smallno = .Machine$double.eps * 1000) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
if (!is.Numeric(sigma) || any(sigma <= 0))
stop("argument 'sigma' must be positive")
- L = max(length(x), length(mu), length(sigma))
- x = rep(x, len = L);
- mu = rep(mu, len = L);
- sigma = rep(sigma, len = L)
+ L <- max(length(x), length(mu), length(sigma))
+ x <- rep(x, len = L);
+ mu <- rep(mu, len = L);
+ sigma <- rep(sigma, len = L)
- zedd = (x-mu)/sigma
+ zedd <- (x-mu)/sigma
if (log.arg)
ifelse(abs(zedd)<smallno, -log(2*sigma*sqrt(2*pi)),
log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2)) else
@@ -1295,49 +1290,43 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
}
-pslash <- function(q, mu = 0, sigma = 1){
- if (!is.Numeric(sigma) || any(sigma <= 0))
- stop("argument 'sigma' must be positive")
- L = max(length(q), length(mu), length(sigma))
- q = rep(q, len = L);
- mu = rep(mu, len = L);
- sigma = rep(sigma, len = L)
-
- ans = q * NA
- for (ii in 1:L) {
- temp = integrate(dslash, lower = -Inf, upper = q[ii])
- if (temp$message != "OK") {
- warning("integrate() failed")
- } else
- ans[ii] = temp$value
+pslash <- function(q, mu = 0, sigma = 1) {
+ if (!is.Numeric(sigma) || any(sigma <= 0))
+ stop("argument 'sigma' must be positive")
+ L <- max(length(q), length(mu), length(sigma))
+ q <- rep(q, len = L);
+ mu <- rep(mu, len = L);
+ sigma <- rep(sigma, len = L)
+
+ ans <- q * NA
+ for (ii in 1:L) {
+ temp <- integrate(dslash, lower = -Inf, upper = q[ii])
+ if (temp$message != "OK") {
+ warning("integrate() failed")
+ } else {
+ ans[ii] <- temp$value
}
- ans
+ }
+ ans
}
-rslash <- function (n, mu = 0, sigma = 1){
- if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
- allowable.length = 1))
- stop("bad input for argument 'n'")
- if (any(sigma <= 0))
- stop("argument 'sigma' must be positive")
+rslash <- function (n, mu = 0, sigma = 1) {
rnorm(n = n, mean = mu, sd = sigma) / runif(n = n)
}
-slash.control <- function(save.weight = TRUE, ...)
-{
+slash.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
slash <- function(lmu = "identity", lsigma = "loge",
- imu = NULL, isigma = NULL,
- iprobs = c(0.1, 0.9),
- nsimEIM = 250, zero = NULL,
- smallno = .Machine$double.eps * 1000)
-{
+ imu = NULL, isigma = NULL,
+ iprobs = c(0.1, 0.9),
+ nsimEIM = 250, zero = NULL,
+ smallno = .Machine$double.eps * 1000) {
lmu <- as.list(substitute(lmu))
emu <- link2list(lmu)
@@ -1383,7 +1372,7 @@ slash.control <- function(save.weight = TRUE, ...)
"\n1/(2*sigma*sqrt(2*pi))",
"\t\t\t\t\t\t\ty=mu\n")),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -1397,7 +1386,7 @@ slash.control <- function(save.weight = TRUE, ...)
y <- temp5$y
- predictors.names = c(
+ predictors.names <- c(
namesof("mu", .lmu, earg = .emu, tag = FALSE),
namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
@@ -1405,29 +1394,29 @@ slash.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
slash.Loglikfun <- function(mu, y, x, w, extraargs) {
- sigma = if (is.Numeric(.isigma)) .isigma else
+ sigma <- if (is.Numeric(.isigma)) .isigma else
max(0.01,
((quantile(rep(y, w), prob = 0.75)/2)-mu)/qnorm(0.75))
- zedd = (y-mu)/sigma
+ zedd <- (y-mu)/sigma
sum(c(w) * ifelse(abs(zedd)<.smallno,
-log(2*sigma*sqrt(2*pi)),
log1p(-exp(-zedd^2/2)) -
log(sqrt(2*pi) * sigma * zedd^2)))
}
- iprobs = .iprobs
- mu.grid = quantile(rep(y, w), probs=iprobs)
- mu.grid = seq(mu.grid[1], mu.grid[2], length=100)
- mu.init = if (length( .imu )) .imu else
+ iprobs <- .iprobs
+ mu.grid <- quantile(rep(y, w), probs=iprobs)
+ mu.grid <- seq(mu.grid[1], mu.grid[2], length=100)
+ mu.init <- if (length( .imu )) .imu else
getMaxMin(mu.grid, objfun = slash.Loglikfun,
y = y, x = x, w = w)
- sigma.init = if (is.Numeric(.isigma)) .isigma else
+ sigma.init <- if (is.Numeric(.isigma)) .isigma else
max(0.01,
((quantile(rep(y, w), prob = 0.75)/2) -
mu.init) / qnorm(0.75))
- mu.init = rep(mu.init, length = length(y))
- etastart = matrix(0, n, 2)
- etastart[, 1] = theta2eta(mu.init, .lmu, earg = .emu)
- etastart[, 2] = theta2eta(sigma.init, .lsigma, earg = .esigma)
+ mu.init <- rep(mu.init, length = length(y))
+ etastart <- matrix(0, n, 2)
+ etastart[, 1] <- theta2eta(mu.init, .lmu, earg = .emu)
+ etastart[, 2] <- theta2eta(sigma.init, .lsigma, earg = .esigma)
}
}), list( .lmu = lmu, .lsigma = lsigma,
.imu = imu, .isigma = isigma,
@@ -1441,15 +1430,15 @@ slash.control <- function(save.weight = TRUE, ...)
misc$earg <- list("mu" = .emu, "sigma" = .esigma)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
- mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
- sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
- zedd = (y - mu) / sigma
+ mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ zedd <- (y - mu) / sigma
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(c(w) * dslash(x = y, mu = mu, sigma = sigma, log = TRUE,
@@ -1459,51 +1448,55 @@ slash.control <- function(save.weight = TRUE, ...)
.emu = emu, .esigma = esigma, .smallno = smallno ))),
vfamily = c("slash"),
deriv = eval(substitute(expression({
- mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
- sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
- zedd = (y-mu)/sigma
- d3 = deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
+ mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+
+ dmu.deta <- dtheta.deta(mu, link = .lmu, earg = .emu)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
+ zedd <- (y-mu)/sigma
+ d3 <- deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2),
c("mu", "sigma"))
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient")
- dl.dmu = dl.dthetas[, 1]
- dl.dsigma = dl.dthetas[, 2]
- ind0 = (abs(zedd) < .smallno)
- dl.dmu[ind0] = 0
- dl.dsigma[ind0] = -1/sigma[ind0]
- ans = c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta)
+ eval.d3 <- eval(d3)
+ dl.dthetas <- attr(eval.d3, "gradient")
+ dl.dmu <- dl.dthetas[, 1]
+ dl.dsigma <- dl.dthetas[, 2]
+ ind0 <- (abs(zedd) < .smallno)
+ dl.dmu[ind0] <- 0
+ dl.dsigma[ind0] <- -1/sigma[ind0]
+ ans <- c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
ans
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma, .smallno = smallno ))),
weight=eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- sd3 = deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
- log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
- c("mu", "sigma"))
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ sd3 <- deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
+ log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
+ c("mu", "sigma"))
for(ii in 1:( .nsimEIM )) {
- ysim = rslash(n, mu = mu, sigma = sigma)
- seval.d3 = eval(sd3)
+ ysim <- rslash(n, mu = mu, sigma = sigma)
+ seval.d3 <- eval(sd3)
- dl.dthetas = attr(seval.d3, "gradient")
- dl.dmu = dl.dthetas[, 1]
- dl.dsigma = dl.dthetas[, 2]
+ dl.dthetas <- attr(seval.d3, "gradient")
+ dl.dmu <- dl.dthetas[, 1]
+ dl.dsigma <- dl.dthetas[, 2]
- temp3 = cbind(dl.dmu, dl.dsigma)
- run.varcov = ((ii-1) * run.varcov +
- temp3[, ind1$row.index]*temp3[, ind1$col.index]) / ii
+ temp3 <- cbind(dl.dmu, dl.dsigma)
+ run.varcov <- ((ii-1) * run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov, na.rm = FALSE),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- dthetas.detas = cbind(dmu.deta, dsigma.deta)
- wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ dthetas.detas <- cbind(dmu.deta, dsigma.deta)
+ wz <- wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
c(w) * wz
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma,
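The slash family's deriv and weight slots lean on deriv3() to obtain the score vector symbolically. A tiny standalone illustration of the same call pattern on a normal log-density (values chosen arbitrarily):

mu <- 1; sigma <- 2; y <- 3; w <- 1
d3 <- deriv3(~ w * (-log(sigma) - (y - mu)^2 / (2 * sigma^2)),
             c("mu", "sigma"))
eval.d3 <- eval(d3)
attr(eval.d3, "gradient")  # dl/dmu = 0.5 and dl/dsigma = 0 at these values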
@@ -1519,26 +1512,24 @@ dnefghs <- function(x, tau, log = FALSE) {
rm(log)
- N = max(length(x), length(tau))
- x = rep(x, len = N); tau = rep(tau, len = N);
+ N <- max(length(x), length(tau))
+ x <- rep(x, len = N); tau <- rep(tau, len = N)
- logdensity = log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
- logdensity[tau < 0] = NaN
- logdensity[tau > 1] = NaN
+ logdensity <- log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
+ logdensity[tau < 0] <- NaN
+ logdensity[tau > 1] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
nefghs <- function(link = "logit",
- itau = NULL, imethod = 1)
-{
+ itau = NULL, imethod = 1) {
if (length(itau) &&
!is.Numeric(itau, positive = TRUE) ||
any(itau >= 1))
stop("argument 'itau' must be in (0, 1)")
-
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -1551,13 +1542,12 @@ dnefghs <- function(x, tau, log = FALSE) {
new("vglmff",
blurb = c("Natural exponential family generalized hyperbolic ",
- "secant distribution\n",
- "f(y) = sin(pi*tau)*exp((1-tau)*y)/(pi*(1+exp(y))\n\n",
- "Link: ",
- namesof("tau", link, earg = earg), "\n\n",
- "Mean: pi / tan(pi * tau)\n"),
+ "secant distribution\n",
+ "f(y) = sin(pi*tau)*exp((1-tau)*y)/(pi*(1+exp(y))\n\n",
+ "Link: ",
+ namesof("tau", link, earg = earg), "\n\n",
+ "Mean: pi / tan(pi * tau)\n"),
initialize = eval(substitute(expression({
-
temp5 <-
w.y.check(w = w, y = y,
ncol.w.max = 1,
@@ -1573,46 +1563,49 @@ dnefghs <- function(x, tau, log = FALSE) {
if (!length(etastart)) {
- wmeany = if ( .imethod == 1) weighted.mean(y, w) else
- median(rep(y, w))
- if (abs(wmeany) < 0.01) wmeany = 0.01
- tau.init = atan(pi / wmeany) / pi + 0.5
- tau.init[tau.init < 0.03] = 0.03
- tau.init[tau.init > 0.97] = 0.97
- tau.init = rep(if (length( .itau )) .itau else tau.init,
- len = n)
- etastart = theta2eta(tau.init, .link , earg = .earg )
- }
- }), list( .link = link, .earg = earg, .itau = itau,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- tau = eta2theta(eta, .link , earg = .earg )
- pi / tan(pi * tau)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c(tau = .link)
- misc$earg <- list(tau = .earg )
- misc$expected = TRUE
- misc$imethod= .imethod
- }), list( .link = link, .earg = earg, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
- tau = eta2theta(eta, .link , earg = .earg )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(c(w) * dnefghs(x = y, tau = tau, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("nefghs"),
- deriv = eval(substitute(expression({
- tau = eta2theta(eta, .link , earg = .earg )
- dl.dtau = pi / tan(pi * tau) - y
- dtau.deta = dtheta.deta(tau, .link , earg = .earg )
- w * dl.dtau * dtau.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- d2l.dtau2 = (pi / sin(pi * tau))^2
- wz = d2l.dtau2 * dtau.deta^2
+ wmeany <- if ( .imethod == 1) weighted.mean(y, w) else
+ median(rep(y, w))
+ if (abs(wmeany) < 0.01)
+ wmeany <- 0.01
+ tau.init <- atan(pi / wmeany) / pi + 0.5
+ tau.init[tau.init < 0.03] <- 0.03
+ tau.init[tau.init > 0.97] <- 0.97
+ tau.init <- rep(if (length( .itau )) .itau else tau.init,
+ len = n)
+ etastart <- theta2eta(tau.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .itau = itau,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ tau <- eta2theta(eta, .link , earg = .earg )
+ pi / tan(pi * tau)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(tau = .link)
+
+ misc$earg <- list(tau = .earg )
+
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ }), list( .link = link, .earg = earg, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ tau <- eta2theta(eta, .link , earg = .earg )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * dnefghs(x = y, tau = tau, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("nefghs"),
+ deriv = eval(substitute(expression({
+ tau <- eta2theta(eta, .link , earg = .earg )
+ dl.dtau <- pi / tan(pi * tau) - y
+ dtau.deta <- dtheta.deta(tau, .link , earg = .earg )
+ w * dl.dtau * dtau.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ d2l.dtau2 <- (pi / sin(pi * tau))^2
+ wz <- d2l.dtau2 * dtau.deta^2
c(w) * wz
}), list( .link = link ))))
}
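A quick numerical sanity check (assuming dnefghs() as defined above is available in the workspace) that the stated mean pi / tan(pi * tau) agrees with the density; this is consistent with the deriv slot, where E[dl/dtau] = 0 forces E[y] = pi / tan(pi * tau):

tau <- 0.3
integrate(function(y) y * dnefghs(y, tau), lower = -Inf, upper = Inf)$value
pi / tan(pi * tau)  # both approximately 2.28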
@@ -1626,8 +1619,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
rm(log)
- logdensity = -shape2*x - lbeta(shape1, shape2) -
- (shape1 + shape2) * log1p(exp(-x))
+ logdensity <- -shape2*x - lbeta(shape1, shape2) -
+ (shape1 + shape2) * log1p(exp(-x))
if (log.arg) logdensity else exp(logdensity)
}
@@ -1635,9 +1628,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
logF <- function(lshape1 = "loge", lshape2 = "loge",
- ishape1 = NULL, ishape2 = 1,
- imethod = 1)
-{
+ ishape1 = NULL, ishape2 = 1,
+ imethod = 1) {
if (length(ishape1) &&
!is.Numeric(ishape1, positive = TRUE))
stop("argument 'ishape1' must be positive")
@@ -1663,12 +1655,12 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
new("vglmff",
blurb = c("log F distribution\n",
- "f(y) = exp(-shape2*y)/(beta(shape1,shape2)*",
- "(1+exp(-y))^(shape1+shape2))\n\n",
- "Link: ",
- namesof("shape1", lshape1, earg = eshape1), ", ",
- namesof("shape2", lshape2, earg = eshape2), "\n\n",
- "Mean: digamma(shape1) - digamma(shape2)"),
+ "f(y) = exp(-shape2 * y) / (beta(shape1, shape2) * ",
+ "(1 + exp(-y))^(shape1 + shape2))\n\n",
+ "Link: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), "\n\n",
+ "Mean: digamma(shape1) - digamma(shape2)"),
initialize = eval(substitute(expression({
temp5 <-
@@ -1681,26 +1673,26 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
y <- temp5$y
- predictors.names = c(
+ predictors.names <- c(
namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE))
if (!length(etastart)) {
- wmeany = if ( .imethod == 1) weighted.mean(y, w) else
- median(rep(y, w))
+ wmeany <- if ( .imethod == 1) weighted.mean(y, w) else
+ median(rep(y, w))
- shape1.init = shape2.init = rep( .ishape2, len = n)
- shape1.init = if (length( .ishape1))
+ shape1.init <- shape2.init <- rep( .ishape2, len = n)
+ shape1.init <- if (length( .ishape1))
rep( .ishape1, len = n) else {
- index1 = (y > wmeany)
- shape1.init[index1] = shape2.init[index1] + 1/1
- shape1.init[!index1] = shape2.init[!index1] - 1/1
- shape1.init = pmax(shape1.init, 1/8)
+ index1 <- (y > wmeany)
+ shape1.init[ index1] <- shape2.init[ index1] + 1/1
+ shape1.init[!index1] <- shape2.init[!index1] - 1/1
+ shape1.init <- pmax(shape1.init, 1/8)
shape1.init
}
- etastart =
+ etastart <-
cbind(theta2eta(shape1.init, .lshape1, earg = .eshape1),
theta2eta(shape2.init, .lshape2, earg = .eshape2))
}
@@ -1709,23 +1701,25 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
.ishape1 = ishape1, .ishape2 = ishape2,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+ shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
digamma(shape1) - digamma(shape2)
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
misc$link <- c(shape1 = .lshape1, shape2 = .lshape2)
+
misc$earg <- list(shape1 = .eshape1, shape2 = .eshape2)
- misc$expected = TRUE
- misc$imethod= .imethod
+
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
- shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+ shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(c(w) * dlogF(x = y, shape1 = shape1,
@@ -1735,27 +1729,31 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("logF"),
deriv = eval(substitute(expression({
- shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
- tmp888 = digamma(shape1 + shape2) - log1p(exp(-y))
- dl.dshape1 = tmp888 - digamma(shape1)
- dl.dshape2 = tmp888 - digamma(shape2) - y
- dshape1.deta = dtheta.deta(shape1, .lshape1, earg = .eshape1)
- dshape2.deta = dtheta.deta(shape2, .lshape2, earg = .eshape2)
+ shape1 <- eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 <- eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+
+ tmp888 <- digamma(shape1 + shape2) - log1p(exp(-y))
+ dl.dshape1 <- tmp888 - digamma(shape1)
+ dl.dshape2 <- tmp888 - digamma(shape2) - y
+
+ dshape1.deta <- dtheta.deta(shape1, .lshape1, earg = .eshape1)
+ dshape2.deta <- dtheta.deta(shape2, .lshape2, earg = .eshape2)
+
c(w) * cbind(dl.dshape1 * dshape1.deta,
dl.dshape2 * dshape2.deta)
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
- tmp888 = trigamma(shape1 + shape2)
- d2l.dshape12 = trigamma(shape1) - tmp888
- d2l.dshape22 = trigamma(shape2) - tmp888
- d2l.dshape1shape2 = -tmp888
- wz = matrix(0, n, dimm(M))
- wz[,iam(1, 1, M = M)] = d2l.dshape12 * dshape1.deta^2
- wz[,iam(2, 2, M = M)] = d2l.dshape22 * dshape2.deta^2
- wz[,iam(1, 2, M = M)] = d2l.dshape1shape2 * dshape1.deta *
- dshape2.deta
+ tmp888 <- trigamma(shape1 + shape2)
+ ned2l.dshape12 <- trigamma(shape1) - tmp888
+ ned2l.dshape22 <- trigamma(shape2) - tmp888
+ ned2l.dshape1shape2 <- -tmp888
+
+ wz <- matrix(0, n, dimm(M))
+ wz[,iam(1, 1, M = M)] <- ned2l.dshape12 * dshape1.deta^2
+ wz[,iam(2, 2, M = M)] <- ned2l.dshape22 * dshape2.deta^2
+ wz[,iam(1, 2, M = M)] <- ned2l.dshape1shape2 * dshape1.deta *
+ dshape2.deta
c(w) * wz
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))))
@@ -1865,3 +1863,210 @@ qbenf <- function(p, ndigits = 1) {
+
+
+
+
+
+
+ truncgeometric <-
+ function(upper.limit = Inf, # lower.limit = 1, # Inclusive
+ link = "logit", expected = TRUE,
+ imethod = 1, iprob = NULL, zero = NULL) {
+
+ if (is.finite(upper.limit) &&
+ !is.Numeric(upper.limit, integer.valued = TRUE,
+ positive = TRUE))
+ stop("bad input for argument 'upper.limit'")
+
+ if (any(upper.limit < 0))
+ stop("bad input for argument 'upper.limit'")
+
+
+
+ if (!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument 'expected'")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ uu.ll <- min(upper.limit)
+
+
+ new("vglmff",
+ blurb = c("Truncated geometric distribution ",
+ "(P[Y=y] =\n",
+ " ",
+ "prob * (1 - prob)^y / [1-(1-prob)^",
+ uu.ll+1, "], y = 0,1,...,",
+ uu.ll, ")\n",
+ "Link: ",
+ namesof("prob", link, earg = earg), "\n",
+ "Mean: mu = 1 / prob - 1 ",
+ ifelse(is.finite(upper.limit),
+ paste("- (", upper.limit+1, ") * (1 - prob)^",
+ upper.limit+1, " / (1 - ",
+ "(1 - prob)^", upper.limit+1, ")", sep = ""),
+ "")),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ upper.limit = .upper.limit ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .upper.limit = upper.limit ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+ extra$upper.limit <- matrix( .upper.limit , n, ncoly, byrow = TRUE)
+
+ if (any(y > extra$upper.limit))
+ stop("some response values greater than argument 'upper.limit'")
+
+
+ mynames1 <- paste("prob", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ prob.init <- if ( .imethod == 2)
+ 1 / (1 + y + 1/16) else
+ if ( .imethod == 3)
+ 1 / (1 + apply(y, 2, median) + 1/16) else
+ 1 / (1 + colSums(y * w) / colSums(w) + 1/16)
+
+ if (!is.matrix(prob.init))
+ prob.init <- matrix(prob.init, n, M, byrow = TRUE)
+
+
+ if (length( .iprob ))
+ prob.init <- matrix( .iprob , n, M, byrow = TRUE)
+
+
+ etastart <- theta2eta(prob.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg,
+ .upper.limit = upper.limit,
+ .imethod = imethod, .iprob = iprob ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ prob <- eta2theta(eta, .link , earg = .earg )
+ QQQ <- 1 - prob
+ upper.limit <- extra$upper.limit
+ tmp1 <- QQQ^(upper.limit+1)
+ answer <- 1 / prob - 1 - (upper.limit+1) * tmp1 / (1 - tmp1)
+ answer[!is.finite(answer)] <- 1 / prob[!is.finite(answer)] - 1
+ answer
+ }, list( .link = link, .earg = earg ))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$multipleResponses <- TRUE
+ misc$expected <- .expected
+ misc$imethod <- .imethod
+ misc$iprob <- .iprob
+ }), list( .link = link, .earg = earg,
+ .iprob = iprob,
+ .upper.limit = upper.limit,
+ .expected = expected, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ upper.limit <- extra$upper.limit
+ sum(c(w) * (dgeom(x = y, prob = prob, log = TRUE) -
+ log1p(-(1.0 - prob)^(1 + upper.limit))))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("truncgeometric"),
+ deriv = eval(substitute(expression({
+ prob <- eta2theta(eta, .link , earg = .earg )
+ sss <- upper.limit <- extra$upper.limit # Is a matrix
+
+ QQQ <- 1 - prob
+ tmp1 <- QQQ^(upper.limit + 1)
+ dl.dprob <- 1 / prob + (0 - y) / (1 - prob) -
+ (1+upper.limit) * QQQ^(upper.limit - 0) / (1 - tmp1)
+ dl.dprob[!is.finite(upper.limit)] <- 1 / prob[!is.finite(upper.limit)] +
+ (0 - y[!is.finite(upper.limit)]) / (1 - prob[!is.finite(upper.limit)])
+
+
+ dprobdeta <- dtheta.deta(prob, .link , earg = .earg )
+ c(w) * cbind(dl.dprob * dprobdeta)
+ }), list( .link = link, .earg = earg,
+ .upper.limit = upper.limit,
+ .expected = expected ))),
+ weight = eval(substitute(expression({
+
+ eim.oim.fun <- function(mu.y, sss)
+ ifelse(is.finite(sss),
+ 1/prob^2 + (0 + mu.y) / QQQ^2 - (1+sss) *
+ ((sss-0) * QQQ^(sss-1) / (1 - tmp1) +
+ (1+sss) * QQQ^(2*sss) / (1 - tmp1)^2),
+ 1 / (prob^2 * (1 - prob)))
+
+
+ ned2l.dprob2 <- if ( .expected ) {
+ eim.oim.fun(mu, sss)
+ } else {
+ eim.oim.fun(y, sss)
+ }
+ wz <- ned2l.dprob2 * dprobdeta^2
+ if ( !( .expected ))
+ wz <- wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
+ c(w) * wz
+ }), list( .link = link, .earg = earg,
+ .expected = expected ))))
+}
+
+
+
+
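A hypothetical usage sketch for the new truncgeometric() family (data, seed and settings made up; assumes this version of VGAM is loaded):

set.seed(123)
upper <- 10
y <- rgeom(1000, prob = 0.25)
y <- y[y <= upper]                      # crude right-truncated sample
fit <- vglm(y ~ 1, truncgeometric(upper.limit = upper), trace = TRUE)
Coef(fit)                               # estimate of 'prob'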
diff --git a/R/family.basics.R b/R/family.basics.R
index 4dd751a..63e833c 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,124 +8,142 @@
-getind <- function(constraints, M, ncolx) {
+ getind <- function(constraints, M, ncolx) {
- if (!length(constraints)) {
- constraints = vector("list", ncolx)
- for (ii in 1:ncolx)
- constraints[[ii]] <- diag(M)
- }
+ if (!length(constraints)) {
+
+ constraints <- vector("list", ncolx)
+ for (ii in 1:ncolx)
+ constraints[[ii]] <- diag(M)
+ }
- ans <- vector("list", M+1)
- names(ans) <- c(paste("eta", 1:M, sep = ""), "ncolX_vlm")
-
- temp2 <- matrix(unlist(constraints), nrow=M)
- for (kk in 1:M) {
- ansx <- NULL
- for (ii in 1:length(constraints)) {
- temp <- constraints[[ii]]
- isfox <- any(temp[kk,] != 0)
- if (isfox) {
- ansx <- c(ansx, ii)
- }
- }
- ans[[kk]] <- list(xindex = ansx,
- X_vlmindex = (1:ncol(temp2))[temp2[kk,] != 0])
+ ans <- vector("list", M+1)
+ names(ans) <- c(paste("eta", 1:M, sep = ""), "ncolX_vlm")
+
+ temp2 <- matrix(unlist(constraints), nrow = M)
+ for (kk in 1:M) {
+ ansx <- NULL
+ for (ii in 1:length(constraints)) {
+ temp <- constraints[[ii]]
+ isfox <- any(temp[kk, ] != 0)
+ if (isfox) {
+ ansx <- c(ansx, ii)
+ }
}
- ans[[M+1]] <- ncol(temp2)
+ ans[[kk]] <- list(xindex = ansx,
+ X_vlmindex = (1:ncol(temp2))[temp2[kk,] != 0])
+ }
+ ans[[M+1]] <- ncol(temp2)
- ans
+ ans
}
cm.vgam <- function(cm, x, bool, constraints,
- intercept.apply = FALSE, overwrite = FALSE)
-{
+ apply.int = FALSE, overwrite = FALSE,
+ cm.default = diag(nrow(cm)), # 20121226
+ cm.intercept.default = diag(nrow(cm)) # 20121226
+ ) {
- if (is.null(bool)) return(NULL)
- M <- nrow(cm)
- asgn <- attr(x, "assign")
- if(is.null(asgn))
- stop("the 'assign' attribute is missing from 'x'; this ",
- "may be due to some missing values") # 20100306
- nasgn <- names(asgn)
- ninasgn <- nasgn[nasgn != "(Intercept)"]
-
- if (!length(constraints)) {
- constraints <- vector("list", length(nasgn))
- for (ii in 1:length(nasgn)) {
- constraints[[ii]] <- diag(M)
- }
- names(constraints) <- nasgn
- }
- if (!is.list(constraints))
- stop("argument 'constraints' must be a list")
-
- if (length(constraints) != length(nasgn) ||
- any(sort(names(constraints)) != sort(nasgn))) {
- cat("\nnames(constraints)\n")
- print(names(constraints) )
- cat("\nnames(attr(x, 'assign'))\n")
- print( nasgn )
- stop("The above don't match; 'constraints' is half-pie")
+ if (is.null(bool))
+ return(NULL)
+
+ if (!is.matrix(cm))
+ stop("argument 'cm' is not a matrix")
+ M <- nrow(cm)
+ asgn <- attr(x, "assign")
+ if(is.null(asgn))
+ stop("the 'assign' attribute is missing from 'x'; this ",
+ "may be due to some missing values") # 20100306
+ nasgn <- names(asgn)
+ ninasgn <- nasgn[nasgn != "(Intercept)"]
+
+ if (!length(constraints)) {
+ constraints <- vector("list", length(nasgn))
+ for (ii in 1:length(nasgn)) {
+ constraints[[ii]] <- cm.default # diag(M)
}
+ names(constraints) <- nasgn
+
+
+ if (any(nasgn == "(Intercept)"))
+ constraints[["(Intercept)"]] <- cm.intercept.default
+ }
+
+ if (!is.list(constraints))
+ stop("argument 'constraints' must be a list")
+
+ if (length(constraints) != length(nasgn) ||
+ any(sort(names(constraints)) != sort(nasgn))) {
+ cat("\nnames(constraints)\n")
+ print(names(constraints) )
+ cat("\nnames(attr(x, 'assign'))\n")
+ print( nasgn )
+ stop("The above do not match; 'constraints' is half-pie")
+ }
- if (is.logical(bool)) {
- if (bool) {
- if (intercept.apply && any(nasgn == "(Intercept)"))
- constraints[["(Intercept)"]] <- cm
- if (length(ninasgn))
- for (ii in ninasgn)
- constraints[[ii]] <- cm
- } else {
- return(constraints)
- }
+
+
+
+ if (is.logical(bool)) {
+ if (bool) {
+ if (any(nasgn == "(Intercept)") && apply.int)
+ constraints[["(Intercept)"]] <- cm
+
+
+ if (length(ninasgn))
+ for (ii in ninasgn)
+ constraints[[ii]] <- cm
} else {
- tbool <- terms(bool)
- if (attr(tbool, "response")) {
- ii <- attr(tbool, "factors")
- default <- dimnames(ii)[[1]]
- default <- default[1]
- default <- parse(text = default[1])[[1]]
- default <- as.logical(eval(default))
- } else {
- default <- TRUE
- }
- tl <- attr(tbool, "term.labels")
- if (attr(tbool, "intercept"))
- tl <- c("(Intercept)", tl)
-
- for (ii in nasgn) {
- if (default && any(tl == ii))
- constraints[[ii]] <- cm
- if (!default && !any(tl == ii))
- constraints[[ii]] <- cm
- }
+ return(constraints)
}
+ } else {
+ tbool <- terms(bool)
+ if (attr(tbool, "response")) {
+ ii <- attr(tbool, "factors")
+ default <- dimnames(ii)[[1]]
+ default <- default[1]
+ default <- parse(text = default[1])[[1]]
+ default <- as.logical(eval(default))
+ } else {
+ default <- TRUE
+ }
+ tl <- attr(tbool, "term.labels")
+ if (attr(tbool, "intercept"))
+ tl <- c("(Intercept)", tl)
+
+ for (ii in nasgn) {
+ if ( default && any(tl == ii))
+ constraints[[ii]] <- cm
+ if (!default && !any(tl == ii))
+ constraints[[ii]] <- cm
+ }
+ }
- constraints
+ constraints
}
-cm.nointercept.vgam <- function(constraints, x, nointercept, M)
-{
+
+cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
asgn <- attr(x, "assign")
nasgn <- names(asgn)
if (is.null(constraints)) {
- constraints <- vector("list", length(nasgn)) # list()
+ constraints <- vector("list", length(nasgn)) # list()
names(constraints) <- nasgn
}
if (!is.list(constraints))
stop("'constraints' must be a list")
+
for (ii in 1:length(asgn))
constraints[[nasgn[ii]]] <- if (is.null(constraints[[nasgn[ii]]]))
diag(M) else eval(constraints[[nasgn[ii]]])
@@ -154,8 +172,8 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M)
-cm.zero.vgam <- function(constraints, x, zero, M)
-{
+
+ cm.zero.vgam <- function(constraints, x, zero, M) {
asgn <- attr(x, "assign")
nasgn <- names(asgn)
@@ -165,6 +183,7 @@ cm.zero.vgam <- function(constraints, x, zero, M)
}
if (!is.list(constraints))
stop("'constraints' must be a list")
+
for (ii in 1:length(asgn))
constraints[[nasgn[ii]]] <- if (is.null(constraints[[nasgn[ii]]]))
diag(M) else eval(constraints[[nasgn[ii]]])
@@ -179,25 +198,25 @@ cm.zero.vgam <- function(constraints, x, zero, M)
stop("cannot fit an intercept to a no-intercept model")
if (2 <= length(constraints))
- for (ii in 2:length(constraints)) {
- Hmatk <- constraints[[nasgn[ii]]]
- Hmatk[zero, ] <- 0
- index <- NULL
- for (kk in 1:ncol(Hmatk))
- if (all(Hmatk[,kk] == 0)) index <- c(index, kk)
- if (length(index) == ncol(Hmatk))
- stop("constraint matrix has no columns!")
- if (!is.null(index))
- Hmatk <- Hmatk[, -index, drop = FALSE]
- constraints[[nasgn[ii]]] <- Hmatk
- }
+ for (ii in 2:length(constraints)) {
+ Hmatk <- constraints[[nasgn[ii]]]
+ Hmatk[zero, ] <- 0
+ index <- NULL
+ for (kk in 1:ncol(Hmatk))
+ if (all(Hmatk[,kk] == 0)) index <- c(index, kk)
+ if (length(index) == ncol(Hmatk))
+ stop("constraint matrix has no columns!")
+ if (!is.null(index))
+ Hmatk <- Hmatk[, -index, drop = FALSE]
+ constraints[[nasgn[ii]]] <- Hmatk
+ }
constraints
}
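A plain-R sketch (no VGAM internals) of the zeroing step that cm.zero.vgam() performs on each non-intercept constraint matrix: rows listed in 'zero' are set to 0 and any all-zero columns are dropped:

Hmatk <- diag(3)                         # trivial constraint matrix, M = 3
zero  <- c(2, 3)                         # eta2 and eta3 become intercept-only
Hmatk[zero, ] <- 0
Hmatk <- Hmatk[, colSums(Hmatk != 0) > 0, drop = FALSE]
Hmatk                                    # 3 x 1 matrix: (1, 0, 0)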
-process.constraints <- function(constraints, x, M,
- by.col = TRUE, specialCM = NULL)
-{
+
+ process.constraints <- function(constraints, x, M,
+ by.col = TRUE, specialCM = NULL) {
@@ -206,10 +225,10 @@ process.constraints <- function(constraints, x, M,
nasgn <- names(asgn)
if (is.null(constraints)) {
- constraints <- vector("list", length(nasgn))
- for (ii in 1:length(nasgn))
- constraints[[ii]] <- diag(M)
- names(constraints) <- nasgn
+ constraints <- vector("list", length(nasgn))
+ for (ii in 1:length(nasgn))
+ constraints[[ii]] <- diag(M)
+ names(constraints) <- nasgn
}
if (is.matrix(constraints))
@@ -220,32 +239,32 @@ process.constraints <- function(constraints, x, M,
lenconstraints <- length(constraints)
if (lenconstraints > 0)
- for (ii in 1:lenconstraints) {
+ for (ii in 1:lenconstraints) {
constraints[[ii]] <- eval(constraints[[ii]])
if (!is.null (constraints[[ii]]) &&
!is.matrix(constraints[[ii]]))
stop("'constraints[[", ii, "]]' is not a matrix")
- }
+ }
if (is.null(names(constraints)))
- names(constraints) <- rep(nasgn, length.out = lenconstraints)
+ names(constraints) <- rep(nasgn, length.out = lenconstraints)
temp <- if (!is.R()) list() else {
- junk <- vector("list", length(nasgn))
- names(junk) <- nasgn
- junk
+ junk <- vector("list", length(nasgn))
+ names(junk) <- nasgn
+ junk
}
for (ii in 1:length(nasgn))
- temp[[nasgn[ii]]] <-
- if (is.null(constraints[[nasgn[ii]]])) diag(M) else
- eval(constraints[[nasgn[ii]]])
+ temp[[nasgn[ii]]] <-
+ if (is.null(constraints[[nasgn[ii]]])) diag(M) else
+ eval(constraints[[nasgn[ii]]])
for (ii in 1:length(asgn)) {
if (!is.matrix(temp[[ii]])) {
- stop("not a constraint matrix")
+ stop("not a constraint matrix")
}
if (ncol(temp[[ii]]) > M)
- stop("constraint matrix has too many columns")
+ stop("constraint matrix has too many columns")
}
if (!by.col)
@@ -254,17 +273,17 @@ process.constraints <- function(constraints, x, M,
constraints <- temp
Blist <- vector("list", ncol(x))
for (ii in 1:length(asgn)) {
- cols <- asgn[[ii]]
- ictr = 0
- for (jay in cols) {
- ictr = ictr + 1
- cm = if (is.list(specialCM) &&
- any(nasgn[ii] == names(specialCM))) {
- slist = specialCM[[(nasgn[ii])]]
- slist[[ictr]]
- } else constraints[[ii]]
- Blist[[jay]] <- cm
- }
+ cols <- asgn[[ii]]
+ ictr <- 0
+ for (jay in cols) {
+ ictr <- ictr + 1
+ cm <- if (is.list(specialCM) &&
+ any(nasgn[ii] == names(specialCM))) {
+ slist <- specialCM[[(nasgn[ii])]]
+ slist[[ictr]]
+ } else constraints[[ii]]
+ Blist[[jay]] <- cm
+ }
}
names(Blist) <- dimnames(x)[[2]]
Blist
@@ -273,8 +292,9 @@ process.constraints <- function(constraints, x, M,
- trivial.constraints <- function(Blist, target = diag(M))
-{
+
+ trivial.constraints <- function(Blist, target = diag(M)) {
+
if (is.null(Blist))
return(1)
@@ -285,7 +305,7 @@ process.constraints <- function(constraints, x, M,
if (!is.matrix(target))
stop("target is not a matrix")
- dimtar = dim(target)
+ dimtar <- dim(target)
trivc <- rep(1, length(Blist))
names(trivc) <- names(Blist)
@@ -305,9 +325,9 @@ process.constraints <- function(constraints, x, M,
}
-add.constraints <- function(constraints, new.constraints,
- overwrite = FALSE, check = FALSE)
-{
+
+ add.constraints <- function(constraints, new.constraints,
+ overwrite = FALSE, check = FALSE) {
empty.list <- function(l)
(is.null(l) || (is.list(l) && length(l) == 0))
@@ -400,7 +420,7 @@ add.constraints <- function(constraints, new.constraints,
-dimm <- function(M, hbw = M) {
+ dimm <- function(M, hbw = M) {
if (!is.numeric(hbw))
hbw <- M
@@ -424,8 +444,7 @@ dimm <- function(M, hbw = M) {
}
- m2adefault <- function(m, M, upper = FALSE, allow.vector = FALSE)
-{
+ m2adefault <- function(m, M, upper = FALSE, allow.vector = FALSE) {
if (!is.numeric(m))
stop("argument 'm' is not numeric")
@@ -454,7 +473,7 @@ dimm <- function(M, hbw = M) {
-a2m <- function(a, hbw = M) {
+ a2m <- function(a, hbw = M) {
@@ -486,9 +505,8 @@ a2m <- function(a, hbw = M) {
-vindex <- function(M, row.arg = FALSE, col.arg = FALSE,
- length.arg = M * (M + 1) / 2)
-{
+ vindex <- function(M, row.arg = FALSE, col.arg = FALSE,
+ length.arg = M * (M + 1) / 2) {
@@ -512,6 +530,8 @@ vindex <- function(M, row.arg = FALSE, col.arg = FALSE,
+
+
if(!exists("is.R"))
is.R <- function()
exists("version") &&
@@ -522,84 +542,84 @@ if(!exists("is.R"))
-wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
- ignore.slot = FALSE, checkwz = TRUE) {
+ wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
+ ignore.slot = FALSE, checkwz = TRUE) {
- if (length(wz <- object@weights) && !ignore.slot && !deriv.arg) {
- return(wz)
- }
+ if (length(wz <- object@weights) && !ignore.slot && !deriv.arg) {
+ return(wz)
+ }
- M <- object@misc$M # Done below
- n <- object@misc$n # Done below
+ M <- object@misc$M # Done below
+ n <- object@misc$n # Done below
- if (any(slotNames(object) == "extra")) {
- extra <- object at extra
- if (length(extra) == 1 && !length(names(extra))) {
- # Usage was something like vglm(..., extra = 5)
- # so, internally, extra == 5 and not a list
- extra <- extra[[1]]
- }
+ if (any(slotNames(object) == "extra")) {
+ extra <- object@extra
+ if (length(extra) == 1 && !length(names(extra))) {
+ extra <- extra[[1]]
}
- mu <- object@fitted.values
- if (any(slotNames(object) == "predictors"))
- eta <- object@predictors
- mt <- terms(object) # object@terms$terms; 11/8/03
- Blist <- constraints <- object@constraints
- new.coeffs <- object@coefficients
- if (any(slotNames(object) == "iter"))
- iter <- object@iter
-
+ }
+ mu <- object@fitted.values
+ if (any(slotNames(object) == "predictors"))
+ eta <- object@predictors
+ mt <- terms(object) # object@terms$terms; 11/8/03
+ Blist <- constraints <- object@constraints
+ new.coeffs <- object@coefficients
+ if (any(slotNames(object) == "iter"))
+ iter <- object@iter
+
+ w <- rep(1, n)
+ if (any(slotNames(object) == "prior.weights"))
+ w <- object@prior.weights
+ if (!length(w))
w <- rep(1, n)
- if (any(slotNames(object) == "prior.weights"))
- w <- object@prior.weights
- if (!length(w))
- w <- rep(1, n)
-
- x <- object@x
- if (!length(x))
- x <- model.matrixvlm(object, type = "lm")
- y <- object@y
-
- if (any(slotNames(object) == "control"))
- for (ii in names(object@control)) {
- assign(ii, object@control[[ii]])
- }
-
- if (length(object@misc))
- for (ii in names(object@misc)) {
- assign(ii, object@misc[[ii]])
- }
-
- if (any(slotNames(object) == "family")) {
- expr <- object@family@deriv
- deriv.mu <- eval(expr)
- if (!length(wz)) {
- expr <- object@family@weight
- wz <- eval(expr)
-
-
- if (M > 1)
- dimnames(wz) = list(dimnames(wz)[[1]], NULL) # Remove colnames
- wz = if (matrix.arg) as.matrix(wz) else c(wz)
- }
- if (deriv.arg) list(deriv=deriv.mu, weights=wz) else wz
- } else NULL
+
+ x <- object@x
+ if (!length(x))
+ x <- model.matrixvlm(object, type = "lm")
+ y <- object@y
+
+ if (any(slotNames(object) == "control"))
+ for (ii in names(object@control)) {
+ assign(ii, object@control[[ii]])
+ }
+
+ if (length(object@misc))
+ for (ii in names(object@misc)) {
+ assign(ii, object@misc[[ii]])
+ }
+
+ if (any(slotNames(object) == "family")) {
+ expr <- object@family@deriv
+ deriv.mu <- eval(expr)
+ if (!length(wz)) {
+ expr <- object@family@weight
+ wz <- eval(expr)
+
+
+ if (M > 1)
+ dimnames(wz) <- list(dimnames(wz)[[1]], NULL) # Remove colnames
+ wz <- if (matrix.arg) as.matrix(wz) else c(wz)
+ }
+ if (deriv.arg) list(deriv = deriv.mu, weights = wz) else wz
+ } else {
+ NULL
+ }
}
-pweights <- function(object, ...) {
- ans = object@prior.weights
+ pweights <- function(object, ...) {
+ ans <- object@prior.weights
if (length(ans)) {
ans
} else {
- temp = object@y
- ans = rep(1, nrow(temp)) # Assumed all equal and unity.
- names(ans) = dimnames(temp)[[1]]
+ temp <- object@y
+ ans <- rep(1, nrow(temp)) # Assumed all equal and unity.
+ names(ans) <- dimnames(temp)[[1]]
ans
}
}
@@ -613,45 +633,43 @@ procVec <- function(vec, yn, Default) {
- if (any(is.na(vec)))
- stop("vec cannot contain any NAs")
- L = length(vec)
- nvec <- names(vec) # vec[""] undefined
- named = length(nvec) # FALSE for c(1,3)
- if (named) {
- index = (1:L)[nvec == ""]
- default = if (length(index)) vec[index] else Default
- } else {
- default = vec
- }
+ if (any(is.na(vec)))
+ stop("vec cannot contain any NAs")
+ L <- length(vec)
+ nvec <- names(vec) # vec[""] undefined
+ named <- length(nvec) # FALSE for c(1,3)
+ if (named) {
+ index <- (1:L)[nvec == ""]
+ default <- if (length(index)) vec[index] else Default
+ } else {
+ default <- vec
+ }
- answer = rep(default, length.out = length(yn))
- names(answer) = yn
- if (named) {
- nvec2 = nvec[nvec != ""]
- if (length(nvec2)) {
- if (any(!is.element(nvec2, yn)))
- stop("some names given which are superfluous")
- answer = rep(as.numeric(NA), length.out = length(yn))
- names(answer) = yn
- answer[nvec2] = vec[nvec2]
- answer[is.na(answer)] =
- rep(default, length.out = sum(is.na(answer)))
- }
+ answer <- rep(default, length.out = length(yn))
+ names(answer) <- yn
+ if (named) {
+ nvec2 <- nvec[nvec != ""]
+ if (length(nvec2)) {
+ if (any(!is.element(nvec2, yn)))
+ stop("some names given which are superfluous")
+ answer <- rep(as.numeric(NA), length.out = length(yn))
+ names(answer) <- yn
+ answer[nvec2] <- vec[nvec2]
+ answer[is.na(answer)] <-
+ rep(default, length.out = sum(is.na(answer)))
}
+ }
- answer
+ answer
}
if (FALSE) {
-
if (!isGeneric("m2a"))
setGeneric("m2a", function(object, ...) standardGeneric("m2a"))
-
setMethod("m2a", "vglm",
function(object, ...)
m2avglm(object, ...))
@@ -659,7 +677,7 @@ setMethod("m2a", "vglm",
-weightsvglm <- function(object, type = c("prior", "working"),
+ weightsvglm <- function(object, type = c("prior", "working"),
matrix.arg = TRUE, ignore.slot = FALSE,
deriv.arg = FALSE, ...) {
weightsvlm(object, type = type, matrix.arg = matrix.arg,
@@ -669,12 +687,12 @@ weightsvglm <- function(object, type = c("prior", "working"),
-weightsvlm <- function(object, type = c("prior", "working"),
+ weightsvlm <- function(object, type = c("prior", "working"),
matrix.arg = TRUE, ignore.slot = FALSE,
deriv.arg = FALSE, ...) {
if (mode(type) != "character" && mode(type) != "name")
- type = as.character(substitute(type))
- type = match.arg(type, c("prior", "working"))[1]
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("prior", "working"))[1]
if (type == "working") {
wweights(object = object,
@@ -683,14 +701,15 @@ weightsvlm <- function(object, type = c("prior", "working"),
} else {
if (deriv.arg)
stop("cannot set 'deriv = TRUE' when 'type=\"prior\"'")
- ans = pweights(object)
+ ans <- pweights(object)
if (matrix.arg) as.matrix(ans) else c(ans)
}
}
if (!isGeneric("weights"))
- setGeneric("weights", function(object, ...) standardGeneric("weights"))
+ setGeneric("weights", function(object, ...)
+ standardGeneric("weights"))
setMethod("weights", "vlm",
@@ -728,24 +747,24 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
if (M == 1) {
- dderiv = cbind(dderiv)
- deta = cbind(deta)
+ dderiv <- cbind(dderiv)
+ deta <- cbind(deta)
}
- Bs = mux22(t(wzold), deta, M = M, upper = FALSE, as.matrix = TRUE) # n x M
- sBs = c( (deta * Bs) %*% rep(1, M) ) # should have positive values
- sy = c( (dderiv * deta) %*% rep(1, M) )
- wznew = wzold
- index = iam(NA, NA, M = M, both = TRUE)
- index$row.index = rep(index$row.index, len=ncol(wzold))
- index$col.index = rep(index$col.index, len=ncol(wzold))
- updateThese = if (keeppd) (sy > effpos) else rep(TRUE, len=length(sy))
+ Bs <- mux22(t(wzold), deta, M = M, upper = FALSE, as.matrix = TRUE) # n x M
+ sBs <- c( (deta * Bs) %*% rep(1, M) ) # should have positive values
+ sy <- c( (dderiv * deta) %*% rep(1, M) )
+ wznew <- wzold
+ index <- iam(NA, NA, M = M, both = TRUE)
+ index$row.index <- rep(index$row.index, len=ncol(wzold))
+ index$col.index <- rep(index$col.index, len=ncol(wzold))
+ updateThese <- if (keeppd) (sy > effpos) else rep(TRUE, len=length(sy))
if (!keeppd || any(updateThese)) {
- wznew[updateThese,] = wznew[updateThese,] -
+ wznew[updateThese,] <- wznew[updateThese,] -
Bs[updateThese,index$row] *
Bs[updateThese,index$col] / sBs[updateThese] +
dderiv[updateThese,index$row] *
dderiv[updateThese,index$col] / sy[updateThese]
- notupdated = sum(!updateThese)
+ notupdated <- sum(!updateThese)
if (notupdated && trace)
cat(notupdated,
"weight matrices not updated out of", length(sy), "\n")
@@ -766,21 +785,20 @@ mbesselI0 <- function(x, deriv.arg = 0) {
if (!is.Numeric(deriv.arg, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) &&
deriv.arg != 0)
- stop("argument 'deriv.arg' must be a single non-negative integer")
+ stop("argument 'deriv.arg' must be a single non-negative integer")
if (!(deriv.arg == 0 || deriv.arg == 1 || deriv.arg == 2))
- stop("argument 'deriv' must be 0, 1, or 2")
+ stop("argument 'deriv' must be 0, 1, or 2")
if (!is.Numeric(x))
- stop("bad input for argument 'x'")
- nn = length(x)
+ stop("bad input for argument 'x'")
+ nn <- length(x)
if (FALSE) {
}
- # Use finite differences
- ans = matrix(as.numeric(NA), nrow=nn, ncol=deriv.arg+1)
- ans[, 1] = besselI(x, nu=0)
- if (deriv.arg>=1) ans[,2] = besselI(x, nu=1)
- if (deriv.arg>=2) ans[,3] = ans[,1] - ans[,2] / x
+ ans <- matrix(as.numeric(NA), nrow = nn, ncol = deriv.arg+1)
+ ans[, 1] <- besselI(x, nu = 0)
+ if (deriv.arg>=1) ans[,2] <- besselI(x, nu = 1)
+ if (deriv.arg>=2) ans[,3] <- ans[,1] - ans[,2] / x
ans
}
@@ -809,22 +827,14 @@ VGAM.matrix.norm <- function(A, power = 2, suppressWarning = FALSE) {
rmfromVGAMenv <- function(varnames, prefix = "") {
- evarnames = paste(prefix, varnames, sep = "")
- if (is.R()) {
- for (ii in evarnames) {
- mytext1 = "exists(x = ii, envir = VGAM:::VGAMenv)"
- myexp1 = parse(text = mytext1)
- is.there = eval(myexp1)
- if (is.there) {
- rm(list = ii, envir = VGAM:::VGAMenv)
- }
+ evarnames <- paste(prefix, varnames, sep = "")
+ for (ii in evarnames) {
+ mytext1 <- "exists(x = ii, envir = VGAM:::VGAMenv)"
+ myexp1 <- parse(text = mytext1)
+ is.there <- eval(myexp1)
+ if (is.there) {
+ rm(list = ii, envir = VGAM:::VGAMenv)
}
- } else {
- warning("this code needs checking 9")
- for (ii in evarnames)
- while(exists(ii, inherits = TRUE))
- rm(ii, inherits = TRUE)
-
}
}
@@ -832,35 +842,23 @@ rmfromVGAMenv <- function(varnames, prefix = "") {
existsinVGAMenv <- function(varnames, prefix = "") {
- evarnames = paste(prefix, varnames, sep = "")
- ans = NULL
- if (is.R()) {
- for (ii in evarnames) {
- mytext1 = "exists(x = ii, envir = VGAM:::VGAMenv)"
- myexp1 = parse(text = mytext1)
- is.there = eval(myexp1)
- ans = c(ans, is.there)
- }
- } else {
- warning("this code needs checking 8")
- for (ii in evarnames) {
- is.there = exists(ii, inherits = TRUE)
- ans = c(ans, is.there)
- }
+ evarnames <- paste(prefix, varnames, sep = "")
+ ans <- NULL
+ for (ii in evarnames) {
+ mytext1 <- "exists(x = ii, envir = VGAM:::VGAMenv)"
+ myexp1 <- parse(text = mytext1)
+ is.there <- eval(myexp1)
+ ans <- c(ans, is.there)
}
ans
}
assign2VGAMenv <- function(varnames, mylist, prefix = "") {
- evarnames = paste(prefix, varnames, sep = "")
- if (is.R()) {
- for (ii in 1:length(varnames)) {
- assign(evarnames[ii], mylist[[(varnames[ii])]],
- envir = VGAM:::VGAMenv)
- }
- } else {
- stop("uncomment the lines below")
+ evarnames <- paste(prefix, varnames, sep = "")
+ for (ii in 1:length(varnames)) {
+ assign(evarnames[ii], mylist[[(varnames[ii])]],
+ envir = VGAM:::VGAMenv)
}
}
@@ -870,38 +868,39 @@ assign2VGAMenv <- function(varnames, mylist, prefix = "") {
getfromVGAMenv <- function(varname, prefix = "") {
- varname = paste(prefix, varname, sep = "")
- if (length(varname) > 1) stop("'varname' must be of length 1")
- get(varname, envir = VGAM:::VGAMenv)
+ varname <- paste(prefix, varname, sep = "")
+ if (length(varname) > 1)
+ stop("'varname' must be of length 1")
+ get(varname, envir = VGAM:::VGAMenv)
}
lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
- if (!is.Numeric(x) || !is.Numeric(s) || !is.Numeric(v))
- stop("bad input in 'x', 's', and/or 'v'")
- if (is.complex(c(x,s,v)))
- stop("complex arguments not allowed in 'x', 's' and 'v'")
- if (!is.Numeric(tolerance, allowable.length = 1, positive = TRUE) ||
- tolerance > 0.01)
- stop("bad input for argument 'tolerance'")
- if (!is.Numeric(iter, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'iter'")
-
- L = max(length(x), length(s), length(v))
- x = rep(x, length.out = L);
- s = rep(s, length.out = L);
- v = rep(v, length.out = L);
- xok = abs(x) < 1 & !(v <= 0 & v == round(v))
- x[!xok] = 0 # Fix this later
-
- ans = dotC(name = "lerchphi123", err = integer(L), as.integer(L),
- as.double(x), as.double(s), as.double(v),
- acc=as.double(tolerance), result=double(L),
- as.integer(iter))
-
- ifelse(ans$err == 0 & xok , ans$result, NA)
+ if (!is.Numeric(x) || !is.Numeric(s) || !is.Numeric(v))
+ stop("bad input in 'x', 's', and/or 'v'")
+ if (is.complex(c(x,s,v)))
+ stop("complex arguments not allowed in 'x', 's' and 'v'")
+ if (!is.Numeric(tolerance, allowable.length = 1, positive = TRUE) ||
+ tolerance > 0.01)
+ stop("bad input for argument 'tolerance'")
+ if (!is.Numeric(iter, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'iter'")
+
+ L <- max(length(x), length(s), length(v))
+ x <- rep(x, length.out = L);
+ s <- rep(s, length.out = L);
+ v <- rep(v, length.out = L);
+ xok <- abs(x) < 1 & !(v <= 0 & v == round(v))
+ x[!xok] <- 0 # Fix this later
+
+ ans <- dotC(name = "lerchphi123", err = integer(L), as.integer(L),
+ as.double(x), as.double(s), as.double(v),
+ acc=as.double(tolerance), result=double(L),
+ as.integer(iter))
+
+ ifelse(ans$err == 0 & xok , ans$result, NA)
}
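
For context, lerch() evaluates the Lerch transcendental function
Phi(x, s, v) = sum_{n >= 0} x^n / (n + v)^s for |x| < 1 via the compiled
'lerchphi123' routine. A minimal sanity check, assuming lerch() is exported
by the installed VGAM (the closed-form reference value is the standard
series identity, not something taken from this patch):

  library(VGAM)
  # sum_{n >= 0} 0.5^n / (n + 1) = -log(1 - 0.5) / 0.5 = 2 * log(2)
  lerch(x = 0.5, s = 1, v = 1)   # approximately 1.386294
  2 * log(2)                     # reference value
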
@@ -945,7 +944,7 @@ negzero.expression <- expression({
-is.empty.list = function(mylist) {
+is.empty.list <- function(mylist) {
is.list(mylist) &&
length(unlist(mylist)) == 0
}
@@ -955,7 +954,8 @@ is.empty.list = function(mylist) {
-interleave.VGAM = function(L, M) c(matrix(1:L, nrow = M, byrow = TRUE))
+interleave.VGAM <- function(L, M)
+ c(matrix(1:L, nrow = M, byrow = TRUE))
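
interleave.VGAM() simply reads 1:L back column-wise from an M-row matrix
filled by row, which interleaves the indices belonging to the M linear
predictors. It is an internal helper, so the sketch below reaches it with
::: purely for illustration:

  # L = 6 indices, M = 2 linear predictors
  VGAM:::interleave.VGAM(6, 2)
  # [1] 1 4 2 5 3 6
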
@@ -983,7 +983,7 @@ w.wz.merge <- function(w, wz, n, M, ndepy,
w <- matrix(w, n, ndepy)
w.rep <- matrix(0, n, ncol(wz))
Musual <- M / ndepy
- all.indices = iam(NA, NA, M = M, both = TRUE)
+ all.indices <- iam(NA, NA, M = M, both = TRUE)
@@ -1075,11 +1075,11 @@ w.y.check <- function(w, y,
if (maximize) {
- Ncol.max.w = max(ncol(w), ncol(y) / colsyperw)
- Ncol.max.y = max(ncol(y), ncol(w) * colsyperw)
+ Ncol.max.w <- max(ncol(w), ncol(y) / colsyperw)
+ Ncol.max.y <- max(ncol(y), ncol(w) * colsyperw)
} else {
- Ncol.max.w = ncol(w)
- Ncol.max.y = ncol(y)
+ Ncol.max.w <- ncol(w)
+ Ncol.max.y <- ncol(y)
}
if (out.wy && ncol(w) < Ncol.max.w) {
@@ -1108,6 +1108,50 @@ w.y.check <- function(w, y,
+
+arwz2wz <- function(arwz, M = 1, Musual = 1) {
+
+
+
+ if (length(dim.arwz <- dim(arwz)) != 3)
+ stop("dimension of 'arwz' should be of length 3")
+ n <- dim.arwz[1]
+ ndepy <- dim.arwz[2]
+ dim.val <- dim.arwz[3]
+
+ if (ndepy == 1) {
+ dim(arwz) <- c(n, dim.val)
+ return(arwz)
+ }
+
+ wz <- matrix(0.0, nrow = n, ncol = sum(M:(M-Musual+1)))
+ ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ len.ind1 <- dim.val # length(ind1$col.index)
+
+ for (ii in 1:ndepy) {
+ for (jlocal in 1:len.ind1) {
+ wz[, iam(Musual * (ii - 1) + ind1$row[jlocal],
+ Musual * (ii - 1) + ind1$col[jlocal],
+ M = M)] <- arwz[, ii, jlocal]
+ }
+ }
+
+ colind <- ncol(wz)
+ while (all(wz[, colind] == 0))
+ colind <- colind - 1
+
+ if (colind < ncol(wz))
+ wz <- wz[, 1:colind, drop = FALSE]
+
+ wz
+}
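
arwz2wz() is a new internal utility that flattens an n x ndepy x (bands)
array of per-response working weights into the band-matrix layout used by
the fitters, trimming trailing all-zero columns. A rough sketch of the
reshaping, assuming VGAM's iam() convention that the M diagonal elements
occupy the first M columns (the ::: access is only for illustration, since
the function is not exported):

  arwz <- array(1, dim = c(10, 3, 1))   # n = 10, ndepy = 3, one band each
  wz <- VGAM:::arwz2wz(arwz, M = 3, Musual = 1)
  dim(wz)                               # 10 x 3: one diagonal column per response
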
+
+
+
+
+
+
+
vweighted.mean.default <- function (x, w, ..., na.rm = FALSE) {
temp5 <- w.y.check(w = w, y = x, ncol.w.max = Inf, ncol.y.max = Inf,
out.wy = TRUE,
diff --git a/R/family.binomial.R b/R/family.binomial.R
index a347537..8517ed0 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,7 +18,7 @@ process.binomial2.data.vgam <- expression({
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
if (!is.matrix(y)) {
@@ -73,11 +73,11 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
- betabinomial <- function(lmu = "logit", lrho = "logit",
+ betabinomial <- function(lmu = "logit",
+ lrho = "logit",
irho = NULL,
imethod = 1, shrinkage.init = 0.95,
- nsimEIM = NULL, zero = 2)
-{
+ nsimEIM = NULL, zero = 2) {
lmu <- as.list(substitute(lmu))
emu <- link2list(lmu)
lmu <- attr(emu, "function.name")
@@ -113,7 +113,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
"Mean: mu", "\n",
"Variance: mu*(1-mu)*(1+(w-1)*rho)/w"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
if (!all(w == 1))
@@ -135,8 +135,9 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
warning("the response (as counts) does not appear to ",
"be integer-valued. Am rounding to integer values.")
ycounts <- round(ycounts) # Make sure it is an integer
- predictors.names <- c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("rho", .lrho, earg = .erho, tag = FALSE))
+ predictors.names <-
+ c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("rho", .lrho , earg = .erho , tag = FALSE))
if (!length(etastart)) {
betabinomial.Loglikfun <- function(rhoval, y, x, w, extraargs) {
shape1 <- extraargs$mustart * (1-rhoval) / rhoval
@@ -144,7 +145,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
ycounts <- extraargs$ycounts # Ought to be integer-valued
nvec <- extraargs$nvec
sum(dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE))
+ shape2 = shape2, log = TRUE))
}
rho.grid <- seq(0.05, 0.95, len=21) # rvar =
mustart.use =
@@ -185,11 +186,13 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
misc$link <- c(mu = .lmu, rho = .lrho)
+
misc$earg <- list(mu = .emu, rho = .erho)
+
misc$zero <- .zero
misc$expected <- TRUE
- misc$nsimEIM = .nsimEIM
- misc$rho = 1 / (shape1 + shape2 + 1)
+ misc$nsimEIM <- .nsimEIM
+ misc$rho <- 1 / (shape1 + shape2 + 1)
}), list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho,
.nsimEIM = nsimEIM, .zero = zero ))),
@@ -199,11 +202,11 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
y * w # Convert proportions to counts
mymu <- eta2theta(eta[, 1], .lmu, earg = .emu)
- rho <- eta2theta(eta[, 2], .lrho, earg = .erho)
+ rho <- eta2theta(eta[, 2], .lrho , earg = .erho)
smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
+ warning("converting 'ycounts' to integer in @loglikelihood")
ycounts <- round(ycounts)
rho <- pmax(rho, smallno)
@@ -231,8 +234,8 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
y * w # Convert proportions to counts
ycounts <- round(ycounts)
- mymu <- eta2theta(eta[, 1], .lmu, earg = .emu)
- rho <- eta2theta(eta[, 2], .lrho, earg = .erho)
+ mymu <- eta2theta(eta[, 1], .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], .lrho , earg = .erho)
smallno <- 100 * .Machine$double.eps
rho <- pmax(rho, smallno)
rho <- pmin(rho, 1-smallno)
@@ -244,8 +247,8 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
dshape1.drho <- -mymu / rho^2
dshape2.drho <- -(1 - mymu) / rho^2
- dmu.deta <- dtheta.deta(mymu, .lmu, earg = .emu)
- drho.deta <- dtheta.deta(rho, .lrho, earg = .erho)
+ dmu.deta <- dtheta.deta(mymu, .lmu , earg = .emu)
+ drho.deta <- dtheta.deta(rho, .lrho , earg = .erho)
dl.dmu <- dshape1.dmu * (digamma(shape1+ycounts) -
digamma(shape2+nvec-ycounts) -
@@ -262,7 +265,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
.emu = emu, .erho = erho ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM )) {
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
wz11 <- -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
trigamma(shape1+shape2+nvec) -
trigamma(shape1) + trigamma(shape1+shape2))
@@ -288,8 +291,9 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
dthetas.detas <- cbind(dmu.deta, drho.deta)
for (ii in 1:( .nsimEIM )) {
- ysim <- rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
- shape2 = shape2)
+ ysim <- rbetabinom.ab(n = n, size = nvec,
+ shape1 = shape1,
+ shape2 = shape2)
dl.dmu <- dshape1.dmu * (digamma(shape1+ysim) -
digamma(shape2+nvec-ysim) -
digamma(shape1) + digamma(shape2))
@@ -332,8 +336,7 @@ dbinom2.or <- function(mu1,
exchangeable = FALSE,
tol = 0.001,
colnames = c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
-{
+ ErrorCheck = TRUE) {
if (ErrorCheck) {
if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
stop("bad input for argument 'mu1'")
@@ -348,20 +351,20 @@ dbinom2.or <- function(mu1,
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- n = max(length(mu1), length(mu2), length(oratio))
- oratio = rep(oratio, len = n)
- mu1 = rep(mu1, len = n)
- mu2 = rep(mu2, len = n)
+ n <- max(length(mu1), length(mu2), length(oratio))
+ oratio <- rep(oratio, len = n)
+ mu1 <- rep(mu1, len = n)
+ mu2 <- rep(mu2, len = n)
- a.temp = 1 + (mu1+mu2)*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * mu1 * mu2
- temp = sqrt(a.temp^2 + b.temp)
- p11 = ifelse(abs(oratio-1) < tol,
+ a.temp <- 1 + (mu1+mu2)*(oratio-1)
+ b.temp <- -4 * oratio * (oratio-1) * mu1 * mu2
+ temp <- sqrt(a.temp^2 + b.temp)
+ p11 <- ifelse(abs(oratio-1) < tol,
mu1*mu2,
(a.temp-temp)/(2*(oratio-1)))
- p01 = mu2 - p11
- p10 = mu1 - p11
- p00 = 1 - p11 - p01 - p10
+ p01 <- mu2 - p11
+ p10 <- mu1 - p11
+ p00 <- 1 - p11 - p01 - p10
matrix(c(p00, p01, p10, p11), n, 4, dimnames = list(NULL, colnames))
}
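
dbinom2.or() returns the four joint cell probabilities of a bivariate
binary response parameterised by its two marginals and the odds ratio;
when oratio is within tol of 1 the quadratic-root formula collapses to
independence, so p11 = mu1 * mu2. A small check of that special case,
assuming dbinom2.or() is exported as in this release:

  library(VGAM)
  dbinom2.or(mu1 = 0.6, mu2 = 0.4, oratio = 1)
  #        00   01   10   11
  # [1,] 0.24 0.16 0.36 0.24   (row sums to 1; the "11" cell is 0.6 * 0.4)
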
@@ -377,8 +380,7 @@ rbinom2.or <- function(n, mu1,
twoCols = TRUE,
colnames = if (twoCols) c("y1", "y2") else
c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
-{
+ ErrorCheck = TRUE) {
if (ErrorCheck) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE,
allowable.length = 1))
@@ -396,25 +398,25 @@ rbinom2.or <- function(n, mu1,
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- dmat = dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = oratio,
- exchangeable = exchangeable,
- tol = tol, ErrorCheck = ErrorCheck)
-
- answer = matrix(0, n, 2,
- dimnames = list(NULL,
- if (twoCols) colnames else NULL))
- yy = runif(n)
- cs1 = dmat[, "00"] + dmat[, "01"]
- cs2 = cs1 + dmat[, "10"]
- index = (dmat[, "00"] < yy) & (yy <= cs1)
- answer[index, 2] = 1
- index = (cs1 < yy) & (yy <= cs2)
- answer[index, 1] = 1
- index = (yy > cs2)
- answer[index,] = 1
+ dmat <- dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = oratio,
+ exchangeable = exchangeable,
+ tol = tol, ErrorCheck = ErrorCheck)
+
+ answer <- matrix(0, n, 2,
+ dimnames = list(NULL,
+ if (twoCols) colnames else NULL))
+ yy <- runif(n)
+ cs1 <- dmat[, "00"] + dmat[, "01"]
+ cs2 <- cs1 + dmat[, "10"]
+ index <- (dmat[, "00"] < yy) & (yy <= cs1)
+ answer[index, 2] <- 1
+ index <- (cs1 < yy) & (yy <= cs2)
+ answer[index, 1] <- 1
+ index <- (yy > cs2)
+ answer[index,] <- 1
if (twoCols) answer else {
- answer4 = matrix(0, n, 4, dimnames = list(NULL, colnames))
- answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] = 1
+ answer4 <- matrix(0, n, 4, dimnames = list(NULL, colnames))
+ answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] <- 1
answer4
}
}
@@ -426,8 +428,7 @@ rbinom2.or <- function(n, mu1,
loratio = "loge",
imu1 = NULL, imu2 = NULL, ioratio = NULL,
zero = 3, exchangeable = FALSE, tol = 0.001,
- morerobust = FALSE)
-{
+ morerobust = FALSE) {
lmu1 <- lmu1
lmu2 <- lmu2
@@ -466,70 +467,71 @@ rbinom2.or <- function(n, mu1,
namesof("mu2", lmu2, earg = emu2), "; ",
namesof("oratio", loratio, earg = eoratio)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1, 1,0,0,0, 1), 3, 2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(c(1, 1,0,0,0, 1), 3, 2), x,
+ .exchangeable , constraints,
+ apply.int = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero , M)
}), list( .exchangeable = exchangeable, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
eval(process.binomial2.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig # Retain it if inputted
+ mustart <- mustart.orig # Retain it if inputted
- predictors.names =
+ predictors.names <-
c(namesof("mu1", .lmu1, earg = .emu1, short = TRUE),
namesof("mu2", .lmu2, earg = .emu2, short = TRUE),
namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
if (!length(etastart)) {
- pmargin = cbind(mustart[, 3] + mustart[, 4],
- mustart[, 2] + mustart[, 4])
- ioratio = if (length( .ioratio)) rep( .ioratio , len = n) else
- mustart[, 4] * mustart[, 1] / (mustart[, 2] *
- mustart[, 3])
- if (length( .imu1 )) pmargin[, 1] = .imu1
- if (length( .imu2 )) pmargin[, 2] = .imu2
- etastart = cbind(theta2eta(pmargin[, 1], .lmu1, earg = .emu1),
- theta2eta(pmargin[, 2], .lmu2, earg = .emu2),
- theta2eta(ioratio, .loratio, earg = .eoratio))
+ pmargin <- cbind(mustart[, 3] + mustart[, 4],
+ mustart[, 2] + mustart[, 4])
+ ioratio <- if (length( .ioratio)) rep( .ioratio , len = n) else
+ mustart[, 4] * mustart[, 1] / (mustart[, 2] *
+ mustart[, 3])
+ if (length( .imu1 )) pmargin[, 1] <- .imu1
+ if (length( .imu2 )) pmargin[, 2] <- .imu2
+ etastart <- cbind(theta2eta(pmargin[, 1], .lmu1, earg = .emu1),
+ theta2eta(pmargin[, 2], .lmu2, earg = .emu2),
+ theta2eta(ioratio, .loratio, earg = .eoratio))
}
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.imu1 = imu1, .imu2 = imu2, .ioratio = ioratio ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[, 1], .lmu1, earg = .emu1),
- eta2theta(eta[, 2], .lmu2, earg = .emu2))
- oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
- a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
- temp = sqrt(a.temp^2 + b.temp)
- pj4 = ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu1, earg = .emu1),
+ eta2theta(eta[, 2], .lmu2, earg = .emu2))
+ oratio <- eta2theta(eta[, 3], .loratio, earg = .eoratio)
+ a.temp <- 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp <- -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp <- sqrt(a.temp^2 + b.temp)
+ pj4 <- ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
(a.temp-temp)/(2*(oratio-1)))
- pj2 = pmargin[, 2] - pj4
- pj3 = pmargin[, 1] - pj4
+ pj2 <- pmargin[, 2] - pj4
+ pj3 <- pmargin[, 1] - pj4
cbind("00" = 1-pj4-pj2-pj3,
"01" = pj2,
"10" = pj3,
- "11" = pj4)
+ "11" = pj4)
}, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.tol = tol ))),
last = eval(substitute(expression({
- misc$link = c(mu1 = .lmu1 , mu2 = .lmu2 , oratio = .loratio)
- misc$earg = list(mu1 = .emu1 , mu2 = .emu2 , oratio = .eoratio)
+ misc$link <- c(mu1 = .lmu1 , mu2 = .lmu2 , oratio = .loratio )
- misc$tol = .tol
- misc$expected = TRUE
+ misc$earg <- list(mu1 = .emu1 , mu2 = .emu2 , oratio = .eoratio )
+
+ misc$tol <- .tol
+ misc$expected <- TRUE
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.tol = tol ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- pmargin = cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
- oratio = mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
+ pmargin <- cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
+ oratio <- mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1),
theta2eta(pmargin[, 2], .lmu2 , earg = .emu2),
theta2eta(oratio, .loratio, earg = .eoratio))
@@ -540,19 +542,19 @@ rbinom2.or <- function(n, mu1,
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
if ( .morerobust) {
- vsmallno = 1.0e4 * .Machine$double.xmin
- mu[mu < vsmallno] = vsmallno
+ vsmallno <- 1.0e4 * .Machine$double.xmin
+ mu[mu < vsmallno] <- vsmallno
}
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -562,36 +564,36 @@ rbinom2.or <- function(n, mu1,
}, list( .morerobust = morerobust ))),
vfamily = c("binom2.or", "binom2"),
deriv = eval(substitute(expression({
- smallno = 1.0e4 * .Machine$double.eps
- mu.use = mu
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1 - smallno] = 1 - smallno
- pmargin = cbind(mu.use[, 3] + mu.use[, 4],
- mu.use[, 2] + mu.use[, 4])
- pmargin[, 1] = pmax( smallno, pmargin[, 1])
- pmargin[, 1] = pmin(1 - smallno, pmargin[, 1])
- pmargin[, 2] = pmax( smallno, pmargin[, 2])
- pmargin[, 2] = pmin(1 - smallno, pmargin[, 2])
-
- oratio = mu.use[, 4]*mu.use[, 1] / (mu.use[, 2]*mu.use[, 3])
- use.oratio = pmax(smallno, oratio)
- a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
- temp9 = sqrt(a.temp^2 + b.temp)
-
- coeff12 = -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
- dl.dmu1 = coeff12[, 2] * (y[, 1]/mu.use[, 1]-y[, 3]/mu.use[, 3]) -
+ smallno <- 1.0e4 * .Machine$double.eps
+ mu.use <- mu
+ mu.use[mu.use < smallno] <- smallno
+ mu.use[mu.use > 1 - smallno] <- 1 - smallno
+ pmargin <- cbind(mu.use[, 3] + mu.use[, 4],
+ mu.use[, 2] + mu.use[, 4])
+ pmargin[, 1] <- pmax( smallno, pmargin[, 1])
+ pmargin[, 1] <- pmin(1 - smallno, pmargin[, 1])
+ pmargin[, 2] <- pmax( smallno, pmargin[, 2])
+ pmargin[, 2] <- pmin(1 - smallno, pmargin[, 2])
+
+ oratio <- mu.use[, 4]*mu.use[, 1] / (mu.use[, 2]*mu.use[, 3])
+ use.oratio <- pmax(smallno, oratio)
+ a.temp <- 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp <- -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp9 <- sqrt(a.temp^2 + b.temp)
+
+ coeff12 <- -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
+ dl.dmu1 <- coeff12[, 2] * (y[, 1]/mu.use[, 1]-y[, 3]/mu.use[, 3]) -
(1+coeff12[, 2]) * (y[, 2]/mu.use[, 2]-y[, 4]/mu.use[, 4])
- dl.dmu2 = coeff12[, 1] * (y[, 1]/mu.use[, 1]-y[, 2]/mu.use[, 2]) -
+ dl.dmu2 <- coeff12[, 1] * (y[, 1]/mu.use[, 1]-y[, 2]/mu.use[, 2]) -
(1+coeff12[, 1]) * (y[, 3]/mu.use[, 3]-y[, 4]/mu.use[, 4])
- coeff3 = (y[, 1]/mu.use[, 1] - y[, 2]/mu.use[, 2] -
+ coeff3 <- (y[, 1]/mu.use[, 1] - y[, 2]/mu.use[, 2] -
y[, 3]/mu.use[, 3] + y[, 4]/mu.use[, 4])
- Vab = pmax(smallno, 1 / (1/mu.use[, 1] + 1/mu.use[, 2] +
+ Vab <- pmax(smallno, 1 / (1/mu.use[, 1] + 1/mu.use[, 2] +
1/mu.use[, 3] + 1/mu.use[, 4]))
- dp11.doratio = Vab / use.oratio
- dl.doratio = coeff3 * dp11.doratio
+ dp11.doratio <- Vab / use.oratio
+ dl.doratio <- coeff3 * dp11.doratio
c(w) * cbind(dl.dmu1 * dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1),
dl.dmu2 * dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2),
@@ -599,20 +601,20 @@ rbinom2.or <- function(n, mu1,
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
weight = eval(substitute(expression({
- Deltapi = mu.use[, 3]*mu.use[, 2] - mu.use[, 4]*mu.use[, 1]
- myDelta = pmax(smallno, mu.use[, 1] * mu.use[, 2] *
+ Deltapi <- mu.use[, 3]*mu.use[, 2] - mu.use[, 4]*mu.use[, 1]
+ myDelta <- pmax(smallno, mu.use[, 1] * mu.use[, 2] *
mu.use[, 3] * mu.use[, 4])
- pqmargin = pmargin * (1-pmargin)
- pqmargin[pqmargin < smallno] = smallno
+ pqmargin <- pmargin * (1-pmargin)
+ pqmargin[pqmargin < smallno] <- smallno
- wz = matrix(0, n, 4)
- wz[, iam(1, 1, M)] = (pqmargin[, 2] * Vab / myDelta) *
+ wz <- matrix(0, n, 4)
+ wz[, iam(1, 1, M)] <- (pqmargin[, 2] * Vab / myDelta) *
dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1)^2
- wz[, iam(2, 2, M)] = (pqmargin[, 1] * Vab / myDelta) *
+ wz[, iam(2, 2, M)] <- (pqmargin[, 1] * Vab / myDelta) *
dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2)^2
- wz[, iam(3, 3, M)] = (Vab / use.oratio^2) *
+ wz[, iam(3, 3, M)] <- (Vab / use.oratio^2) *
dtheta.deta(use.oratio, .loratio, earg = .eoratio)^2
- wz[, iam(1, 2, M)] = (Vab * Deltapi / myDelta) *
+ wz[, iam(1, 2, M)] <- (Vab * Deltapi / myDelta) *
dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1) *
dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2)
c(w) * wz
@@ -621,14 +623,13 @@ rbinom2.or <- function(n, mu1,
}
-dbinom2.rho =
+dbinom2.rho <-
function(mu1,
mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
rho = 0,
exchangeable = FALSE,
colnames = c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
-{
+ ErrorCheck = TRUE) {
if (ErrorCheck) {
if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
stop("bad input for argument 'mu1'")
@@ -640,23 +641,23 @@ dbinom2.rho =
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- nn = max(length(mu1), length(mu2), length(rho))
- rho = rep(rho, len = nn)
- mu1 = rep(mu1, len = nn)
- mu2 = rep(mu2, len = nn)
- eta1 = qnorm(mu1)
- eta2 = qnorm(mu2)
- p11 = pnorm2(eta1, eta2, rho)
- p01 = mu2 - p11
- p10 = mu1 - p11
- p00 = 1.0 - p01 - p10 - p11
+ nn <- max(length(mu1), length(mu2), length(rho))
+ rho <- rep(rho, len = nn)
+ mu1 <- rep(mu1, len = nn)
+ mu2 <- rep(mu2, len = nn)
+ eta1 <- qnorm(mu1)
+ eta2 <- qnorm(mu2)
+ p11 <- pnorm2(eta1, eta2, cov12 = rho)
+ p01 <- mu2 - p11
+ p10 <- mu1 - p11
+ p00 <- 1.0 - p01 - p10 - p11
matrix(c(p00, p01, p10, p11), nn, 4,
dimnames = list(NULL, colnames))
}
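
dbinom2.rho() builds the same four joint probabilities from probit
marginals and a latent-normal correlation rho, with p11 coming from the
bivariate normal CDF pnorm2(). A minimal check of the rho = 0 case,
assuming the function is exported as in this release:

  library(VGAM)
  # independent probit marginals of 0.5 give four equal cells
  dbinom2.rho(mu1 = 0.5, mu2 = 0.5, rho = 0)
  #        00   01   10   11
  # [1,] 0.25 0.25 0.25 0.25
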
-rbinom2.rho =
+rbinom2.rho <-
function(n, mu1,
mu2 = if (exchangeable) mu1 else
stop("argument 'mu2' not specified"),
@@ -665,8 +666,7 @@ rbinom2.rho =
twoCols = TRUE,
colnames = if (twoCols) c("y1", "y2") else
c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
-{
+ ErrorCheck = TRUE) {
if (ErrorCheck) {
if (!is.Numeric(n, integer.valued = TRUE,
positive = TRUE, allowable.length = 1))
@@ -685,25 +685,25 @@ rbinom2.rho =
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- dmat = dbinom2.rho(mu1 = mu1, mu2 = mu2, rho = rho,
- exchangeable = exchangeable,
- ErrorCheck = ErrorCheck)
-
- answer = matrix(0, n, 2,
- dimnames = list(NULL,
- if (twoCols) colnames else NULL))
- yy = runif(n)
- cs1 = dmat[, "00"] + dmat[, "01"]
- cs2 = cs1 + dmat[, "10"]
- index = (dmat[, "00"] < yy) & (yy <= cs1)
- answer[index, 2] = 1
- index = (cs1 < yy) & (yy <= cs2)
- answer[index, 1] = 1
- index = (yy > cs2)
- answer[index,] = 1
+ dmat <- dbinom2.rho(mu1 = mu1, mu2 = mu2, rho = rho,
+ exchangeable = exchangeable,
+ ErrorCheck = ErrorCheck)
+
+ answer <- matrix(0, n, 2,
+ dimnames = list(NULL,
+ if (twoCols) colnames else NULL))
+ yy <- runif(n)
+ cs1 <- dmat[, "00"] + dmat[, "01"]
+ cs2 <- cs1 + dmat[, "10"]
+ index <- (dmat[, "00"] < yy) & (yy <= cs1)
+ answer[index, 2] <- 1
+ index <- (cs1 < yy) & (yy <= cs2)
+ answer[index, 1] <- 1
+ index <- (yy > cs2)
+ answer[index,] <- 1
if (twoCols) answer else {
- answer4 = matrix(0, n, 4, dimnames = list(NULL, colnames))
- answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] = 1
+ answer4 <- matrix(0, n, 4, dimnames = list(NULL, colnames))
+ answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] <- 1
answer4
}
}
@@ -712,9 +712,8 @@ rbinom2.rho =
-binom2.rho.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+binom2.rho.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
@@ -724,8 +723,9 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
imu1 = NULL, imu2 = NULL, irho = NULL,
imethod = 1,
zero = 3, exchangeable = FALSE,
- nsimEIM = NULL)
-{
+ grho = seq(-0.95, 0.95, by = 0.05),
+ nsimEIM = NULL) {
+
lrho <- as.list(substitute(lrho))
@@ -744,7 +744,6 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
-
if (is.Numeric(nsimEIM)) {
if (!is.Numeric(nsimEIM, allowable.length = 1,
integer.valued = TRUE))
@@ -766,12 +765,21 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
"Links: ",
namesof("mu1", lmu12, earg = emu12), ", ",
namesof("mu2", lmu12, earg = emu12), ", ",
- namesof("rho", lrho, earg = erho)),
+ namesof("rho", lrho, earg = erho)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
- .exchangeable, constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
+ .exchangeable , constraints,
+ apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .exchangeable = exchangeable, .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 3,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
mustart.orig <- mustart
eval(process.binomial2.data.vgam)
@@ -780,9 +788,9 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
mustart <- mustart.orig # Retain it if inputted
predictors.names <- c(
- namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
- namesof("mu2", .lmu12, earg = .emu12, short = TRUE),
- namesof("rho", .lrho, earg = .erho, short = TRUE))
+ namesof("mu1", .lmu12 , earg = .emu12 , short = TRUE),
+ namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE),
+ namesof("rho", .lrho , earg = .erho, short = TRUE))
if (is.null( .nsimEIM)) {
save.weight <- control$save.weight <- FALSE
@@ -790,7 +798,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
ycounts <- if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
+ y * c(w) # Convert proportions to counts
if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
warning("the response (as counts) does not appear to ",
"be integer-valued. Am rounding to integer values.")
@@ -825,32 +833,32 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
- binom2.rho.Loglikfun =
- function(rhoval, y, x, w, extraargs) {
- init.mu1 = extraargs$initmu1
- init.mu2 = extraargs$initmu2
- ycounts = extraargs$ycounts
- nvec = extraargs$nvec
- eta1 = qnorm(init.mu1)
- eta2 = qnorm(init.mu2)
- p11 = pnorm2(eta1, eta2, rhoval)
- p01 = pmin(init.mu2 - p11, init.mu2)
- p10 = pmin(init.mu1 - p11, init.mu1)
- p00 = 1.0 - p01 - p10 - p11
- mumat = abs(cbind("00" = p00,
- "01" = p01,
- "10" = p10,
- "11" = p11))
- mumat = mumat / rowSums(mumat)
- mumat[mumat < 1.0e-100] = 1.0e-100
+ binom2.rho.Loglikfun <-
+ function(rhoval, y, x, w, extraargs) {
+ init.mu1 <- extraargs$initmu1
+ init.mu2 <- extraargs$initmu2
+ ycounts <- extraargs$ycounts
+ nvec <- extraargs$nvec
+ eta1 <- qnorm(init.mu1)
+ eta2 <- qnorm(init.mu2)
+ p11 <- pnorm2(eta1, eta2, cov12 = rhoval)
+ p01 <- pmin(init.mu2 - p11, init.mu2)
+ p10 <- pmin(init.mu1 - p11, init.mu1)
+ p00 <- 1.0 - p01 - p10 - p11
+ mumat <- abs(cbind("00" = p00,
+ "01" = p01,
+ "10" = p10,
+ "11" = p11))
+ mumat <- mumat / rowSums(mumat)
+ mumat[mumat < 1.0e-100] <- 1.0e-100
sum((if (is.numeric(extraargs$orig.w)) extraargs$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mumat,
log = TRUE, dochecking = FALSE))
}
- rho.grid = seq(-0.95, 0.95, len=31)
- try.this = getMaxMin(rho.grid, objfun=binom2.rho.Loglikfun,
- y=y, x=x, w=w, extraargs = list(
+ rho.grid <- .grho # seq(-0.95, 0.95, len = 31)
+ try.this <- getMaxMin(rho.grid, objfun = binom2.rho.Loglikfun,
+ y = y, x = x, w = w, extraargs = list(
orig.w = extra$orig.w,
ycounts = ycounts,
initmu1 = mu1.init,
@@ -859,36 +867,45 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
))
- rho.init = if (is.Numeric( .irho ))
+ rho.init <- if (is.Numeric( .irho ))
rep( .irho , len = n) else {
try.this
}
- etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
- theta2eta(mu2.init, .lmu12, earg = .emu12),
- theta2eta(rho.init, .lrho, earg = .erho))
- mustart <- NULL # Since etastart has been computed.
+ etastart <- cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
+ theta2eta(mu2.init, .lmu12 , earg = .emu12 ),
+ theta2eta(rho.init, .lrho , earg = .erho))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
- .lrho = lrho, .erho = erho,
- .imethod = imethod,
- .imu1 = imu1, .imu2 = imu2, .irho = irho ))),
+ }), list( .lmu12 = lmu12, .lrho = lrho,
+ .emu12 = emu12, .erho = erho,
+ .grho = grho,
+ .irho = irho,
+ .imethod = imethod, .nsimEIM = nsimEIM,
+ .imu1 = imu1, .imu2 = imu2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
- eta2theta(eta[, 2], .lmu12, earg = .emu12))
- rho = eta2theta(eta[, 3], .lrho, earg = .erho)
- p11 = pnorm2(eta[, 1], eta[, 2], rho)
- p01 = pmin(pmargin[, 2] - p11, pmargin[, 2])
- p10 = pmin(pmargin[, 1] - p11, pmargin[, 1])
- p00 = 1.0 - p01 - p10 - p11
- ansmat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+ eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+ rho <- eta2theta(eta[, 3], .lrho , earg = .erho)
+ p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rho)
+ p01 <- pmin(pmargin[, 2] - p11, pmargin[, 2])
+ p10 <- pmin(pmargin[, 1] - p11, pmargin[, 1])
+ p00 <- 1.0 - p01 - p10 - p11
+ ansmat <- abs(cbind("00" = p00,
+ "01" = p01,
+ "10" = p10,
+ "11" = p11))
ansmat / rowSums(ansmat)
- }, list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
+ }, list( .lmu12 = lmu12, .lrho = lrho,
+ .emu12 = emu12, .erho = erho ))),
last = eval(substitute(expression({
- misc$link = c(mu1 = .lmu12, mu2 = .lmu12, rho = .lrho)
- misc$earg = list(mu1 = .emu12, mu2 = .emu12, rho = .erho)
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
+ misc$link <- c(mu1 = .lmu12 , mu2 = .lmu12 , rho = .lrho )
+
+ misc$earg <- list(mu1 = .emu12 , mu2 = .emu12 , rho = .erho )
+
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
}), list( .lmu12 = lmu12, .lrho = lrho, .nsimEIM = nsimEIM,
.emu12 = emu12, .erho = erho ))),
@@ -897,108 +914,115 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w))
- y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
+ ycounts <- if (is.numeric(extra$orig.w))
+ y * c(w) / extra$orig.w else
+ y * c(w) # Convert proportions to counts
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
}
}, list( .erho = erho ))),
vfamily = c("binom2.rho", "binom2"),
deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
-
- pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
- eta2theta(eta[, 2], .lmu12, earg = .emu12))
- rhovec = eta2theta(eta[, 3], .lrho, earg = .erho)
- p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
- p01 = pmargin[, 2]-p11
- p10 = pmargin[, 1]-p11
- p00 = 1-p01-p10-p11
-
- ABmat = (eta[, 1:2] - rhovec * eta[, 2:1]) / sqrt(1.0 - rhovec^2)
- PhiA = pnorm(ABmat[, 1])
- PhiB = pnorm(ABmat[, 2])
- onemPhiA = pnorm(ABmat[, 1], lower.tail = FALSE)
- onemPhiB = pnorm(ABmat[, 2], lower.tail = FALSE)
-
- smallno = 1000 * .Machine$double.eps
- p00[p00 < smallno] = smallno
- p01[p01 < smallno] = smallno
- p10[p10 < smallno] = smallno
- p11[p11 < smallno] = smallno
-
- dprob00 = dnorm2(eta[, 1], eta[, 2], rhovec)
- dl.dprob1 = PhiB * (ycounts[, 4]/p11 - ycounts[, 2]/p01) +
- onemPhiB * (ycounts[, 3]/p10 - ycounts[, 1]/p00)
- dl.dprob2 = PhiA * (ycounts[, 4]/p11 - ycounts[, 3]/p10) +
- onemPhiA * (ycounts[, 2]/p01 - ycounts[, 1]/p00)
- dl.drho = (ycounts[, 4]/p11 - ycounts[, 3]/p10 -
- ycounts[, 2]/p01 + ycounts[, 1]/p00) * dprob00
- dprob1.deta = dtheta.deta(pmargin[, 1], .lmu12, earg = .emu12)
- dprob2.deta = dtheta.deta(pmargin[, 2], .lmu12, earg = .emu12)
- drho.deta = dtheta.deta(rhovec, .lrho, earg = .erho)
- dthetas.detas = cbind(dprob1.deta, dprob2.deta, drho.deta)
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
+ y * c(w) # Convert proportions to counts
+
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+ eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+ rhovec <- eta2theta(eta[, 3], .lrho , earg = .erho)
+ p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+ p01 <- pmargin[, 2] - p11
+ p10 <- pmargin[, 1] - p11
+ p00 <- 1 - p01 - p10 - p11
+
+ ABmat <- (eta[, 1:2] -
+ rhovec * eta[, 2:1]) / sqrt(pmax(1e5 * .Machine$double.eps,
+ 1.0 - rhovec^2))
+ PhiA <- pnorm(ABmat[, 1])
+ PhiB <- pnorm(ABmat[, 2])
+ onemPhiA <- pnorm(ABmat[, 1], lower.tail = FALSE)
+ onemPhiB <- pnorm(ABmat[, 2], lower.tail = FALSE)
+
+ smallno <- 1000 * .Machine$double.eps
+ p00[p00 < smallno] <- smallno
+ p01[p01 < smallno] <- smallno
+ p10[p10 < smallno] <- smallno
+ p11[p11 < smallno] <- smallno
+
+ dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+ dl.dprob1 <- PhiB * (ycounts[, 4]/p11 - ycounts[, 2]/p01) +
+ onemPhiB * (ycounts[, 3]/p10 - ycounts[, 1]/p00)
+ dl.dprob2 <- PhiA * (ycounts[, 4]/p11 - ycounts[, 3]/p10) +
+ onemPhiA * (ycounts[, 2]/p01 - ycounts[, 1]/p00)
+ dl.drho <- (ycounts[, 4]/p11 - ycounts[, 3]/p10 -
+ ycounts[, 2]/p01 + ycounts[, 1]/p00) * dprob00
+
+ dprob1.deta <- dtheta.deta(pmargin[, 1], .lmu12 , earg = .emu12 )
+ dprob2.deta <- dtheta.deta(pmargin[, 2], .lmu12 , earg = .emu12 )
+ drho.deta <- dtheta.deta(rhovec, .lrho , earg = .erho)
+ dthetas.detas <- cbind(dprob1.deta, dprob2.deta, drho.deta)
(if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- cbind(dl.dprob1, dl.dprob2, dl.drho) * dthetas.detas
- }), list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
+ cbind(dl.dprob1,
+ dl.dprob2,
+ dl.drho) * dthetas.detas
+ }), list( .lmu12 = lmu12, .lrho = lrho,
+ .emu12 = emu12, .erho = erho ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM )) {
- d2l.dprob1prob1 = PhiB^2 * (1/p11 + 1/p01) +
- onemPhiB^2 * (1/p10 + 1/p00)
- d2l.dprob2prob2 = PhiA^2 * (1/p11 + 1/p10) +
- onemPhiA^2 * (1/p01 + 1/p00)
- d2l.dprob1prob2 = PhiA * ( PhiB/p11 - onemPhiB/p10) +
- onemPhiA * (onemPhiB/p00 - PhiB/p01)
- d2l.dprob1rho = (PhiB * (1/p11 + 1/p01) -
- onemPhiB * (1/p10 + 1/p00)) * dprob00
- d2l.dprob2rho = (PhiA * (1/p11 + 1/p10) -
- onemPhiA * (1/p01 + 1/p00)) * dprob00
- d2l.drho2 = (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
- wz = matrix(0, n, dimm(M)) # 6=dimm(M)
- wz[, iam(1, 1, M)] = d2l.dprob1prob1 * dprob1.deta^2
- wz[, iam(2, 2, M)] = d2l.dprob2prob2 * dprob2.deta^2
- wz[, iam(1, 2, M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
- wz[, iam(1, 3, M)] = d2l.dprob1rho * dprob1.deta * drho.deta
- wz[, iam(2, 3, M)] = d2l.dprob2rho * dprob2.deta * drho.deta
- wz[, iam(3, 3, M)] = d2l.drho2 * drho.deta^2
+ ned2l.dprob1prob1 <- PhiB^2 * (1/p11 + 1/p01) +
+ onemPhiB^2 * (1/p10 + 1/p00)
+ ned2l.dprob2prob2 <- PhiA^2 * (1/p11 + 1/p10) +
+ onemPhiA^2 * (1/p01 + 1/p00)
+ ned2l.dprob1prob2 <- PhiA * ( PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ ned2l.dprob1rho <- (PhiB * (1/p11 + 1/p01) -
+ onemPhiB * (1/p10 + 1/p00)) * dprob00
+ ned2l.dprob2rho <- (PhiA * (1/p11 + 1/p10) -
+ onemPhiA * (1/p01 + 1/p00)) * dprob00
+ ned2l.drho2 <- (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
+
+ wz <- matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dprob1prob1 * dprob1.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dprob2prob2 * dprob2.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.drho2 * drho.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ wz[, iam(2, 3, M)] <- ned2l.dprob2rho * dprob2.deta * drho.deta
+ wz[, iam(1, 3, M)] <- ned2l.dprob1rho * dprob1.deta * drho.deta
} else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
- ysim = rbinom2.rho(n, mu1 = pmargin[, 1], mu2 = pmargin[, 2],
- twoCols = FALSE, rho = rhovec)
- dl.dprob1 = PhiB * (ysim[, 4]/p11 - ysim[, 2]/p01) +
- onemPhiB * (ysim[, 3]/p10 - ysim[, 1]/p00)
- dl.dprob2 = PhiA * (ysim[, 4]/p11 - ysim[, 3]/p10) +
- onemPhiA * (ysim[, 2]/p01 - ysim[, 1]/p00)
- dl.drho = (ysim[, 4]/p11 - ysim[, 3]/p10 -
- ysim[, 2]/p01 + ysim[, 1]/p00) * dprob00
+ ysim <- rbinom2.rho(n, mu1 = pmargin[, 1], mu2 = pmargin[, 2],
+ twoCols = FALSE, rho = rhovec)
+ dl.dprob1 <- PhiB * (ysim[, 4]/p11 - ysim[, 2]/p01) +
+ onemPhiB * (ysim[, 3]/p10 - ysim[, 1]/p00)
+ dl.dprob2 <- PhiA * (ysim[, 4]/p11 - ysim[, 3]/p10) +
+ onemPhiA * (ysim[, 2]/p01 - ysim[, 1]/p00)
+ dl.drho <- (ysim[, 4]/p11 - ysim[, 3]/p10 -
+ ysim[, 2]/p01 + ysim[, 1]/p00) * dprob00
rm(ysim)
- temp3 = cbind(dl.dprob1, dl.dprob2, dl.drho)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.dprob1, dl.dprob2, dl.drho)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[, ind1$row.index] * temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ wz <- if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
}
c(w) * wz
}), list( .nsimEIM = nsimEIM ))))
@@ -1011,26 +1035,69 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (log.arg) {
- (-0.5*(x^2 + y^2 - 2*x*y*rho)/(1.0-rho^2)) - log(2) - log(pi) -
+ logdnorm2 <-
+ (-0.5*(x * (x - 2*y*rho) + y^2) / (1.0 - rho^2)) - log(2 * pi) -
0.5 * log1p(-rho^2)
+
+ if (log.arg) {
+ logdnorm2
} else {
- exp(-0.5*(x^2 + y^2 - 2*x*y*rho)/(1.0-rho^2)) / (2*pi*sqrt(1.0-rho^2))
+ exp(logdnorm2)
}
}
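
The reworked dnorm2() computes the standard bivariate normal density on
the log scale first and exponentiates only when log = FALSE, which is a
little more stable for rho near +/-1. A quick hedged check of the rho = 0
case (assuming dnorm2() is visible in the installed VGAM; otherwise reach
it with VGAM:::dnorm2):

  dnorm2(x = 0, y = 0, rho = 0)   # 1 / (2 * pi), about 0.1591549
  dnorm(0)^2                      # same value, since the components are independent
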
-pnorm2 <- function(ah, ak, r) {
+ pnorm2 <- function(x1, x2,
+ mean1 = 0, mean2 = 0,
+ var1 = 1, var2 = 1,
+ cov12 = 0) {
+
+
+
+ sd1 <- sqrt(var1)
+ sd2 <- sqrt(var2)
+ rho <- cov12 / (sd1 * sd2)
+
+ if (any(is.na(x1) | is.na(x2) |
+ is.na(sd1) | is.na(sd2) |
+ is.na(mean1) | is.na(mean2) | is.na(rho)))
+ stop("no NAs allowed in arguments or variables 'x1', 'x2', 'mean1', ",
+ "'mean2', 'sd1', 'sd2', 'cov12'")
+ if (min(rho) < -1 || max(rho) > +1)
+ stop("correlation 'rho' is out of range")
+
+
+ if (length(mean1) > 1 && length(mean2) == 1 &&
+ length(var1) == 1 && length(var2) == 1 && length(cov12) == 1)
+ warning("the call to pnorm2() seems based on the old version ",
+ "of the arguments")
+
+ LLL <- max(length(x1), length(x2),
+ length(mean1), length(mean2),
+ length(sd1), length(sd2),
+ length(rho))
+ if (length(x1) != LLL) x1 <- rep(x1, len = LLL)
+ if (length(x2) != LLL) x2 <- rep(x2, len = LLL)
+ if (length(mean1) != LLL) mean1 <- rep(mean1, len = LLL)
+ if (length(mean2) != LLL) mean2 <- rep(mean2, len = LLL)
+ if (length(sd1) != LLL) sd1 <- rep(sd1, len = LLL)
+ if (length(sd2) != LLL) sd2 <- rep(sd2, len = LLL)
+ if (length(rho) != LLL) rho <- rep(rho, len = LLL)
+
+ Z1 <- (x1 - mean1) / sd1
+ Z2 <- (x2 - mean2) / sd2
- ans <- ah
- size <- length(ah)
- singler <- ifelse(length(r) == 1, 1, 0)
- dotC(name = "pnorm2", ah = as.double(-ah), ak = as.double(-ak),
- r = as.double(r),
- size = as.integer(size), singler = as.integer(singler),
+ ans <- Z1
+ singler <- ifelse(length(rho) == 1, 1, 0)
+ answer <- dotC(name = "pnorm2",
+ ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
+ size = as.integer(LLL), singler = as.integer(singler),
ans = as.double(ans))$ans
+ if (any(answer < 0.0))
+ warning("some negative values returned")
+ answer
}
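
The rewritten pnorm2() takes named arguments (x1, x2, mean1, mean2, var1,
var2, cov12) in place of the old (ah, ak, r), standardises internally and
then calls the same compiled 'pnorm2' routine. A minimal sketch of the new
call, assuming the package is attached:

  library(VGAM)
  # P(X1 <= 0, X2 <= 0) for a standard bivariate normal with zero correlation
  pnorm2(x1 = 0, x2 = 0, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
  # [1] 0.25
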
@@ -1039,18 +1106,16 @@ pnorm2 <- function(ah, ak, r) {
my.dbinom <- function(x,
size = stop("no 'size' argument"),
- prob = stop("no 'prob' argument"))
-{
+ prob = stop("no 'prob' argument")) {
- exp(lgamma(size + 1) - lgamma(size - x +1) - lgamma(x + 1) +
+ exp(lgamma(size + 1) - lgamma(size - x + 1) - lgamma(x + 1) +
x * log(prob / (1 - prob)) + size * log1p(-prob))
}
- size.binomial <- function(prob = 0.5, link = "loge")
-{
- if (any(prob <= 0 || prob >= 1))
+ size.binomial <- function(prob = 0.5, link = "loge") {
+ if (any(prob <= 0 | prob >= 1))
stop("some values of prob out of range")
@@ -1081,8 +1146,10 @@ my.dbinom <- function(x,
nvec * extra$temp2
}, list( .link = link ))),
last = eval(substitute(expression({
- misc$link <- c(size = .link)
+ misc$link <- c(size = .link )
+
misc$prob <- extra$temp2
+
}), list( .link = link ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
nvec <- mu / extra$temp2
@@ -1102,14 +1169,14 @@ my.dbinom <- function(x,
vfamily = c("size.binomial"),
deriv = eval(substitute(expression({
nvec <- mu/extra$temp2
- dldnvec = digamma(nvec+1) - digamma(nvec-y+1) + log1p(-extra$temp2)
+ dldnvec <- digamma(nvec+1) - digamma(nvec-y+1) + log1p(-extra$temp2)
dnvecdeta <- dtheta.deta(nvec, .link)
c(w) * cbind(dldnvec * dnvecdeta)
}), list( .link = link ))),
weight = eval(substitute(expression({
d2ldnvec2 <- trigamma(nvec+1) - trigamma(nvec-y+1)
- d2ldnvec2[y == 0] = -sqrt(.Machine$double.eps)
- wz = -c(w) * dnvecdeta^2 * d2ldnvec2
+ d2ldnvec2[y == 0] <- -sqrt( .Machine$double.eps )
+ wz <- -c(w) * dnvecdeta^2 * d2ldnvec2
wz
}), list( .link = link ))))
}
@@ -1136,43 +1203,43 @@ my.dbinom <- function(x,
stop("negative values for argument 'shape2' not allowed")
- LLL = max(length(x), length(size), length(shape1), length(shape2))
- if (length(x) != LLL) x = rep(x, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL)
- if (length(shape1) != LLL) shape1 = rep(shape1, len = LLL)
- if (length(shape2) != LLL) shape2 = rep(shape2, len = LLL)
+ LLL <- max(length(x), length(size), length(shape1), length(shape2))
+ if (length(x) != LLL) x <- rep(x, len = LLL)
+ if (length(size) != LLL) size <- rep(size, len = LLL)
+ if (length(shape1) != LLL) shape1 <- rep(shape1, len = LLL)
+ if (length(shape2) != LLL) shape2 <- rep(shape2, len = LLL)
- ans = 0 * x
- ok = (round(x) == x) & (x >= 0) & (x <= size) &
- is.finite(shape1) & is.finite(shape2)
+ ans <- 0 * x
+ ok <- (round(x) == x) & (x >= 0) & (x <= size) &
+ is.finite(shape1) & is.finite(shape2)
if (any(ok)) {
- ans[ok] = lchoose(size[ok], x[ok]) +
- lbeta(shape1[ok] + x[ok], shape2[ok] + size[ok] - x[ok]) -
- lbeta(shape1[ok], shape2[ok])
+ ans[ok] <- lchoose(size[ok], x[ok]) +
+ lbeta(shape1[ok] + x[ok], shape2[ok] + size[ok] - x[ok]) -
+ lbeta(shape1[ok], shape2[ok])
if (log.arg) {
} else {
- ans[ok] = exp(ans[ok])
+ ans[ok] <- exp(ans[ok])
}
}
- okay1 = is.na(shape1) & is.infinite(shape2) # rho = 0 and prob == 0
- okay2 = is.infinite(shape1) & is.na(shape2) # rho = 0 and prob == 1
- okay3 = is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
+ okay1 <- is.na(shape1) & is.infinite(shape2) # rho = 0 and prob == 0
+ okay2 <- is.infinite(shape1) & is.na(shape2) # rho = 0 and prob == 1
+ okay3 <- is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
if (sum.okay1 <- sum(okay1))
- ans[okay1] = dbinom(x = x[okay1], size = size[okay1],
- prob = 0,
- log = log.arg)
+ ans[okay1] <- dbinom(x = x[okay1], size = size[okay1],
+ prob = 0,
+ log = log.arg)
if (sum.okay2 <- sum(okay2))
- ans[okay2] = dbinom(x = x[okay2], size = size[okay2],
- prob = 1,
- log = log.arg)
+ ans[okay2] <- dbinom(x = x[okay2], size = size[okay2],
+ prob = 1,
+ log = log.arg)
if (sum.okay3 <- sum(okay3)) {
if (length(.dontuse.prob) != LLL)
- .dontuse.prob = rep( .dontuse.prob , len = LLL)
- ans[okay3] = dbinom(x = x[okay3], size = size[okay3],
- prob = .dontuse.prob[okay3],
- log = log.arg)
+ .dontuse.prob <- rep( .dontuse.prob , len = LLL)
+ ans[okay3] <- dbinom(x = x[okay3], size = size[okay3],
+ prob = .dontuse.prob[okay3],
+ log = log.arg)
}
ans
@@ -1193,38 +1260,38 @@ my.dbinom <- function(x,
stop("bad input for argument 'shape1'")
if (!is.Numeric(shape2, positive = TRUE))
stop("bad input for argument 'shape2'")
- LLL = max(length(q), length(size), length(shape1), length(shape2))
+ LLL <- max(length(q), length(size), length(shape1), length(shape2))
- if (length(q) != LLL) q = rep(q, len = LLL)
- if (length(shape1) != LLL) shape1 = rep(shape1, len = LLL)
- if (length(shape2) != LLL) shape2 = rep(shape2, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(q) != LLL) q <- rep(q, len = LLL)
+ if (length(shape1) != LLL) shape1 <- rep(shape1, len = LLL)
+ if (length(shape2) != LLL) shape2 <- rep(shape2, len = LLL)
+ if (length(size) != LLL) size <- rep(size, len = LLL);
- ans = q * 0 # Retains names(q)
+ ans <- q * 0 # Retains names(q)
if (max(abs(size - size[1])) < 1.0e-08 &&
max(abs(shape1 - shape1[1])) < 1.0e-08 &&
max(abs(shape2 - shape2[1])) < 1.0e-08) {
- qstar = floor(q)
- temp = if (max(qstar) >= 0) {
+ qstar <- floor(q)
+ temp <- if (max(qstar) >= 0) {
dbetabinom.ab(0:max(qstar), size = size[1],
- shape1 = shape1[1],
- shape2 = shape2[1])
+ shape1 = shape1[1],
+ shape2 = shape2[1])
} else {
0 * qstar
}
- unq = unique(qstar)
+ unq <- unique(qstar)
for (ii in unq) {
- index = qstar == ii
- ans[index] = if (ii >= 0) sum(temp[1:(1+ii)]) else 0
+ index <- qstar == ii
+ ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
}
} else {
for (ii in 1:LLL) {
- qstar = floor(q[ii])
- ans[ii] = if (qstar >= 0) {
+ qstar <- floor(q[ii])
+ ans[ii] <- if (qstar >= 0) {
sum(dbetabinom.ab(x = 0:qstar, size = size[ii],
- shape1 = shape1[ii],
- shape2 = shape2[ii]))
+ shape1 = shape1[ii],
+ shape2 = shape2[ii]))
} else 0
}
}
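
The CDF code above simply accumulates the mass function up to floor(q), with a fast path when all parameters are constant. A scalar sketch of the same idea, reusing dbetabinom_sketch() from the previous illustration:

  pbetabinom_sketch <- function(q, size, shape1, shape2) {
    # P(X <= q) by summing the pmf from 0 to floor(q)
    qstar <- floor(q)
    if (qstar < 0) return(0)
    sum(dbetabinom_sketch(0:qstar, size = size,
                          shape1 = shape1, shape2 = shape2))
  }
  # e.g. pbetabinom_sketch(3, size = 10, shape1 = 2, shape2 = 5)
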
@@ -1234,7 +1301,8 @@ my.dbinom <- function(x,
rbetabinom.ab <- function(n, size, shape1, shape2,
- .dontuse.prob = NULL) {
+ .dontuse.prob = NULL) {
+ # checkargs = TRUE
if (!is.Numeric(size, integer.valued = TRUE))
stop("bad input for argument 'size'")
@@ -1243,36 +1311,36 @@ my.dbinom <- function(x,
if (any(shape2 < 0, na.rm = TRUE))
stop("negative values for argument 'shape2' not allowed")
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- if (length(size) != use.n) size = rep(size, len = use.n)
- if (length(shape1) != use.n) shape1 = rep(shape1, len = use.n)
- if (length(shape2) != use.n) shape2 = rep(shape2, len = use.n)
+ if (length(size) != use.n) size <- rep(size, len = use.n)
+ if (length(shape1) != use.n) shape1 <- rep(shape1, len = use.n)
+ if (length(shape2) != use.n) shape2 <- rep(shape2, len = use.n)
- ans = rep(as.numeric(NA), len = use.n)
- okay0 = is.finite(shape1) & is.finite(shape2)
+ ans <- rep(as.numeric(NA), len = use.n)
+ okay0 <- is.finite(shape1) & is.finite(shape2)
if (smalln <- sum(okay0))
- ans[okay0] = rbinom(n = smalln, size = size[okay0],
- prob = rbeta(n = smalln, shape1 = shape1[okay0],
- shape2 = shape2[okay0]))
+ ans[okay0] <- rbinom(n = smalln, size = size[okay0],
+ prob = rbeta(n = smalln, shape1 = shape1[okay0],
+ shape2 = shape2[okay0]))
- okay1 = is.na(shape1) & is.infinite(shape2) # rho = 0 and prob == 0
- okay2 = is.infinite(shape1) & is.na(shape2) # rho = 0 and prob == 1
- okay3 = is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
+ okay1 <- is.na(shape1) & is.infinite(shape2) # rho = 0 and prob == 0
+ okay2 <- is.infinite(shape1) & is.na(shape2) # rho = 0 and prob == 1
+ okay3 <- is.infinite(shape1) & is.infinite(shape2) # rho = 0 and 0 < prob < 1
if (sum.okay1 <- sum(okay1))
- ans[okay1] = rbinom(n = sum.okay1, size = size[okay1],
- prob = 0)
+ ans[okay1] <- rbinom(n = sum.okay1, size = size[okay1],
+ prob = 0)
if (sum.okay2 <- sum(okay2))
- ans[okay2] = rbinom(n = sum.okay2, size = size[okay2],
- prob = 1)
+ ans[okay2] <- rbinom(n = sum.okay2, size = size[okay2],
+ prob = 1)
if (sum.okay3 <- sum(okay3)) {
- if (length(.dontuse.prob) != use.n)
- .dontuse.prob = rep(.dontuse.prob, len = use.n)
- ans[okay3] = rbinom(n = sum.okay3, size = size[okay3],
- prob = .dontuse.prob[okay3])
+ if (length( .dontuse.prob ) != use.n)
+ .dontuse.prob <- rep(.dontuse.prob, len = use.n)
+ ans[okay3] <- rbinom(n = sum.okay3, size = size[okay3],
+ prob = .dontuse.prob[okay3])
}
ans
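
For finite shape parameters the generator above compounds rbinom() over beta-distributed probabilities; the infinite/NA shape branches fall back to plain binomials with prob 0, 1, or .dontuse.prob. A sketch of the finite-shape case only (rbetabinom_sketch is a made-up name):

  rbetabinom_sketch <- function(n, size, shape1, shape2) {
    # draw a beta probability per observation, then a binomial count
    rbinom(n, size = size,
           prob = rbeta(n, shape1 = shape1, shape2 = shape2))
  }
  # e.g. table(rbetabinom_sketch(1000, size = 10, shape1 = 2, shape2 = 5))
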
@@ -1383,85 +1451,87 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
"Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
"where rho = 1 / (shape1+shape2+1)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
if (is.null( .nsimEIM)) {
save.weight <- control$save.weight <- FALSE
}
- mustart.orig = mustart
+ mustart.orig <- mustart
eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
if (length(mustart.orig))
- mustart = mustart.orig # Retain it if inputted
- predictors.names =
+ mustart <- mustart.orig # Retain it if inputted
+ predictors.names <-
c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
if (!length(etastart)) {
- mustart.use = if (length(mustart.orig)) mustart.orig else
+ mustart.use <- if (length(mustart.orig)) mustart.orig else
mustart
- shape1 = rep( .i1 , len = n)
- shape2 = if (length( .i2 )) {
- rep( .i2 , len = n)
- } else if (length(mustart.orig)) {
- shape1 * (1 / mustart.use - 1)
- } else if ( .imethod == 1) {
- shape1 * (1 / weighted.mean(y, w) - 1)
- } else if ( .imethod == 2) {
- temp777 = .sinit * weighted.mean(y, w) +
- (1 - .sinit) * y
- shape1 * (1 / temp777 - 1)
- } else {
- shape1 * (1 / weighted.mean(mustart.use, w) - 1)
- }
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ shape1 <- rep( .i1 , len = n)
+ shape2 <- if (length( .i2 )) {
+ rep( .i2 , len = n)
+ } else if (length(mustart.orig)) {
+ shape1 * (1 / mustart.use - 1)
+ } else if ( .imethod == 1) {
+ shape1 * (1 / weighted.mean(y, w) - 1)
+ } else if ( .imethod == 2) {
+ temp777 <- .sinit * weighted.mean(y, w) +
+ (1 - .sinit) * y
+ shape1 * (1 / temp777 - 1)
+ } else {
+ shape1 * (1 / weighted.mean(mustart.use, w) - 1)
+ }
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
warning("the response (as counts) does not appear to ",
"be integer-valued. Am rounding to integer values.")
- ycounts = round(ycounts) # Make sure it is an integer
- etastart = cbind(theta2eta(shape1, .lshape12, earg = .earg),
- theta2eta(shape2, .lshape12, earg = .earg))
+ ycounts <- round(ycounts) # Make sure it is an integer
+ etastart <- cbind(theta2eta(shape1, .lshape12, earg = .earg),
+ theta2eta(shape2, .lshape12, earg = .earg))
mustart <- NULL # Since etastart has been computed.
}
}), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
.nsimEIM = nsimEIM,
.imethod = imethod, .sinit = shrinkage.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
+ shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
shape1 / (shape1 + shape2)
}, list( .lshape12 = lshape12, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c("shape1" = .lshape12, "shape2" = .lshape12)
- misc$earg <- list(shape1 = .earg, shape2 = .earg)
- shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
- misc$rho = 1 / (shape1 + shape2 + 1)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$link <- c("shape1" = .lshape12 , "shape2" = .lshape12 )
+
+ misc$earg <- list("shape1" = .earg , "shape2" = .earg )
+
+ shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ misc$rho <- 1 / (shape1 + shape2 + 1)
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$zero <- .zero
}), list( .lshape12 = lshape12, .earg = earg,
.nsimEIM = nsimEIM, .zero = zero ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
- shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
@@ -1472,67 +1542,70 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
}, list( .lshape12 = lshape12, .earg = earg ))),
vfamily = c("betabinomial.ab"),
deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
- dshape1.deta = dtheta.deta(shape1, .lshape12, earg = .earg)
- dshape2.deta = dtheta.deta(shape2, .lshape12, earg = .earg)
- dl.dshape1 = digamma(shape1+ycounts) -
- digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1 + shape2)
- dl.dshape2 = digamma(nvec + shape2 - ycounts) -
- digamma(shape1 + shape2 + nvec) -
- digamma(shape2) + digamma(shape1 + shape2)
+ shape1 <- eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 <- eta2theta(eta[, 2], .lshape12, earg = .earg)
+
+ dshape1.deta <- dtheta.deta(shape1, .lshape12, earg = .earg)
+ dshape2.deta <- dtheta.deta(shape2, .lshape12, earg = .earg)
+
+ dl.dshape1 <- digamma(shape1+ycounts) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1 + shape2)
+ dl.dshape2 <- digamma(nvec + shape2 - ycounts) -
+ digamma(shape1 + shape2 + nvec) -
+ digamma(shape2) + digamma(shape1 + shape2)
+
(if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
cbind(dl.dshape1 * dshape1.deta,
dl.dshape2 * dshape2.deta)
}), list( .lshape12 = lshape12, .earg = earg ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM)) {
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz[, iam(1, 1, M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz[, iam(1, 1, M)] <- -(expected.betabin.ab(nvec,shape1,shape2,
TRUE) -
trigamma(shape1+shape2+nvec) -
trigamma(shape1) + trigamma(shape1+shape2)) *
dshape1.deta^2
- wz[, iam(2, 2, M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ wz[, iam(2, 2, M)] <- -(expected.betabin.ab(nvec,shape1,shape2,
FALSE) -
trigamma(shape1+shape2+nvec) -
trigamma(shape2) + trigamma(shape1+shape2)) *
dshape2.deta^2
- wz[, iam(2, 1, M)] = -(trigamma(shape1+shape2) -
+ wz[, iam(2, 1, M)] <- -(trigamma(shape1+shape2) -
trigamma(shape1+shape2+nvec)) *
dshape1.deta * dshape2.deta
wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
} else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dshape1.deta, dshape2.deta)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dthetas.detas <- cbind(dshape1.deta, dshape2.deta)
for (ii in 1:( .nsimEIM )) {
- ysim = rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
+ ysim <- rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
shape2 = shape2)
checkargs = .checkargs
- dl.dshape1 = digamma(shape1+ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1+shape2)
- dl.dshape2 = digamma(nvec+shape2-ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape2) + digamma(shape1+shape2)
+ dl.dshape1 <- digamma(shape1+ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1+shape2)
+ dl.dshape2 <- digamma(nvec+shape2-ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape2) + digamma(shape1+shape2)
rm(ysim)
- temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.dshape1, dl.dshape2) # n x M matrix
+ run.varcov <- ((ii-1) * run.varcov +
temp3[, ind1$row.index]*
temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
}
}), list( .lshape12 = lshape12, .earg = earg,
@@ -1545,8 +1618,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
iprob = NULL, ishape = 0.1,
moreSummation = c(2, 100),
tolerance = 1.0e-10,
- zero = NULL)
-{
+ zero = NULL) {
lprob <- as.list(substitute(lprob))
eprob <- link2list(lprob)
lprob <- attr(eprob, "function.name")
@@ -1574,22 +1646,22 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
namesof("prob", lprob, earg = eprob), ", ",
namesof("shape", lshape, earg = eshape)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
eval(geometric()@initialize)
- predictors.names =
+ predictors.names <-
c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
namesof("shape", .lshape, earg = .eshape, short = FALSE))
- if (length( .iprob))
- prob.init = rep( .iprob , len = n)
+ if (length( .iprob ))
+ prob.init <- rep( .iprob , len = n)
if (!length(etastart) ||
ncol(cbind(etastart)) != 2) {
- shape.init = rep( .ishape , len = n)
- etastart =
+ shape.init <- rep( .ishape , len = n)
+ etastart <-
cbind(theta2eta(prob.init, .lprob, earg = .eprob),
theta2eta(shape.init, .lshape, earg = .eshape))
}
@@ -1597,40 +1669,43 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
.eprob = eprob, .eshape = eshape,
.lshape = lshape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
- mymu = (1-prob) / (prob - shape)
+ prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ mymu <- (1-prob) / (prob - shape)
ifelse(mymu >= 0, mymu, NA)
}, list( .lprob = lprob, .lshape = lshape,
.eprob = eprob, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link = c("prob" = .lprob, "shape" = .lshape)
- misc$earg <- list(prob = .eprob, shape = .eshape)
+ misc$link <- c("prob" = .lprob , "shape" = .lshape )
+
+ misc$earg <- list("prob" = .eprob , "shape" = .eshape )
+
if (intercept.only) {
- misc$shape1 = shape1[1] # These quantities computed in @deriv
- misc$shape2 = shape2[1]
+ misc$shape1 <- shape1[1] # These quantities computed in @deriv
+ misc$shape2 <- shape2[1]
}
- misc$expected = TRUE
- misc$tolerance = .tolerance
- misc$zero = .zero
+ misc$expected <- TRUE
+ misc$tolerance <- .tolerance
+ misc$zero <- .zero
misc$moreSummation = .moreSummation
- }), list( .lprob = lprob, .lshape = lshape, .tolerance = tolerance,
+ }), list( .lprob = lprob, .lshape = lshape,
.eprob = eprob, .eshape = eshape,
+ .tolerance = tolerance,
.moreSummation = moreSummation, .zero = zero ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
- ans = log(prob)
- maxy = max(y)
+ prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ ans <- log(prob)
+ maxy <- max(y)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
for (ii in 1:maxy) {
- index = ii <= y
- ans[index] = ans[index] + log1p(-prob[index]+(ii-1) *
- shape[index]) - log1p((ii-1)*shape[index])
+ index <- (ii <= y)
+ ans[index] <- ans[index] + log1p(-prob[index]+(ii-1) *
+ shape[index]) - log1p((ii-1)*shape[index])
}
- ans = ans - log1p((y+1-1)*shape)
+ ans <- ans - log1p((y+1-1)*shape)
@@ -1641,53 +1716,53 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
.eprob = eprob, .eshape = eshape ))),
vfamily = c("betageometric"),
deriv = eval(substitute(expression({
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
- shape1 = prob / shape; shape2 = (1-prob) / shape;
- dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
- dl.dprob = 1 / prob
- dl.dshape = 0 * y
- maxy = max(y)
+ prob <- eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape <- eta2theta(eta[, 2], .lshape, earg = .eshape)
+ shape1 <- prob / shape; shape2 <- (1 - prob) / shape;
+ dprob.deta <- dtheta.deta(prob, .lprob, earg = .eprob)
+ dshape.deta <- dtheta.deta(shape, .lshape, earg = .eshape)
+ dl.dprob <- 1 / prob
+ dl.dshape <- 0 * y
+ maxy <- max(y)
for (ii in 1:maxy) {
- index = ii <= y
- dl.dprob[index] = dl.dprob[index] -
- 1/(1-prob[index]+(ii-1)*shape[index])
- dl.dshape[index] = dl.dshape[index] +
- (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
- (ii-1)/(1+(ii-1)*shape[index])
+ index <- (ii <= y)
+ dl.dprob[index] <- dl.dprob[index] -
+ 1/(1-prob[index]+(ii-1)*shape[index])
+ dl.dshape[index] <- dl.dshape[index] +
+ (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
+ (ii-1)/(1+(ii-1)*shape[index])
}
- dl.dshape = dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
+ dl.dshape <- dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
c(w) * cbind(dl.dprob * dprob.deta,
dl.dshape * dshape.deta)
}), list( .lprob = lprob, .lshape = lshape,
.eprob = eprob, .eshape = eshape ))),
weight = eval(substitute(expression({
- wz = matrix(0, n, dimm(M)) #3=dimm(2)
- wz[, iam(1, 1, M)] = 1 / prob^2
- moresum = .moreSummation
- maxsummation = round(maxy * moresum[1] + moresum[2])
+ wz <- matrix(0, n, dimm(M)) #3=dimm(2)
+ wz[, iam(1, 1, M)] <- 1 / prob^2
+ moresum <- .moreSummation
+ maxsummation <- round(maxy * moresum[1] + moresum[2])
for (ii in 3:maxsummation) {
- temp7 = 1 - pbetageom(q = ii-1-1, shape1 = shape1,
- shape2 = shape2)
- denom1 = (1-prob+(ii-2)*shape)^2
- denom2 = (1+(ii-2)*shape)^2
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + temp7 / denom1
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] - (ii-2) * temp7 / denom1
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] + (ii-2)^2 * temp7 / denom1 -
+ temp7 <- 1 - pbetageom(q = ii-1-1, shape1 = shape1,
+ shape2 = shape2)
+ denom1 <- (1-prob+(ii-2)*shape)^2
+ denom2 <- (1+(ii-2)*shape)^2
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + temp7 / denom1
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] - (ii-2) * temp7 / denom1
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + (ii-2)^2 * temp7 / denom1 -
(ii-1)^2 * temp7 / denom2
if (max(temp7) < .tolerance ) break;
}
- ii = 2
- temp7 = 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
- denom1 = (1-prob+(ii-2)*shape)^2
- denom2 = (1+(ii-2)*shape)^2
-
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + temp7 / denom1
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - (ii-1)^2 * temp7 / denom2
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dprob.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dshape.deta^2
- wz[, iam(2, 1, M)] = wz[, iam(2, 1, M)] * dprob.deta * dshape.deta
+ ii <- 2
+ temp7 <- 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
+ denom1 <- (1-prob+(ii-2)*shape)^2
+ denom2 <- (1+(ii-2)*shape)^2
+
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + temp7 / denom1
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - (ii-1)^2 * temp7 / denom2
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dprob.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dshape.deta^2
+ wz[, iam(2, 1, M)] <- wz[, iam(2, 1, M)] * dprob.deta * dshape.deta
c(w) * wz
}), list( .lprob = lprob, .lshape = lshape,
.eprob = eprob, .eshape = eshape,
@@ -1699,9 +1774,9 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
- iprob1 = NULL, iprob2 = NULL,
- zero = NULL)
-{
+ iprob1 = NULL, iprob2 = NULL,
+ parallel = FALSE, apply.parint = TRUE,
+ zero = NULL) {
lprob1 <- as.list(substitute(lprob1))
eprob1 <- link2list(lprob1)
lprob1 <- attr(eprob1, "function.name")
@@ -1711,7 +1786,6 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
lprob2 <- attr(eprob2, "function.name")
-
if (length(iprob1) &&
(!is.Numeric(iprob1, positive = TRUE) ||
max(iprob1) >= 1))
@@ -1722,50 +1796,58 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
stop("bad input for argument 'iprob2'")
-
new("vglmff",
blurb = c("Sequential binomial distribution ",
"(Crowder and Sweeting, 1989)\n",
- "Links: ", namesof("prob1", lprob1, earg = eprob1), ", ",
- namesof("prob2", lprob2, earg = eprob2)),
+ "Links: ",
+ namesof("prob1", lprob1, earg = eprob1), ", ",
+ namesof("prob2", lprob2, earg = eprob2)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ apply.int = .apply.parint )
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .parallel = parallel,
+ .apply.parint = apply.parint,
+ .zero = zero ))),
initialize = eval(substitute(expression({
if (!is.vector(w))
stop("the 'weights' argument must be a vector")
+
if (any(abs(w - round(w)) > 0.000001))
stop("the 'weights' argument does not seem to be integer-valued")
+
if (ncol(y <- cbind(y)) != 2)
stop("the response must be a 2-column matrix")
+
if (any(y < 0 | y > 1))
stop("the response must have values between 0 and 1")
- w = round(w)
- rvector = w * y[, 1]
+
+ w <- round(w)
+ rvector <- w * y[, 1]
if (any(abs(rvector - round(rvector)) > 1.0e-8))
warning("number of successes in column one ",
"should be integer-valued")
- svector = rvector * y[, 2]
+ svector <- rvector * y[, 2]
if (any(abs(svector - round(svector)) > 1.0e-8))
warning("number of successes in",
" column two should be integer-valued")
- predictors.names =
+ predictors.names <-
c(namesof("prob1", .lprob1,earg = .eprob1, tag = FALSE),
namesof("prob2", .lprob2,earg = .eprob2, tag = FALSE))
- prob1.init = if (is.Numeric( .iprob1))
+ prob1.init <- if (is.Numeric( .iprob1))
rep( .iprob1 , len = n) else
rep(weighted.mean(y[, 1], w = w), len = n)
- prob2.init = if (is.Numeric( .iprob2 ))
+ prob2.init <- if (is.Numeric( .iprob2 ))
rep( .iprob2 , length = n) else
rep(weighted.mean(y[, 2], w = w*y[, 1]),
length = n)
if (!length(etastart)) {
- etastart =
+ etastart <-
cbind(theta2eta(prob1.init, .lprob1, earg = .eprob1),
theta2eta(prob2.init, .lprob2, earg = .eprob2))
}
@@ -1773,65 +1855,75 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
.lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+ prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
cbind(prob1, prob2)
}, list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
last = eval(substitute(expression({
- misc$link = c("prob1" = .lprob1, "prob2" = .lprob2)
- misc$earg <- list(prob1 = .eprob1, prob2 = .eprob2)
+ misc$link <- c("prob1" = .lprob1 , "prob2" = .lprob2 )
+
+ misc$earg <- list("prob1" = .eprob1 , "prob2" = .eprob2 )
- misc$expected = TRUE
- misc$zero = .zero
+ misc$expected <- TRUE
+ misc$zero <- .zero
+ misc$parallel <- .parallel
+ misc$apply.parint <- .apply.parint
}), list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2,
+ .parallel = parallel,
+ .apply.parint = apply.parint,
.zero = zero ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
- smallno = 100 * .Machine$double.eps
- prob1 = pmax(prob1, smallno)
- prob1 = pmin(prob1, 1-smallno)
- prob2 = pmax(prob2, smallno)
- prob2 = pmin(prob2, 1-smallno)
- mvector = w
- rvector = w * y[, 1]
- svector = rvector * y[, 2]
+ prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+
+ smallno <- 100 * .Machine$double.eps
+ prob1 <- pmax(prob1, smallno)
+ prob1 <- pmin(prob1, 1-smallno)
+ prob2 <- pmax(prob2, smallno)
+ prob2 <- pmin(prob2, 1-smallno)
+ mvector <- w
+ rvector <- w * y[, 1]
+ svector <- rvector * y[, 2]
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(rvector * log(prob1) + (mvector-rvector) * log1p(-prob1) +
- svector * log(prob2) + (rvector-svector) * log1p(-prob2))
+ ans1 <-
+ sum(dbinom(rvector, size = mvector, prob = prob1, log = TRUE) +
+ dbinom(svector, size = rvector, prob = prob2, log = TRUE))
+
+ ans1
}
}, list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
vfamily = c("seq2binomial"),
deriv = eval(substitute(expression({
- prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
- smallno = 100 * .Machine$double.eps
- prob1 = pmax(prob1, smallno)
- prob1 = pmin(prob1, 1-smallno)
- prob2 = pmax(prob2, smallno)
- prob2 = pmin(prob2, 1-smallno)
- dprob1.deta = dtheta.deta(prob1, .lprob1, earg = .eprob1)
- dprob2.deta = dtheta.deta(prob2, .lprob2, earg = .eprob2)
-
- mvector = w
- rvector = w * y[, 1]
- svector = rvector * y[, 2]
-
- dl.dprob1 = rvector / prob1 - (mvector-rvector) / (1-prob1)
- dl.dprob2 = svector / prob2 - (rvector-svector) / (1-prob2)
+ prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+ smallno <- 100 * .Machine$double.eps
+ prob1 <- pmax(prob1, smallno)
+ prob1 <- pmin(prob1, 1-smallno)
+ prob2 <- pmax(prob2, smallno)
+ prob2 <- pmin(prob2, 1-smallno)
+
+ dprob1.deta <- dtheta.deta(prob1, .lprob1, earg = .eprob1)
+ dprob2.deta <- dtheta.deta(prob2, .lprob2, earg = .eprob2)
+
+ mvector <- w
+ rvector <- w * y[, 1]
+ svector <- rvector * y[, 2]
+
+ dl.dprob1 <- rvector / prob1 - (mvector-rvector) / (1-prob1)
+ dl.dprob2 <- svector / prob2 - (rvector-svector) / (1-prob2)
cbind(dl.dprob1 * dprob1.deta, dl.dprob2 * dprob2.deta)
}), list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
weight = eval(substitute(expression({
- wz = matrix(0, n, M)
- wz[, iam(1, 1, M)] = (dprob1.deta^2) / (prob1 * (1-prob1))
- wz[, iam(2, 2, M)] = (dprob2.deta^2) * prob1 / (prob2 * (1-prob2))
+ wz <- matrix(0, n, M)
+ wz[, iam(1, 1, M)] <- (dprob1.deta^2) / (prob1 * (1-prob1))
+ wz[, iam(2, 2, M)] <- (dprob2.deta^2) * prob1 / (prob2 * (1-prob2))
c(w) * wz
}), list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))))
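
The revised @loglikelihood for seq2binomial above replaces the hand-expanded sum with two ordinary binomial log-densities: r successes out of m first-stage trials at prob1, then s successes out of those r at prob2. A toy evaluation of that decomposition with made-up counts:

  m <- 20; r <- 12; s <- 7       # trials, first-stage successes, second-stage successes
  prob1 <- 0.6; prob2 <- 0.55
  dbinom(r, size = m, prob = prob1, log = TRUE) +
    dbinom(s, size = r, prob = prob2, log = TRUE)
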
@@ -1844,8 +1936,7 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
loratio = "loge",
imu12 = NULL, iphi12 = NULL,
ioratio = NULL,
- zero = 2:3, tol = 0.001, addRidge = 0.001)
-{
+ zero = 2:3, tol = 0.001, addRidge = 0.001) {
lmu12 <- as.list(substitute(lmu12))
@@ -1882,34 +1973,34 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
namesof("phi12", lphi12, earg = ephi12), ", ",
namesof("oratio", loratio, earg = eoratio)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
eval(process.binomial2.data.vgam)
- predictors.names = c(
- namesof("mu12", .lmu12, earg = .emu12, short = TRUE),
+ predictors.names <- c(
+ namesof("mu12", .lmu12 , earg = .emu12 , short = TRUE),
namesof("phi12", .lphi12, earg = .ephi12, short = TRUE),
namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
- propY1.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
- propY2.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
+ propY1.eq.0 <- weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
+ propY2.eq.0 <- weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
if (length( .iphi12) && any( .iphi12 > propY1.eq.0))
warning("iphi12 must be less than the sample proportion of Y1==0")
if (length( .iphi12) && any( .iphi12 > propY2.eq.0))
warning("iphi12 must be less than the sample proportion of Y2==0")
if (!length(etastart)) {
- pstar.init = ((mu[, 3]+mu[, 4]) + (mu[, 2]+mu[, 4])) / 2
- phi.init = if (length(.iphi12)) rep(.iphi12, len = n) else
+ pstar.init <- ((mu[, 3]+mu[, 4]) + (mu[, 2]+mu[, 4])) / 2
+ phi.init <- if (length(.iphi12)) rep(.iphi12, len = n) else
min(propY1.eq.0 * 0.95, propY2.eq.0 * 0.95, pstar.init/1.5)
- oratio.init = if (length( .ioratio)) rep( .ioratio, len = n) else
+ oratio.init <- if (length( .ioratio)) rep( .ioratio, len = n) else
mu[, 4]*mu[, 1]/(mu[, 2]*mu[, 3])
- mu12.init = if (length(.imu12)) rep(.imu12, len = n) else
+ mu12.init <- if (length(.imu12)) rep(.imu12, len = n) else
pstar.init / (1-phi.init)
- etastart = cbind(
- theta2eta(mu12.init, .lmu12, earg = .emu12),
+ etastart <- cbind(
+ theta2eta(mu12.init, .lmu12 , earg = .emu12 ),
theta2eta(phi.init, .lphi12, earg = .ephi12),
theta2eta(oratio.init, .loratio, earg = .eoratio))
}
@@ -1917,30 +2008,33 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
.emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio,
.imu12 = imu12, .iphi12 = iphi12, .ioratio = ioratio ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- A1vec = eta2theta(eta[, 1], .lmu12, earg = .emu12)
- phivec = eta2theta(eta[, 2], .lphi12, earg = .ephi12)
- pmargin = matrix((1 - phivec) * A1vec, nrow(eta), 2)
- oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
- a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
- temp = sqrt(a.temp^2 + b.temp)
- pj4 = ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
+ A1vec <- eta2theta(eta[, 1], .lmu12 , earg = .emu12 )
+ phivec <- eta2theta(eta[, 2], .lphi12, earg = .ephi12)
+ pmargin <- matrix((1 - phivec) * A1vec, nrow(eta), 2)
+ oratio <- eta2theta(eta[, 3], .loratio, earg = .eoratio)
+ a.temp <- 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp <- -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp <- sqrt(a.temp^2 + b.temp)
+ pj4 <- ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
(a.temp-temp)/(2*(oratio-1)))
- pj2 = pmargin[, 2] - pj4
- pj3 = pmargin[, 1] - pj4
+ pj2 <- pmargin[, 2] - pj4
+ pj3 <- pmargin[, 1] - pj4
cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
}, list( .tol = tol,
.lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
.emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
last = eval(substitute(expression({
- misc$link = c("mu12"= .lmu12 , "phi12" = .lphi12,
- "oratio" = .loratio)
- misc$earg = list("mu12"= .emu12 , "phi12" = .ephi12,
- "oratio" = .eoratio)
-
- misc$tol = .tol
- misc$expected = TRUE
- misc$addRidge = .addRidge
+ misc$link <- c("mu12"= .lmu12 ,
+ "phi12" = .lphi12,
+ "oratio" = .loratio)
+
+ misc$earg <- list("mu12"= .emu12 ,
+ "phi12" = .ephi12,
+ "oratio" = .eoratio)
+
+ misc$tol <- .tol
+ misc$expected <- TRUE
+ misc$addRidge <- .addRidge
}), list( .tol = tol, .addRidge = addRidge,
.lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
.emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
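
In the @linkinv hunk above, the joint probability is recovered from the two margins and the odds ratio as the root of a quadratic: with a = 1 + (p1 + p2)(psi - 1) and b = -4 psi (psi - 1) p1 p2, pj4 = (a - sqrt(a^2 + b)) / (2 (psi - 1)), falling back to p1 * p2 when psi is within .tol of 1. A standalone sketch of that step (p11_from_oratio is a made-up name):

  p11_from_oratio <- function(p1, p2, oratio, tol = 0.001) {
    # joint probability P(Y1 = 1, Y2 = 1) given the margins and odds ratio
    if (abs(oratio - 1) < tol) return(p1 * p2)
    a <- 1 + (p1 + p2) * (oratio - 1)
    b <- -4 * oratio * (oratio - 1) * p1 * p2
    (a - sqrt(a^2 + b)) / (2 * (oratio - 1))
  }
  # e.g. p11_from_oratio(0.4, 0.3, oratio = 2.5)  # about 0.167
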
@@ -1948,15 +2042,15 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -1964,32 +2058,32 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
},
vfamily = c("zipebcom"),
deriv = eval(substitute(expression({
- A1vec = eta2theta(eta[, 1], .lmu12, earg = .emu12)
- smallno = .Machine$double.eps^(2/4)
- A1vec[A1vec > 1.0 -smallno] = 1.0 - smallno
-
- phivec = eta2theta(eta[, 2], .lphi12, earg = .ephi12)
- pmargin = matrix((1 - phivec) * A1vec, n, 2)
- oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
-
- Vab = 1 / (1/mu[, 1] + 1/mu[, 2] + 1/mu[, 3] + 1/mu[, 4])
- Vabc = 1/mu[, 1] + 1/mu[, 2]
- denom3 = 2 * oratio * mu[, 2] + mu[, 1] + mu[, 4]
- temp1 = oratio * mu[, 2] + mu[, 4]
- dp11star.dp1unstar = 2*(1-phivec)*Vab * Vabc
- dp11star.dphi1 = -2 * A1vec * Vab * Vabc
- dp11star.doratio = Vab / oratio
- yandmu = (y[, 1]/mu[, 1] - y[, 2]/mu[, 2] - y[, 3]/mu[, 3] +
- y[, 4]/mu[, 4])
- dp11.doratio = Vab / oratio
- check.dl.doratio = yandmu * dp11.doratio
-
- cyandmu = (y[, 2]+y[, 3])/mu[, 2] - 2 * y[, 1]/mu[, 1]
- dl.dmu1 = dp11star.dp1unstar * yandmu + (1-phivec) * cyandmu
- dl.dphi1 = dp11star.dphi1 * yandmu - A1vec * cyandmu
- dl.doratio = check.dl.doratio
+ A1vec <- eta2theta(eta[, 1], .lmu12 , earg = .emu12 )
+ smallno <- .Machine$double.eps^(2/4)
+ A1vec[A1vec > 1.0 -smallno] <- 1.0 - smallno
+
+ phivec <- eta2theta(eta[, 2], .lphi12, earg = .ephi12)
+ pmargin <- matrix((1 - phivec) * A1vec, n, 2)
+ oratio <- eta2theta(eta[, 3], .loratio, earg = .eoratio)
+
+ Vab <- 1 / (1/mu[, 1] + 1/mu[, 2] + 1/mu[, 3] + 1/mu[, 4])
+ Vabc <- 1/mu[, 1] + 1/mu[, 2]
+ denom3 <- 2 * oratio * mu[, 2] + mu[, 1] + mu[, 4]
+ temp1 <- oratio * mu[, 2] + mu[, 4]
+ dp11star.dp1unstar <- 2*(1-phivec)*Vab * Vabc
+ dp11star.dphi1 <- -2 * A1vec * Vab * Vabc
+ dp11star.doratio <- Vab / oratio
+ yandmu <- (y[, 1]/mu[, 1] - y[, 2]/mu[, 2] - y[, 3]/mu[, 3] +
+ y[, 4]/mu[, 4])
+ dp11.doratio <- Vab / oratio
+ check.dl.doratio <- yandmu * dp11.doratio
+
+ cyandmu <- (y[, 2]+y[, 3])/mu[, 2] - 2 * y[, 1]/mu[, 1]
+ dl.dmu1 <- dp11star.dp1unstar * yandmu + (1-phivec) * cyandmu
+ dl.dphi1 <- dp11star.dphi1 * yandmu - A1vec * cyandmu
+ dl.doratio <- check.dl.doratio
dthetas.detas =
- cbind(dtheta.deta(A1vec, .lmu12, earg = .emu12),
+ cbind(dtheta.deta(A1vec, .lmu12 , earg = .emu12 ),
dtheta.deta(phivec, .lphi12, earg = .ephi12),
dtheta.deta(oratio, .loratio, earg = .eoratio))
c(w) * cbind(dl.dmu1,
@@ -1998,25 +2092,25 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
}), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
.emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
weight = eval(substitute(expression({
- wz = matrix(0, n, 4)
- alternwz11 = 2 * (1-phivec)^2 *
- (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
- (dthetas.detas[, 1])^2
- wz[, iam(1, 1, M)] = alternwz11
+ wz <- matrix(0, n, 4)
+ alternwz11 <- 2 * (1-phivec)^2 *
+ (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[, 1])^2
+ wz[, iam(1, 1, M)] <- alternwz11
- alternwz22 = 2* A1vec^2 *(2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
- (dthetas.detas[, 2])^2
- wz[, iam(2, 2, M)] = alternwz22
+ alternwz22 <- 2* A1vec^2 *(2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[, 2])^2
+ wz[, iam(2, 2, M)] <- alternwz22
- alternwz12 = -2*A1vec*(1-phivec)*
- (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
- dthetas.detas[, 1] * dthetas.detas[, 2]
- wz[, iam(1, 2, M)] = alternwz12
+ alternwz12 <- -2*A1vec*(1-phivec)*
+ (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ dthetas.detas[, 1] * dthetas.detas[, 2]
+ wz[, iam(1, 2, M)] <- alternwz12
- alternwz33 = (Vab / oratio^2) * dthetas.detas[, 3]^2
- wz[, iam(3, 3, M)] = alternwz33
+ alternwz33 <- (Vab / oratio^2) * dthetas.detas[, 3]^2
+ wz[, iam(3, 3, M)] <- alternwz33
- wz[, 1:2] = wz[, 1:2] * (1 + .addRidge)
+ wz[, 1:2] <- wz[, 1:2] * (1 + .addRidge)
c(w) * wz
}), list( .addRidge = addRidge ))))
}
@@ -2033,9 +2127,7 @@ if (FALSE)
erhopos = list(), erhoneg = list(),
irhopos = NULL, irhoneg = NULL,
iprob1 = NULL, iprob2 = NULL,
- zero = NULL)
-{
- print("hi 20100603")
+ zero = NULL) {
lrhopos <- as.list(substitute(lrhopos))
erhopos <- link2list(lrhopos)
@@ -2046,180 +2138,150 @@ if (FALSE)
lrhoneg <- attr(erhoneg, "function.name")
- new("vglmff",
- blurb = c("Lusted (1968)'s model\n",
- "Links: ",
- namesof("rhopos", lrhopos, earg = erhopos), ", ",
- namesof("rhoneg", lrhoneg, earg = erhoneg)),
+ new("vglmff",
+ blurb = c("Lusted (1968)'s model\n",
+ "Links: ",
+ namesof("rhopos", lrhopos, earg = erhopos), ", ",
+ namesof("rhoneg", lrhoneg, earg = erhoneg)),
initialize = eval(substitute(expression({
- print("head(y, 3) start")
- print( head(y, 3) )
eval(process.binomial2.data.vgam)
- print("head(mu, 3)")
- print( head(mu, 3) )
- print("head(y, 3) processed")
- print( head(y, 3) )
- print("head(w, 3)")
- print( head(w, 3) )
- predictors.names = c(
+ predictors.names <- c(
namesof("rhopos", .lrhopos, earg = .erhopos, short = TRUE),
namesof("rhoneg", .lrhoneg, earg = .erhoneg, short = TRUE))
if (!length(etastart)) {
- nnn1 = round(w * (y[, 1] + y[, 2]))
- nnn2 = round(w * (y[, 3] + y[, 4]))
- print("head(nnn1, 3)")
- print( head(nnn1, 3) )
- print("head(nnn2, 3)")
- print( head(nnn2, 3) )
- init.pee1 = if (length( .iprob1 )) rep( .iprob1 , len = n) else
- mu[, 1] / (mu[, 1] + mu[, 2])
- init.pee2 = if (length( .iprob2 )) rep( .iprob2 , len = n) else
- mu[, 3] / (mu[, 3] + mu[, 4])
- init.rhopos = pmax(1.1, init.pee1 / init.pee2) # Should be > 1
- init.rhoneg = pmin(0.4, (1 - init.pee1) / (1 - init.pee2)) # c. 0
- print("head(init.rhopos, 3)")
- print( head(init.rhopos, 3) )
- print("head(init.rhoneg, 3)")
- print( head(init.rhoneg, 3) )
-
- if (length( .irhopos)) init.rhopos = rep( .irhopos , len = n)
- if (length( .irhoneg)) init.rhoneg = rep( .irhoneg , len = n)
- etastart = cbind(theta2eta(init.rhopos, .lrhopos, earg = .erhopos),
- theta2eta(init.rhoneg, .lrhoneg, earg = .erhoneg))
- print("etastart[1:3,]")
- print( etastart[1:3,] )
+ nnn1 <- round(w * (y[, 1] + y[, 2]))
+ nnn2 <- round(w * (y[, 3] + y[, 4]))
+ init.pee1 <- if (length( .iprob1 )) rep( .iprob1 , len = n) else
+ mu[, 1] / (mu[, 1] + mu[, 2])
+ init.pee2 <- if (length( .iprob2 )) rep( .iprob2 , len = n) else
+ mu[, 3] / (mu[, 3] + mu[, 4])
+ init.rhopos <- pmax(1.1, init.pee1 / init.pee2) # Should be > 1
+ init.rhoneg <- pmin(0.4, (1 - init.pee1) / (1 - init.pee2)) # c. 0
+
+ if (length( .irhopos)) init.rhopos <- rep( .irhopos , len = n)
+ if (length( .irhoneg)) init.rhoneg <- rep( .irhoneg , len = n)
+ etastart <- cbind(theta2eta(init.rhopos, .lrhopos, earg = .erhopos),
+ theta2eta(init.rhoneg, .lrhoneg, earg = .erhoneg))
}
}), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg,
- .iprob1 = iprob1, .iprob2 = iprob2,
- .irhopos = irhopos, .irhoneg = irhoneg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
- pee2 = (1 - rhoneg) / (rhopos - rhoneg)
- pee1 = pee2 * rhopos
- cbind(rhopos, rhoneg, "mu1" = pee1, "mu2" = pee2)
- }, list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg ))),
- last = eval(substitute(expression({
- misc$link = c("rhopos" = .lrhopos, "rhoneg" = .lrhoneg)
- misc$earg = list("rhopos" = .erhopos, "rhoneg" = .erhoneg)
- misc$expected = TRUE
- }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg,
- .irhopos = irhopos, .irhoneg = irhoneg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
- pee2 = (1 - rhoneg) / (rhopos - rhoneg)
- pee1 = pee2 * rhopos
- if (min(pee1) <= 0.5) {
- warning("too small pee1 values")
- pee1[pee1 <= 0.5] = 0.66
- }
- if (max(pee1) >= 1) {
- warning("large pee1 values")
- pee1[pee1 >= 1] = 0.99
- }
- if (min(pee2) <= 0.0) {
- warning("too small pee2 values")
- pee2[pee2 <= 0.0] = 0.01
- }
- if (max(pee2) >= 0.5) {
- warning("too large pee2 values")
- pee2[pee2 >= 0.5] = 0.44
- }
+ .erhopos = erhopos, .erhoneg = erhoneg,
+ .iprob1 = iprob1, .iprob2 = iprob2,
+ .irhopos = irhopos, .irhoneg = irhoneg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ rhopos <- eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg <- eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
+ pee2 <- (1 - rhoneg) / (rhopos - rhoneg)
+ pee1 <- pee2 * rhopos
+ cbind(rhopos, rhoneg, "mu1" = pee1, "mu2" = pee2)
+ }, list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ .erhopos = erhopos, .erhoneg = erhoneg ))),
+ last = eval(substitute(expression({
+ misc$link <- c("rhopos" = .lrhopos, "rhoneg" = .lrhoneg )
+
+ misc$earg <- list("rhopos" = .erhopos, "rhoneg" = .erhoneg )
+
+ misc$expected <- TRUE
+ }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ .erhopos = erhopos, .erhoneg = erhoneg,
+ .irhopos = irhopos, .irhoneg = irhoneg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rhopos <- eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg <- eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
+ pee2 <- (1 - rhoneg) / (rhopos - rhoneg)
+ pee1 <- pee2 * rhopos
+ if (min(pee1) <= 0.5) {
+ warning("too small pee1 values")
+ pee1[pee1 <= 0.5] <- 0.66
+ }
+ if (max(pee1) >= 1) {
+ warning("large pee1 values")
+ pee1[pee1 >= 1] <- 0.99
+ }
+ if (min(pee2) <= 0.0) {
+ warning("too small pee2 values")
+ pee2[pee2 <= 0.0] <- 0.01
+ }
+ if (max(pee2) >= 0.5) {
+ warning("too large pee2 values")
+ pee2[pee2 >= 0.5] <- 0.44
+ }
+
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ nnn1 <- round(w * (y[, 1] + y[, 2]))
+ nnn2 <- round(w * (y[, 3] + y[, 4]))
+ index1 <- nnn1 > 0
+ index2 <- nnn2 > 0
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- nnn1 = round(w * (y[, 1] + y[, 2]))
- nnn2 = round(w * (y[, 3] + y[, 4]))
- index1 = nnn1 > 0
- index2 = nnn2 > 0
-
- print("head(round(w[index1] * y[index1, 1]), 18)")
- print( head(round(w[index1] * y[index1, 1]), 18) )
- print("head(nnn1[index1], 18)")
- print( head(nnn1[index1], 18) )
- print("head(pee1[index1], 18)")
- print( head(pee1[index1], 18) )
- print("summary(pee1[index1])")
- print( summary(pee1[index1]) )
- print("summary(pee2[index2])")
- print( summary(pee2[index2]) )
print(head(dbinom(round(w[index1] * y[index1, 1]), nnn1[index1],
- prob = pee1[index1], log = TRUE), 18))
+ prob = pee1[index1], log = TRUE), 18))
- sum(dbinom(round(w[index1] * y[index1, 1]), nnn1[index1],
- prob = pee1[index1], log = TRUE)) +
- sum(dbinom(round(w[index2] * y[index2, 3]), nnn2[index2],
- prob = pee2[index2], log = TRUE))
- }
- }, list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg,
- .irhopos = irhopos, .irhoneg = irhoneg ))),
- vfamily = c("lusted68", "binom2"),
- deriv = eval(substitute(expression({
- rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
- pee2 = (1 - rhoneg) / (rhopos - rhoneg)
- pee1 = pee2 * rhopos
- nnn1 = round(w * (y[, 1] + y[, 3]))
- nnn2 = round(w * (y[, 2] + y[, 4]))
-
- print("summary(pee1)")
- print( summary(pee1) )
- print("summary(pee2)")
- print( summary(pee2) )
- rhodif = rhopos - rhoneg
- drhopos.deta = dtheta.deta(rhopos, .lrhopos, earg = .erhopos)
- drhoneg.deta = dtheta.deta(rhoneg, .lrhoneg, earg = .erhoneg)
-
- dl1.drhopos = y[, 1] / rhopos + y[, 2] / (rhopos - 1) - 1 / rhodif
- dl1.drhoneg = -y[, 1] / (1 - rhoneg) + y[, 2] / rhoneg + 1 / rhodif
- dl2.drhopos = y[, 4] / (rhopos - 1) - 1 / rhodif
- dl2.drhoneg = -y[, 3] / (1 - rhoneg) + 1 / rhodif
- cbind((nnn1 * dl1.drhopos + nnn2 * dl2.drhopos) * drhopos.deta,
- (nnn1 * dl1.drhoneg + nnn2 * dl2.drhoneg) * drhoneg.deta)
- }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg,
- .irhopos = irhopos, .irhoneg = irhoneg ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, dimm(M)) # 3 = dimm(2)
-
-
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + nnn1 *
- (pee1 / rhopos^2 + (1 - pee1) / (rhopos - 1)^2 - 1 / rhodif^2)
-
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] + nnn1 *
- (pee1 / (1 - rhoneg)^2 + (1 - pee1) / rhoneg^2 - 1 / rhodif^2)
-
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] + nnn1 / rhodif^2
-
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + nnn2 *
- ((1 - pee2) / (rhopos - 1)^2 - 1 / rhodif^2)
-
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] + nnn2 *
- (pee2 / (1 - rhoneg)^2 - 1 / rhodif^2)
-
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] + nnn2 / rhodif^2
-
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * drhopos.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * drhoneg.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] * drhopos.deta * drhoneg.deta
-
- wz
- }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
- .erhopos = erhopos, .erhoneg = erhoneg,
- .irhopos = irhopos, .irhoneg = irhoneg ))))
+ sum(dbinom(round(w[index1] * y[index1, 1]), nnn1[index1],
+ prob = pee1[index1], log = TRUE)) +
+ sum(dbinom(round(w[index2] * y[index2, 3]), nnn2[index2],
+ prob = pee2[index2], log = TRUE))
+ }
+ }, list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ .erhopos = erhopos, .erhoneg = erhoneg,
+ .irhopos = irhopos, .irhoneg = irhoneg ))),
+ vfamily = c("lusted68", "binom2"),
+ deriv = eval(substitute(expression({
+ rhopos <- eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg <- eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
+ pee2 <- (1 - rhoneg) / (rhopos - rhoneg)
+ pee1 <- pee2 * rhopos
+ nnn1 <- round(w * (y[, 1] + y[, 3]))
+ nnn2 <- round(w * (y[, 2] + y[, 4]))
+
+ rhodif <- rhopos - rhoneg
+ drhopos.deta <- dtheta.deta(rhopos, .lrhopos, earg = .erhopos)
+ drhoneg.deta <- dtheta.deta(rhoneg, .lrhoneg, earg = .erhoneg)
+
+ dl1.drhopos <- y[, 1] / rhopos + y[, 2] / (rhopos - 1) - 1 / rhodif
+ dl1.drhoneg <- -y[, 1] / (1 - rhoneg) + y[, 2] / rhoneg + 1 / rhodif
+ dl2.drhopos <- y[, 4] / (rhopos - 1) - 1 / rhodif
+ dl2.drhoneg <- -y[, 3] / (1 - rhoneg) + 1 / rhodif
+ cbind((nnn1 * dl1.drhopos + nnn2 * dl2.drhopos) * drhopos.deta,
+ (nnn1 * dl1.drhoneg + nnn2 * dl2.drhoneg) * drhoneg.deta)
+ }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ .erhopos = erhopos, .erhoneg = erhoneg,
+ .irhopos = irhopos, .irhoneg = irhoneg ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(0, n, dimm(M)) # 3 = dimm(2)
+
+
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + nnn1 *
+ (pee1 / rhopos^2 + (1 - pee1) / (rhopos - 1)^2 - 1 / rhodif^2)
+
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + nnn1 *
+ (pee1 / (1 - rhoneg)^2 + (1 - pee1) / rhoneg^2 - 1 / rhodif^2)
+
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] + nnn1 / rhodif^2
+
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + nnn2 *
+ ((1 - pee2) / (rhopos - 1)^2 - 1 / rhodif^2)
+
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + nnn2 *
+ (pee2 / (1 - rhoneg)^2 - 1 / rhodif^2)
+
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] + nnn2 / rhodif^2
+
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * drhopos.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * drhoneg.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * drhopos.deta * drhoneg.deta
+
+ wz
+ }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ .erhopos = erhopos, .erhoneg = erhoneg,
+ .irhopos = irhopos, .irhoneg = irhoneg ))))
}
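
In the reindented lusted68 block above, @linkinv maps the two ratios back to probabilities via pee2 = (1 - rhoneg) / (rhopos - rhoneg) and pee1 = rhopos * pee2. A quick numeric check with arbitrary values:

  rhopos <- 2.0; rhoneg <- 0.25
  p2 <- (1 - rhoneg) / (rhopos - rhoneg)    # 0.4286
  p1 <- rhopos * p2                         # 0.8571
  c(p1 / p2, (1 - p1) / (1 - p2))           # recovers rhopos and rhoneg
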
@@ -2228,10 +2290,10 @@ if (FALSE)
binom2.Rho <- function(rho = 0, imu1 = NULL, imu2 = NULL,
- exchangeable = FALSE, nsimEIM = NULL)
-{
- lmu12 = "probit"
- emu12 = list()
+ exchangeable = FALSE, nsimEIM = NULL) {
+ lmu12 <- "probit"
+ emu12 <- list()
+
if (is.Numeric(nsimEIM)) {
if (!is.Numeric(nsimEIM, allowable.length = 1,
integer.valued = TRUE))
@@ -2240,142 +2302,150 @@ if (FALSE)
warning("'nsimEIM' should be an integer greater than 100")
}
- new("vglmff",
- blurb = c("Bivariate probit model with rho = ", format(rho), "\n",
- "Links: ",
- namesof("mu1", lmu12, earg = emu12), ", ",
- namesof("mu2", lmu12, earg = emu12)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1, 1), 2, 1), x,
- .exchangeable, constraints, intercept.apply = TRUE)
- }), list( .exchangeable = exchangeable ))),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- eval(process.binomial2.data.vgam)
- predictors.names = c(
- namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
- namesof("mu2", .lmu12, earg = .emu12, short = TRUE))
+ new("vglmff",
+ blurb = c("Bivariate probit model with rho = ", format(rho), "\n",
+ "Links: ",
+ namesof("mu1", lmu12, earg = emu12), ", ",
+ namesof("mu2", lmu12, earg = emu12)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1, 1), 2, 1), x,
+ .exchangeable , constraints,
+ apply.int = TRUE)
+ }), list( .exchangeable = exchangeable ))),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
+ predictors.names <- c(
+ namesof("mu1", .lmu12 , earg = .emu12 , short = TRUE),
+ namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE))
- if (is.null( .nsimEIM)) {
- save.weight <- control$save.weight <- FALSE
- }
- if (is.null(etastart)) {
- mu1.init= if (is.Numeric(.imu1))
- rep(.imu1, length = n) else
- mu[, 3] + mu[, 4]
- mu2.init= if (is.Numeric(.imu2))
- rep(.imu2, length = n) else
- mu[, 2] + mu[, 4]
- etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
- theta2eta(mu2.init, .lmu12, earg = .emu12))
- }
- }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
- .imu1 = imu1, .imu2 = imu2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
- eta2theta(eta[, 2], .lmu12, earg = .emu12))
- rhovec = rep( .rho , len = nrow(eta))
- p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
- p01 = pmin(pmargin[, 2] - p11, pmargin[, 2])
- p10 = pmin(pmargin[, 1] - p11, pmargin[, 1])
- p00 = 1 - p01 - p10 - p11
- ansmat = abs(cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11))
- ansmat / rowSums(ansmat)
- }, list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
- last = eval(substitute(expression({
- misc$link = c(mu1 = .lmu12, mu2 = .lmu12)
- misc$earg = list(mu1 = .emu12, mu2 = .emu12)
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- misc$rho = .rho
- }), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho,
- .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ if (is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
+ if (is.null(etastart)) {
+ mu1.init= if (is.Numeric(.imu1))
+ rep(.imu1, length = n) else
+ mu[, 3] + mu[, 4]
+ mu2.init= if (is.Numeric(.imu2))
+ rep(.imu2, length = n) else
+ mu[, 2] + mu[, 4]
+ etastart <-
+ cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
+ theta2eta(mu2.init, .lmu12 , earg = .emu12 ))
+ }
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
+ .imu1 = imu1, .imu2 = imu2 ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+ eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+ rhovec <- rep( .rho , len = nrow(eta))
+ p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+ p01 <- pmin(pmargin[, 2] - p11, pmargin[, 2])
+ p10 <- pmin(pmargin[, 1] - p11, pmargin[, 1])
+ p00 <- 1 - p01 - p10 - p11
+ ansmat <- abs(cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11))
+ ansmat / rowSums(ansmat)
+ }, list( .lmu12 = lmu12,
+ .emu12 = emu12, .rho = rho ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu1 = .lmu12 , mu2 = .lmu12 )
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ misc$earg <- list(mu1 = .emu12 , mu2 = .emu12 )
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- }
- }, list( .rho = rho ))),
- vfamily = c("binom2.Rho", "binom2"),
- deriv = eval(substitute(expression({
- pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
- eta2theta(eta[, 2], .lmu12, earg = .emu12))
- rhovec = rep( .rho , len = nrow(eta))
- p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
- p01 = pmargin[, 2]-p11
- p10 = pmargin[, 1]-p11
- p00 = 1-p01-p10-p11
-
- ABmat = (eta[, 1:2] - rhovec*eta[, 2:1]) / sqrt(1-rhovec^2)
- PhiA = pnorm(ABmat[, 1])
- PhiB = pnorm(ABmat[, 2])
- onemPhiA = pnorm(ABmat[, 1], lower.tail = FALSE)
- onemPhiB = pnorm(ABmat[, 2], lower.tail = FALSE)
-
- smallno = 1000 * .Machine$double.eps
- p00[p00 < smallno] = smallno
- p01[p01 < smallno] = smallno
- p10[p10 < smallno] = smallno
- p11[p11 < smallno] = smallno
-
- dprob00 = dnorm2(eta[, 1], eta[, 2], rhovec)
- dl.dprob1 = PhiB*(y[, 4]/p11-y[, 2]/p01) +
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
+ misc$rho <- .rho
+ }), list( .lmu12 = lmu12,
+ .emu12 = emu12,
+ .rho = rho, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ smallno <- 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ }
+ }, list( .rho = rho ))),
+ vfamily = c("binom2.Rho", "binom2"),
+ deriv = eval(substitute(expression({
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+ eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+ rhovec <- rep( .rho , len = nrow(eta))
+ p11 <- pnorm2(eta[, 1], eta[, 2], cov12 = rhovec)
+ p01 <- pmargin[, 2]-p11
+ p10 <- pmargin[, 1]-p11
+ p00 <- 1-p01-p10-p11
+
+ ABmat <- (eta[, 1:2] -
+ rhovec * eta[, 2:1]) / sqrt(pmax(1e5 * .Machine$double.eps,
+ 1.0 - rhovec^2))
+ PhiA <- pnorm(ABmat[, 1])
+ PhiB <- pnorm(ABmat[, 2])
+ onemPhiA <- pnorm(ABmat[, 1], lower.tail = FALSE)
+ onemPhiB <- pnorm(ABmat[, 2], lower.tail = FALSE)
+
+ smallno <- 1000 * .Machine$double.eps
+ p00[p00 < smallno] <- smallno
+ p01[p01 < smallno] <- smallno
+ p10[p10 < smallno] <- smallno
+ p11[p11 < smallno] <- smallno
+
+ dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+ dl.dprob1 <- PhiB*(y[, 4]/p11-y[, 2]/p01) +
onemPhiB*(y[, 3]/p10-y[, 1]/p00)
- dl.dprob2 = PhiA*(y[, 4]/p11-y[, 3]/p10) +
+ dl.dprob2 <- PhiA*(y[, 4]/p11-y[, 3]/p10) +
onemPhiA*(y[, 2]/p01-y[, 1]/p00)
- dprob1.deta = dtheta.deta(pmargin[, 1], .lmu12, earg = .emu12)
- dprob2.deta = dtheta.deta(pmargin[, 2], .lmu12, earg = .emu12)
- dthetas.detas = cbind(dprob1.deta, dprob2.deta)
+ dprob1.deta <- dtheta.deta(pmargin[, 1], .lmu12 , earg = .emu12 )
+ dprob2.deta <- dtheta.deta(pmargin[, 2], .lmu12 , earg = .emu12 )
+ dthetas.detas <- cbind(dprob1.deta, dprob2.deta)
c(w) * cbind(dl.dprob1, dl.dprob2) * dthetas.detas
}), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM)) {
- d2l.dprob1prob1 = PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
- d2l.dprob2prob2 = PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
- d2l.dprob1prob2 = PhiA * (PhiB/p11 - onemPhiB/p10) +
- onemPhiA * (onemPhiB/p00 - PhiB/p01)
- wz = matrix(0, n, dimm(M)) # 6=dimm(M)
- wz[, iam(1, 1, M)] = d2l.dprob1prob1 * dprob1.deta^2
- wz[, iam(2, 2, M)] = d2l.dprob2prob2 * dprob2.deta^2
- wz[, iam(1, 2, M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ ned2l.dprob1prob1 <- PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
+ ned2l.dprob2prob2 <- PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
+ ned2l.dprob1prob2 <- PhiA * (PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ wz <- matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dprob1prob1 * dprob1.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dprob2prob2 * dprob2.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dprob1prob2 * dprob1.deta * dprob2.deta
} else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for (ii in 1:( .nsimEIM )) {
- ysim = rbinom2.rho(n = n, mu1 = pmargin[, 1],
- mu2 = pmargin[, 2],
+ ysim <- rbinom2.rho(n = n, mu1 = pmargin[, 1],
+ mu2 = pmargin[, 2],
twoCols = FALSE, rho = rhovec)
- dl.dprob1 = PhiB * (ysim[, 4]/p11-ysim[, 2]/p01) +
- onemPhiB * (ysim[, 3]/p10-ysim[, 1]/p00)
- dl.dprob2 = PhiA * (ysim[, 4]/p11-ysim[, 3]/p10) +
- onemPhiA * (ysim[, 2]/p01-ysim[, 1]/p00)
+ dl.dprob1 <- PhiB * (ysim[, 4]/p11-ysim[, 2]/p01) +
+ onemPhiB * (ysim[, 3]/p10-ysim[, 1]/p00)
+ dl.dprob2 <- PhiA * (ysim[, 4]/p11-ysim[, 3]/p10) +
+ onemPhiA * (ysim[, 2]/p01-ysim[, 1]/p00)
rm(ysim)
- temp3 = cbind(dl.dprob1, dl.dprob2)
- run.varcov = ((ii-1) * run.varcov +
- temp3[, ind1$row.index] *
- temp3[, ind1$col.index]) / ii
+ temp3 <- cbind(dl.dprob1, dl.dprob2)
+ run.varcov <- ((ii-1) * run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] *
- dthetas.detas[, ind1$col]
+ wz <- wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
}
c(w) * wz
}), list( .nsimEIM = nsimEIM ))))
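For readers following the simulated EIM above: the expected information is accumulated as a running mean of the score outer products over the nsimEIM draws. A minimal standalone sketch of just that update rule, using made-up normal scores (an assumption for illustration) rather than the binom2.Rho score functions, is:

  ## Sketch only (not part of the patch): running-average estimate of E[s s'].
  set.seed(1)
  nsim <- 200
  M <- 2
  run.varcov <- matrix(0, M, M)
  for (ii in 1:nsim) {
    sc <- rnorm(M)               # stand-in for one simulated score vector
    run.varcov <- ((ii - 1) * run.varcov + tcrossprod(sc)) / ii  # running mean
  }
  run.varcov                     # approximates E[s s'] (here, the identity matrix)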
@@ -2384,4 +2454,388 @@ if (FALSE)
+ binom2.rho.ss <-
+ function(lrho = "rhobit",
+ lmu = "probit", # added 20120817
+ imu1 = NULL, imu2 = NULL, irho = NULL,
+ imethod = 1,
+ zero = 3, exchangeable = FALSE,
+ grho = seq(-0.95, 0.95, by = 0.05)) {
+
+
+
+ lrho <- as.list(substitute(lrho))
+ e.rho <- link2list(lrho)
+ l.rho <- attr(e.rho, "function.name")
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ if (lmu != "probit")
+ warning("argument 'lmu' should be 'probit'. Changing it.")
+
+ lmu12 <- "probit" # But emu may contain some arguments.
+ emu12 <- emu # list()
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Bivariate probit model with sample selection\n",
+ "Links: ",
+ namesof("mu1", lmu12, earg = emu12), ", ",
+ namesof("mu2", lmu12, earg = emu12), ", ",
+ namesof("rho", l.rho, earg = e.rho)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
+ .exchangeable , constraints,
+ apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 3,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+
+ if (!is.matrix(y))
+ stop("response must be a 2- or 3-column matrix")
+ ncoly <- ncol(y)
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.min = 1,
+ ncol.w.max = 1,
+ ncol.y.min = 2,
+ ncol.y.max = 3,
+ Is.integer.y = TRUE,
+ Is.nonnegative.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = ncoly,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+ if (!all(c(y) == 0 | c(y) == 1))
+ stop("response matrix must have values 0 and 1 only")
+
+
+ if (ncoly == 2) {
+ extra$ymat2col <- y
+ y <- cbind("0" = 1 - y[, 1],
+ "10" = y[, 1] * (1 - y[, 2]),
+ "11" = y[, 1] * y[, 2])
+ } else {
+ if (!all(rowSums(y) == 1))
+ stop("response matrix must have two 0s and one 1 in each row")
+ y1vec <- 1 - y[, 1] # Not a 0 means a 1.
+ y2vec <- ifelse(y1vec == 1, y[, 3], 0)
+ extra$ymat2col <- cbind(y1vec, y2vec)
+ }
+
+
+ predictors.names <- c(
+ namesof("mu1", .lmu12 , earg = .emu12 , short = TRUE),
+ namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE),
+ namesof("rho", .l.rho , earg = .e.rho, short = TRUE))
+
+
+ ycounts <- y
+ nvec <- 1
+
+
+
+ if (!length(etastart)) {
+ if (length(mustart)) {
+ mu1.init <- mustart[, 1]
+ mu2.init <- mustart[, 2]
+ } else if ( .imethod == 1) {
+ mu1.init <- weighted.mean(extra$ymat2col[, 1], c(w))
+ index1 <- (extra$ymat2col[, 1] == 1)
+ mu2.init <- weighted.mean(extra$ymat2col[index1, 2], w[index1, 1])
+ mu1.init <- rep(mu1.init, len = n)
+ mu2.init <- rep(mu2.init, len = n)
+
+ } else if ( .imethod == 2) {
+ warning("not working yet2")
+ glm1.fit <- glm(ycounts ~ x - 1,
+ weights = c(w),
+ fam = binomial("probit"))
+ glm2.fit <- glm(ycounts[, 2:1] ~ x - 1,
+ weights = c(w),
+ fam = binomial("probit"))
+ mu1.init <- fitted(glm1.fit)
+ mu2.init <- fitted(glm2.fit)
+ } else {
+ stop("bad value for argument 'imethod'")
+ }
+
+ if (length( .imu1 ))
+ mu1.init <- rep( .imu1 , length = n)
+ if (length( .imu2 ))
+ mu2.init <- rep( .imu2 , length = n)
+
+
+
+ binom2.rho.ss.Loglikfun <-
+ function(rhoval, y, x, w, extraargs) {
+ init.mu1 <- extraargs$initmu1
+ init.mu2 <- extraargs$initmu2
+ ymat2col <- extraargs$ymat2col
+ nvec <- extraargs$nvec
+ eta1 <- qnorm(init.mu1)
+ eta2 <- qnorm(init.mu2)
+
+ smallno <- 1000 * .Machine$double.eps
+ p11 <- pmax(smallno, pnorm2(eta1, eta2, cov12 = rhoval))
+ p10 <- pmax(smallno, pnorm( eta1) - p11)
+ p0 <- pmax(smallno, pnorm(-eta1))
+
+ mumat <- abs(cbind("0" = p0,
+ "10" = p10,
+ "11" = p11)) # rows sum to unity
+
+ smallpos <- 1.0e-100
+ mumat[mumat < smallpos] <- smallpos
+ ycounts <- y # n x 3
+ use.mu <- mumat # cbind(p0, p10, p11)
+
+ retval <-
+ sum(c(w) *
+ dmultinomial(x = ycounts, size = nvec, prob = use.mu, # mumat,
+ log = TRUE, dochecking = FALSE))
+ retval
+ }
+ rho.grid <- .grho # seq(-0.95, 0.95, len = 31)
+ try.this <- getMaxMin(rho.grid, objfun = binom2.rho.ss.Loglikfun,
+ y = y, x = x, w = w, extraargs = list(
+ ymat2col = extra$ymat2col,
+ initmu1 = mu1.init,
+ initmu2 = mu2.init,
+ nvec = nvec
+ ))
+
+ rho.init <- if (is.Numeric( .irho ))
+ rep( .irho , len = n) else {
+ try.this
+ }
+
+
+
+ etastart <- cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
+ theta2eta(mu2.init, .lmu12 , earg = .emu12 ),
+ theta2eta(rho.init, .l.rho , earg = .e.rho ))
+ }
+ mustart <- NULL # Since etastart has been computed and/or no @linkfun.
+ }), list( .lmu12 = lmu12, .l.rho = l.rho,
+ .emu12 = emu12, .e.rho = e.rho,
+ .grho = grho,
+ .irho = irho,
+ .imethod = imethod,
+ .imu1 = imu1, .imu2 = imu2 ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ rhovec <- eta2theta(eta[, 3], .l.rho , earg = .e.rho )
+
+ smallno <- 1000 * .Machine$double.eps
+ p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+ p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
+ p0 <- pmax(smallno, pnorm(-eta[, 1]))
+ sumprob <- p11 + p10 + p0
+ p11 <- p11 / sumprob
+ p10 <- p10 / sumprob
+ p0 <- p0 / sumprob
+
+ ansmat <- abs(cbind("0" = p0, # p0 == P(Y_1 = 0)
+ "10" = p10,
+ "11" = p11))
+ ansmat
+ }, list( .lmu12 = lmu12, .l.rho = l.rho,
+ .emu12 = emu12, .e.rho = e.rho ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu1 = .lmu12 , mu2 = .lmu12 , rho = .l.rho )
+
+ misc$earg <- list(mu1 = .emu12 , mu2 = .emu12 , rho = .e.rho )
+
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
+ }), list( .lmu12 = lmu12, .l.rho = l.rho,
+ .emu12 = emu12, .e.rho = e.rho ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+
+ ycounts <- y # n x 3
+ nvec <- 1
+
+ smallno <- 1000 * .Machine$double.eps
+ rhovec <- eta2theta(eta[, 3], .l.rho , earg = .e.rho )
+ p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+ p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
+ p0 <- pmax(smallno, pnorm(-eta[, 1]))
+ sumprob <- p11 + p10 + p0
+ p11 <- p11 / sumprob
+ p10 <- p10 / sumprob
+ p0 <- p0 / sumprob
+
+
+ sum(c(w) * dmultinomial(x = ycounts, size = nvec, prob = mu, # use.mu,
+ log = TRUE, dochecking = FALSE))
+ }
+ }, list( .l.rho = l.rho, .e.rho = e.rho ))),
+ vfamily = c("binom2.rho.ss", "binom2"),
+
+ deriv = eval(substitute(expression({
+ nvec <- 1
+ ycounts <- extra$ymat2col
+
+ pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+ eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+ rhovec <- eta2theta(eta[, 3], .l.rho , earg = .e.rho )
+
+
+ smallno <- 1000 * .Machine$double.eps
+ p11 <- pmax(smallno, pnorm2(eta[, 1], eta[, 2], cov12 = rhovec))
+ p10 <- pmax(smallno, pnorm( eta[, 1]) - p11)
+ p0 <- pmax(smallno, pnorm(-eta[, 1]))
+ sumprob <- p11 + p10 + p0
+ p11 <- p11 / sumprob
+ p10 <- p10 / sumprob
+ p0 <- p0 / sumprob
+
+
+ BAmat <- (eta[, 1:2] -
+ rhovec * eta[, 2:1]) / sqrt(pmax(1e5 * .Machine$double.eps,
+ 1.0 - rhovec^2))
+
+
+ PhiA <- pnorm(BAmat[, 2])
+ PhiB <- pnorm(BAmat[, 1])
+ onemPhiA <- pnorm(BAmat[, 2], lower.tail = FALSE)
+ onemPhiB <- pnorm(BAmat[, 1], lower.tail = FALSE)
+
+
+ mycode <- FALSE # zz
+ mycode <- TRUE # zz
+
+ if (mycode) {
+ dprob00 <- dnorm2(eta[, 1], eta[, 2], rho = rhovec)
+ dl.dprob1 <- PhiA * ycounts[, 1] * ycounts[, 2] / p11 +
+ onemPhiA * ycounts[, 1] * (1 - ycounts[, 2]) / p10 -
+ (1 - ycounts[, 1]) / p0
+ dl.dprob2 <- PhiB * ( ycounts[, 1] * ycounts[, 2] / p11 -
+ ycounts[, 1] * (1 - ycounts[, 2]) / p10)
+ dl.drho <- dprob00 * ( ycounts[, 1] * ycounts[, 2] / p11 -
+ ycounts[, 1] * (1 - ycounts[, 2]) / p10)
+
+ dprob1.deta <- dtheta.deta(pmargin[, 1], .lmu12 , earg = .emu12 )
+ dprob2.deta <- dtheta.deta(pmargin[, 2], .lmu12 , earg = .emu12 )
+ drho...deta <- dtheta.deta(rhovec, .l.rho , earg = .e.rho )
+
+ ans.deriv <- c(w) * cbind(dl.dprob1 * dprob1.deta,
+ dl.dprob2 * dprob2.deta,
+ dl.drho * drho...deta)
+ } # else {
+ eta1 <- eta[, 1] # dat1 %*% params[1:X1.d2]
+ eta2 <- eta[, 2] # dat2 %*% params[(X1.d2 + 1):(X1.d2 + X2.d2)]
+ corr.st <- eta[, 3] # params[(X1.d2 + X2.d2 + 1)]
+ corr <- rhovec # tanh(corr.st)
+
+ dat <- ycounts
+
+ y1.y2 <- dat[, 1] * dat[, 2]
+ y1.cy2 <- dat[, 1] * (1 - dat[, 2])
+ cy1 <- (1 - dat[, 1])
+
+ d.r <- 1/sqrt(pmax(10000 * .Machine$double.eps, 1 - corr^2))
+ A <- pnorm((eta2 - corr * eta1) * d.r)
+ A.c <- 1 - A
+ B <- pnorm((eta1 - corr * eta2) * d.r)
+ p11 <- pmax(pnorm2(eta1, eta2, cov12 = corr), 1000 * .Machine$double.eps)
+ p10 <- pmax(pnorm( eta1) - p11, 1000 * .Machine$double.eps)
+ p0 <- pmax(pnorm(-eta1), 1000 * .Machine$double.eps)
+ d.n1 <- dnorm(eta1)
+ d.n2 <- dnorm(eta2)
+ d.n1n2 <- dnorm2(eta1, eta2, rho = corr)
+ drh.drh.st <- 4 * exp(2 * corr.st)/(exp(2 * corr.st) + 1)^2
+
+ dl.dbe1 <- d.n1 * (y1.y2/p11 * A + y1.cy2/p10 * A.c - cy1/p0)
+ dl.dbe2 <- d.n2 * B * (y1.y2/p11 - y1.cy2/p10)
+ dl.drho <- d.n1n2 * (y1.y2/p11 - y1.cy2/p10) * drh.drh.st
+
+ ans.deriv2 <- c(w) * cbind(dl.dbe1, dl.dbe2, dl.drho)
+ # }
+
+
+
+
+
+
+
+
+
+
+ ans.deriv
+ }), list( .lmu12 = lmu12, .l.rho = l.rho,
+ .emu12 = emu12, .e.rho = e.rho ))),
+
+ weight = eval(substitute(expression({
+
+
+
+ if (mycode) {
+ ned2l.dprob1prob1 <- PhiA^2 / p11 +
+ onemPhiA^2 / p10 +
+ 1 / p0
+ ned2l.dprob2prob2 <- (1/p11 + 1/p10) * PhiB^2
+ ned2l.drho2 <- (1/p11 + 1/p10) * dprob00^2
+
+ ned2l.dprob1prob2 <- PhiA * PhiB / p11 - onemPhiA * PhiB / p10
+ ned2l.dprob1rho <- (PhiA/p11 - onemPhiA/p10) * dprob00
+ ned2l.dprob2rho <- (1/p11 + 1/p10) * PhiB * dprob00
+
+ wz <- matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dprob1prob1 * dprob1.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dprob2prob2 * dprob2.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.drho2 * drho...deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ wz[, iam(1, 3, M)] <- ned2l.dprob1rho * dprob1.deta * drho...deta
+ wz[, iam(2, 3, M)] <- ned2l.dprob2rho * dprob2.deta * drho...deta
+ } # else {
+
+ ned2l.be1.be1 <- (A^2/p11 + A.c^2/p10 + 1/p0) * d.n1^2
+ ned2l.be2.be2 <- ( 1/p11 + 1/p10) * B^2 * d.n2^2
+ ned2l.rho.rho <- ( 1/p11 + 1/p10) * d.n1n2^2 * drh.drh.st^2
+
+ ned2l.be1.be2 <- (A * B/p11 - A.c * B/p10) * d.n1 * d.n2
+ ned2l.be1.rho <- (A * (1/p11) - A.c * (1/p10)) * d.n1n2 * d.n1 * drh.drh.st
+ ned2l.be2.rho <- B * (1/p11 + 1/p10) * d.n1n2 * d.n2 * drh.drh.st
+
+
+
+
+ WZ <- matrix(0, n, dimm(M)) # 6=dimm(M)
+ WZ[, iam(1, 1, M)] <- ned2l.be1.be1
+ WZ[, iam(2, 2, M)] <- ned2l.be2.be2
+ WZ[, iam(3, 3, M)] <- ned2l.rho.rho
+ WZ[, iam(1, 2, M)] <- ned2l.be1.be2
+ WZ[, iam(1, 3, M)] <- ned2l.be1.rho
+ WZ[, iam(2, 3, M)] <- ned2l.be2.rho
+
+ c(w) * wz
+ }), list( .zero = zero ))))
+}
+
+
+
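For orientation, a minimal usage sketch for the new binom2.rho.ss() family added above; this is not part of the patch, and the data frame, covariate name and parameter values below are hypothetical, assuming the usual vglm() formula interface and the rbinom2.rho() generator shown earlier:

  ## Illustrative only: toy sample-selection data set and an intercept+slope fit.
  library(VGAM)
  set.seed(123)
  n <- 500
  x2 <- rnorm(n)
  ymat <- rbinom2.rho(n, mu1 = pnorm(0.5 + x2), mu2 = pnorm(-0.2 + x2),
                      rho = 0.4, twoCols = TRUE)
  ymat[ymat[, 1] == 0, 2] <- 0   # y2 is treated as unobserved (coded 0) when y1 == 0
  ssdata <- data.frame(y1 = ymat[, 1], y2 = ymat[, 2], x2 = x2)
  fit <- vglm(cbind(y1, y2) ~ x2, binom2.rho.ss, data = ssdata)
  coef(fit, matrix = TRUE)       # probit coefficients for mu1, mu2 and the rho link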
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 19b8473..6d0ea6b 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,9 +13,8 @@
-bilogistic4.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+bilogistic4.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
@@ -42,13 +41,13 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Bivariate logistic distribution\n\n",
- "Link: ",
- namesof("location1", llocat, elocat), ", ",
- namesof("scale1", lscale, escale), ", ",
- namesof("location2", llocat, elocat), ", ",
- namesof("scale2", lscale, escale),
- "\n", "\n",
- "Means: location1, location2"),
+ "Link: ",
+ namesof("location1", llocat, elocat), ", ",
+ namesof("scale1", lscale, escale), ", ",
+ namesof("location2", llocat, elocat), ", ",
+ namesof("scale2", lscale, escale),
+ "\n", "\n",
+ "Means: location1, location2"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero))),
@@ -72,41 +71,42 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
namesof("location2", .llocat, .elocat , tag = FALSE),
namesof("scale2", .lscale, .escale , tag = FALSE))
- if (!length(etastart)) {
- if ( .imethod == 1) {
- locat.init1 = y[, 1]
- scale.init1 = sqrt(3) * sd(y[, 1]) / pi
- locat.init2 = y[, 2]
- scale.init2 = sqrt(3) * sd(y[, 2]) / pi
- } else {
- locat.init1 = median(rep(y[, 1], w))
- locat.init2 = median(rep(y[, 2], w))
- const4 = sqrt(3) / (sum(w) * pi)
- scale.init1 = const4 * sum(c(w) *(y[, 1] - locat.init1)^2)
- scale.init2 = const4 * sum(c(w) *(y[, 2] - locat.init2)^2)
- }
- loc1.init = if (length( .iloc1 ))
- rep( .iloc1, length.out = n) else
- rep(locat.init1, length.out = n)
- loc2.init = if (length( .iloc2 ))
- rep( .iloc2, length.out = n) else
- rep(locat.init2, length.out = n)
- scale1.init = if (length( .iscale1 ))
- rep( .iscale1, length.out = n) else
- rep(1, length.out = n)
- scale2.init = if (length( .iscale2 ))
- rep( .iscale2, length.out = n) else
- rep(1, length.out = n)
-
- if ( .llocat == "loge")
- locat.init1 = abs(locat.init1) + 0.001
- if ( .llocat == "loge")
- locat.init2 = abs(locat.init2) + 0.001
-
- etastart = cbind(theta2eta(locat.init1, .llocat , .elocat ),
- theta2eta(scale1.init, .lscale , .escale ),
- theta2eta(locat.init2, .llocat , .elocat ),
- theta2eta(scale2.init, .lscale , .escale ))
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init1 <- y[, 1]
+ scale.init1 <- sqrt(3) * sd(y[, 1]) / pi
+ locat.init2 <- y[, 2]
+ scale.init2 <- sqrt(3) * sd(y[, 2]) / pi
+ } else {
+ locat.init1 <- median(rep(y[, 1], w))
+ locat.init2 <- median(rep(y[, 2], w))
+ const4 <- sqrt(3) / (sum(w) * pi)
+ scale.init1 <- const4 * sum(c(w) *(y[, 1] - locat.init1)^2)
+ scale.init2 <- const4 * sum(c(w) *(y[, 2] - locat.init2)^2)
+ }
+ loc1.init <- if (length( .iloc1 ))
+ rep( .iloc1, length.out = n) else
+ rep(locat.init1, length.out = n)
+ loc2.init <- if (length( .iloc2 ))
+ rep( .iloc2, length.out = n) else
+ rep(locat.init2, length.out = n)
+ scale1.init <- if (length( .iscale1 ))
+ rep( .iscale1, length.out = n) else
+ rep(1, length.out = n)
+ scale2.init <- if (length( .iscale2 ))
+ rep( .iscale2, length.out = n) else
+ rep(1, length.out = n)
+
+ if ( .llocat == "loge")
+ locat.init1 <- abs(locat.init1) + 0.001
+ if ( .llocat == "loge")
+ locat.init2 <- abs(locat.init2) + 0.001
+
+ etastart <-
+ cbind(theta2eta(locat.init1, .llocat , .elocat ),
+ theta2eta(scale1.init, .lscale , .escale ),
+ theta2eta(locat.init2, .llocat , .elocat ),
+ theta2eta(scale2.init, .lscale , .escale ))
}
}), list(.imethod = imethod,
.iloc1 = iloc1, .iloc2 = iloc2,
@@ -117,77 +117,76 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
cbind(eta[, 1], eta[, 2])
},
last = eval(substitute(expression({
- misc$link = c(location1 = .llocat, scale1 = .lscale,
- location2 = .llocat, scale2 = .lscale)
-
- misc$earg = list(location1 = .elocat, scale1 = .escale,
- location2 = .elocat, scale2 = .escale)
+ misc$link <- c(location1 = .llocat, scale1 = .lscale,
+ location2 = .llocat, scale2 = .lscale)
- misc$expected = FALSE
+ misc$earg <- list(location1 = .elocat, scale1 = .escale,
+ location2 = .elocat, scale2 = .escale)
- misc$BFGS = TRUE
+ misc$expected <- FALSE
+ misc$BFGS <- TRUE
misc$multipleResponses <- FALSE
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat1 = eta2theta(eta[, 1], .llocat , .elocat )
- Scale1 = eta2theta(eta[, 2], .lscale , .escale )
- locat2 = eta2theta(eta[, 3], .llocat , .elocat )
- Scale2 = eta2theta(eta[, 4], .lscale , .escale )
+ locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
+ Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
+ locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
+ Scale2 <- eta2theta(eta[, 4], .lscale , .escale )
- zedd1 = (y[, 1]-locat1) / Scale1
- zedd2 = (y[, 2]-locat2) / Scale2
+ zedd1 <- (y[, 1]-locat1) / Scale1
+ zedd2 <- (y[, 2]-locat2) / Scale2
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (-zedd1 - zedd2 - 3 * log1p(exp(-zedd1)+exp(-zedd2)) -
log(Scale1) - log(Scale2)))
}, list( .llocat = llocat, .lscale = lscale,
- .elocat = elocat, .escale = escale ))),
+ .elocat = elocat, .escale = escale ))),
vfamily = c("bilogistic4"),
deriv = eval(substitute(expression({
- locat1 = eta2theta(eta[, 1], .llocat , .elocat )
- Scale1 = eta2theta(eta[, 2], .lscale , .escale )
- locat2 = eta2theta(eta[, 3], .llocat , .elocat )
- Scale2 = eta2theta(eta[, 4], .lscale , .escale )
-
- zedd1 = (y[, 1]-locat1) / Scale1
- zedd2 = (y[, 2]-locat2) / Scale2
- ezedd1 = exp(-zedd1)
- ezedd2 = exp(-zedd2)
- denom = 1 + ezedd1 + ezedd2
-
- dl.dlocat1 = (1 - 3 * ezedd1 / denom) / Scale1
- dl.dlocat2 = (1 - 3 * ezedd2 / denom) / Scale2
- dl.dscale1 = (zedd1 - 1 - 3 * ezedd1 * zedd1 / denom) / Scale1
- dl.dscale2 = (zedd2 - 1 - 3 * ezedd2 * zedd2 / denom) / Scale2
-
- dlocat1.deta = dtheta.deta(locat1, .llocat , .elocat )
- dlocat2.deta = dtheta.deta(locat2, .llocat , .elocat )
- dscale1.deta = dtheta.deta(Scale1, .lscale , .escale )
- dscale2.deta = dtheta.deta(Scale2, .lscale , .escale )
+ locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
+ Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
+ locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
+ Scale2 <- eta2theta(eta[, 4], .lscale , .escale )
+
+ zedd1 <- (y[, 1]-locat1) / Scale1
+ zedd2 <- (y[, 2]-locat2) / Scale2
+ ezedd1 <- exp(-zedd1)
+ ezedd2 <- exp(-zedd2)
+ denom <- 1 + ezedd1 + ezedd2
+
+ dl.dlocat1 <- (1 - 3 * ezedd1 / denom) / Scale1
+ dl.dlocat2 <- (1 - 3 * ezedd2 / denom) / Scale2
+ dl.dscale1 <- (zedd1 - 1 - 3 * ezedd1 * zedd1 / denom) / Scale1
+ dl.dscale2 <- (zedd2 - 1 - 3 * ezedd2 * zedd2 / denom) / Scale2
+
+ dlocat1.deta <- dtheta.deta(locat1, .llocat , .elocat )
+ dlocat2.deta <- dtheta.deta(locat2, .llocat , .elocat )
+ dscale1.deta <- dtheta.deta(Scale1, .lscale , .escale )
+ dscale2.deta <- dtheta.deta(Scale2, .lscale , .escale )
if (iter == 1) {
- etanew = eta
+ etanew <- eta
} else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
+ derivold <- derivnew
+ etaold <- etanew
+ etanew <- eta
}
- derivnew = c(w) * cbind(dl.dlocat1 * dlocat1.deta,
- dl.dscale1 * dscale1.deta,
- dl.dlocat2 * dlocat2.deta,
- dl.dscale2 * dscale2.deta)
+ derivnew <- c(w) * cbind(dl.dlocat1 * dlocat1.deta,
+ dl.dscale1 * dscale1.deta,
+ dl.dlocat2 * dlocat2.deta,
+ dl.dscale2 * dscale2.deta)
derivnew
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
weight = eval(substitute(expression({
if (iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ wznew <- cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
- wzold = wznew
- wznew = qnupdate(w = w, wzold=wzold, dderiv=(derivold - derivnew),
+ wzold <- wznew
+ wznew <- qnupdate(w = w, wzold=wzold, dderiv=(derivold - derivnew),
deta=etanew-etaold, M = M,
trace=trace) # weights incorporated in args
}
@@ -211,44 +210,42 @@ dbilogis4 <- function(x1, x2, loc1 = 0, scale1 = 1,
- L = max(length(x1), length(x2), length(loc1), length(loc2),
- length(scale1), length(scale2))
- x1 = rep(x1, length.out = L); x2 = rep(x2, length.out = L);
- loc1 = rep(loc1, length.out = L); loc2 = rep(loc2, length.out = L);
- scale1 = rep(scale1, length.out = L); scale2 = rep(scale2, length.out = L);
- zedd1 = (-(x1-loc1)/scale1)
- zedd2 = (-(x2-loc2)/scale2)
- logdensity = log(2) + log(zedd1) + log(zedd2) - log(scale1) -
- log(scale1) - 3 * log1p(exp(zedd1) + exp(zedd2))
+ L <- max(length(x1), length(x2),
+ length(loc1), length(loc2),
+ length(scale1), length(scale2))
+ x1 <- rep(x1, length.out = L);
+ x2 <- rep(x2, length.out = L);
+ loc1 <- rep(loc1, length.out = L);
+ loc2 <- rep(loc2, length.out = L);
+ scale1 <- rep(scale1, length.out = L);
+ scale2 <- rep(scale2, length.out = L);
+ zedd1 <- (-(x1-loc1)/scale1)
+ zedd2 <- (-(x2-loc2)/scale2)
+ logdensity <- log(2) + zedd1 + zedd2 - log(scale1) -
+ log(scale2) - 3 * log1p(exp(zedd1) + exp(zedd2))
if (log.arg) logdensity else exp(logdensity)
}
-pbilogis4 <- function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(scale1, positive = TRUE)) stop("bad input for 'scale1'")
- if (!is.Numeric(scale2, positive = TRUE)) stop("bad input for 'scale2'")
-
+pbilogis4 <-
+ function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
+ ans <- 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
+ ans[scale1 <= 0] <- NA
+ ans[scale2 <= 0] <- NA
+ ans
}
rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- if (!is.Numeric(n, positive = TRUE,
- allowable.length = 1,integer.valued = TRUE))
- stop("bad input for 'n'")
- if (!is.Numeric(scale1, positive = TRUE))
- stop("bad input for 'scale1'")
- if (!is.Numeric(scale2, positive = TRUE))
- stop("bad input for 'scale2'")
- y1 = rlogis(n, location = loc1, scale = scale1)
- ezedd1 = exp(-(y1-loc1)/scale1)
- y2 = loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
- cbind(y1, y2)
+ y1 <- rlogis(n = n, location = loc1, scale = scale1)
+ ezedd1 <- exp(-(y1-loc1)/scale1)
+ y2 <- loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
+ ans <- cbind(y1, y2)
+ ans[scale2 <= 0, ] <- NA
+ ans
}
@@ -281,15 +278,15 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
new("vglmff",
blurb = c("Freund (1961) bivariate exponential distribution\n",
- "Links: ",
- namesof("a", la, earg = ea ), ", ",
- namesof("ap", lap, earg = eap), ", ",
- namesof("b", lb, earg = eb ), ", ",
- namesof("bp", lbp, earg = ebp)),
+ "Links: ",
+ namesof("a", la, earg = ea ), ", ",
+ namesof("ap", lap, earg = eap), ", ",
+ namesof("b", lb, earg = eb ), ", ",
+ namesof("bp", lbp, earg = ebp)),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x,
.independent, constraints,
- intercept.apply = TRUE)
+ apply.int = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.independent = independent, .zero = zero))),
initialize = eval(substitute(expression({
@@ -317,89 +314,90 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
stop("identifiability problem: either all y1<y2 or y2<y1")
if (!length(etastart)) {
- sumx = sum(y[ extra$y1.lt.y2, 1]);
- sumxp = sum(y[!extra$y1.lt.y2, 1])
- sumy = sum(y[ extra$y1.lt.y2, 2]);
- sumyp = sum(y[!extra$y1.lt.y2, 2])
-
- if (FALSE) { # Noise:
- arr = min(arr + n/10, n*0.95)
- sumx = sumx * 1.1; sumxp = sumxp * 1.2;
- sumy = sumy * 1.2; sumyp = sumyp * 1.3;
- }
- ainit = if (length(.ia)) rep(.ia, length.out = n) else
- arr / (sumx + sumyp)
- apinit = if (length(.iap)) rep(.iap,length.out = n) else
- (n-arr)/(sumxp-sumyp)
- binit = if (length(.ib)) rep(.ib, length.out = n) else
- (n-arr)/(sumx +sumyp)
- bpinit = if (length(.ib)) rep(.ibp,length.out = n) else
- arr / (sumy - sumx)
-
- etastart =
- cbind(theta2eta(rep(ainit, length.out = n), .la, earg = .ea ),
- theta2eta(rep(apinit, length.out = n), .lap, earg = .eap ),
- theta2eta(rep(binit, length.out = n), .lb, earg = .eb ),
- theta2eta(rep(bpinit, length.out = n), .lbp, earg = .ebp ))
+ sumx <- sum(y[ extra$y1.lt.y2, 1]);
+ sumxp <- sum(y[!extra$y1.lt.y2, 1])
+ sumy <- sum(y[ extra$y1.lt.y2, 2]);
+ sumyp <- sum(y[!extra$y1.lt.y2, 2])
+
+ if (FALSE) { # Noise:
+ arr <- min(arr + n/10, n*0.95)
+ sumx <- sumx * 1.1; sumxp <- sumxp * 1.2;
+ sumy <- sumy * 1.2; sumyp <- sumyp * 1.3;
+ }
+ ainit <- if (length(.ia)) rep(.ia, length.out = n) else
+ arr / (sumx + sumyp)
+ apinit <- if (length(.iap)) rep(.iap,length.out = n) else
+ (n-arr)/(sumxp-sumyp)
+ binit <- if (length(.ib)) rep(.ib, length.out = n) else
+ (n-arr)/(sumx +sumyp)
+ bpinit <- if (length(.ibp)) rep(.ibp,length.out = n) else
+ arr / (sumy - sumx)
+
+ etastart <-
+ cbind(theta2eta(rep(ainit, length.out = n), .la, earg = .ea ),
+ theta2eta(rep(apinit, length.out = n), .lap, earg = .eap ),
+ theta2eta(rep(binit, length.out = n), .lb, earg = .eb ),
+ theta2eta(rep(bpinit, length.out = n), .lbp, earg = .ebp ))
}
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
- .ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
- .ia = ia, .iap = iap, .ib = ib, .ibp = ibp))),
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
+ .ia = ia, .iap = iap, .ib = ib, .ibp = ibp))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta[, 1], .la, earg = .ea )
- alphap = eta2theta(eta[, 2], .lap, earg = .eap )
- beta = eta2theta(eta[, 3], .lb, earg = .eb )
- betap = eta2theta(eta[, 4], .lbp, earg = .ebp )
+ alpha <- eta2theta(eta[, 1], .la, earg = .ea )
+ alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
+ beta <- eta2theta(eta[, 3], .lb, earg = .eb )
+ betap <- eta2theta(eta[, 4], .lbp, earg = .ebp )
cbind((alphap + beta) / (alphap * (alpha + beta)),
(alpha + betap) / (betap * (alpha + beta)))
}, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
last = eval(substitute(expression({
- misc$link = c("a" = .la, "ap" = .lap, "b" = .lb, "bp" = .lbp)
- misc$earg = list("a" = .ea, "ap" = .eap, "b" = .eb, "bp" = .ebp)
+ misc$link <- c("a" = .la, "ap" = .lap, "b" = .lb, "bp" = .lbp)
+
+ misc$earg <- list("a" = .ea, "ap" = .eap, "b" = .eb, "bp" = .ebp)
misc$multipleResponses <- FALSE
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta[, 1], .la, earg = .ea )
- alphap = eta2theta(eta[, 2], .lap, earg = .eap )
- beta = eta2theta(eta[, 3], .lb, earg = .eb )
- betap = eta2theta(eta[, 4], .lbp, earg = .ebp )
+ alpha <- eta2theta(eta[, 1], .la, earg = .ea )
+ alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
+ beta <- eta2theta(eta[, 3], .lb, earg = .eb )
+ betap <- eta2theta(eta[, 4], .lbp, earg = .ebp )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- tmp88 = extra$y1.lt.y2
- ell1 = log(alpha[tmp88]) + log(betap[tmp88]) -
- betap[tmp88] * y[tmp88, 2] -
- (alpha+beta-betap)[tmp88] * y[tmp88, 1]
- ell2 = log(beta[!tmp88]) + log(alphap[!tmp88]) -
- alphap[!tmp88] * y[!tmp88, 1] -
- (alpha+beta-alphap)[!tmp88] * y[!tmp88, 2]
+ tmp88 <- extra$y1.lt.y2
+ ell1 <- log(alpha[tmp88]) + log(betap[tmp88]) -
+ betap[tmp88] * y[tmp88, 2] -
+ (alpha+beta-betap)[tmp88] * y[tmp88, 1]
+ ell2 <- log(beta[!tmp88]) + log(alphap[!tmp88]) -
+ alphap[!tmp88] * y[!tmp88, 1] -
+ (alpha+beta-alphap)[!tmp88] * y[!tmp88, 2]
sum(w[tmp88] * ell1) + sum(w[!tmp88] * ell2) }
}, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
vfamily = c("freund61"),
deriv = eval(substitute(expression({
- tmp88 = extra$y1.lt.y2
- alpha = eta2theta(eta[, 1], .la, earg = .ea )
- alphap = eta2theta(eta[, 2], .lap, earg = .eap )
- beta = eta2theta(eta[, 3], .lb, earg = .eb )
- betap = eta2theta(eta[, 4], .lbp, earg = .ebp )
-
- dalpha.deta = dtheta.deta(alpha, .la, earg = .ea )
- dalphap.deta = dtheta.deta(alphap, .lap, earg = .eap )
- dbeta.deta = dtheta.deta(beta, .lb, earg = .eb )
- dbetap.deta = dtheta.deta(betap, .lbp, earg = .ebp )
-
- d1 = 1/alpha - y[, 1]
- d1[!tmp88] = -y[!tmp88, 2]
- d2 = 0 * alphap
- d2[!tmp88] = 1/alphap[!tmp88] - y[!tmp88, 1] + y[!tmp88, 2]
- d3 = -y[, 1]
- d3[!tmp88] = 1/beta[!tmp88] - y[!tmp88, 2]
- d4 = 1/betap - y[, 2] + y[, 1]
- d4[!tmp88] = 0
+ tmp88 <- extra$y1.lt.y2
+ alpha <- eta2theta(eta[, 1], .la, earg = .ea )
+ alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
+ beta <- eta2theta(eta[, 3], .lb, earg = .eb )
+ betap <- eta2theta(eta[, 4], .lbp, earg = .ebp )
+
+ dalpha.deta <- dtheta.deta(alpha, .la, earg = .ea )
+ dalphap.deta <- dtheta.deta(alphap, .lap, earg = .eap )
+ dbeta.deta <- dtheta.deta(beta, .lb, earg = .eb )
+ dbetap.deta <- dtheta.deta(betap, .lbp, earg = .ebp )
+
+ d1 <- 1/alpha - y[, 1]
+ d1[!tmp88] <- -y[!tmp88, 2]
+ d2 <- 0 * alphap
+ d2[!tmp88] <- 1/alphap[!tmp88] - y[!tmp88, 1] + y[!tmp88, 2]
+ d3 <- -y[, 1]
+ d3[!tmp88] <- 1/beta[!tmp88] - y[!tmp88, 2]
+ d4 <- 1/betap - y[, 2] + y[, 1]
+ d4[!tmp88] <- 0
c(w) * cbind(d1 * dalpha.deta,
d2 * dalphap.deta,
@@ -408,17 +406,17 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
weight = eval(substitute(expression({
- py1.lt.y2 = alpha / (alpha+beta)
- d11 = py1.lt.y2 / alpha^2
- d22 = (1-py1.lt.y2) / alphap^2
- d33 = (1-py1.lt.y2) / beta^2
- d44 = py1.lt.y2 / betap^2
-
- wz = matrix(0, n, M) # diagonal
- wz[, iam(1, 1, M)] = dalpha.deta^2 * d11
- wz[, iam(2, 2, M)] = dalphap.deta^2 * d22
- wz[, iam(3, 3, M)] = dbeta.deta^2 * d33
- wz[, iam(4, 4, M)] = dbetap.deta^2 * d44
+ py1.lt.y2 <- alpha / (alpha+beta)
+ d11 <- py1.lt.y2 / alpha^2
+ d22 <- (1-py1.lt.y2) / alphap^2
+ d33 <- (1-py1.lt.y2) / beta^2
+ d44 <- py1.lt.y2 / betap^2
+
+ wz <- matrix(0, n, M) # diagonal
+ wz[, iam(1, 1, M)] <- dalpha.deta^2 * d11
+ wz[, iam(2, 2, M)] <- dalphap.deta^2 * d22
+ wz[, iam(3, 3, M)] <- dbeta.deta^2 * d33
+ wz[, iam(4, 4, M)] <- dbetap.deta^2 * d44
c(w) * wz
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
@@ -472,12 +470,12 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
new("vglmff",
blurb = c("Bivariate gamma: McKay's distribution\n",
- "Links: ",
- namesof("scale", lscale), ", ",
- namesof("shape1", lshape1), ", ",
- namesof("shape2", lshape2)),
+ "Links: ",
+ namesof("scale", lscale), ", ",
+ namesof("shape1", lshape1), ", ",
+ namesof("shape2", lshape2)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -504,7 +502,7 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
namesof("shape2", .lshape2, .eshape2, short = TRUE))
if (!length(etastart)) {
- momentsY = if ( .imethod == 1) {
+ momentsY <- if ( .imethod == 1) {
cbind(median(y[, 1]), # This may not be monotonic
median(y[, 2])) + 0.01
} else {
@@ -513,59 +511,61 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
}
mcg2.loglik <- function(thetaval, y, x, w, extraargs) {
- ainit = a = thetaval
- momentsY = extraargs$momentsY
- p = (1/a) * abs(momentsY[1]) + 0.01
- q = (1/a) * abs(momentsY[2] - momentsY[1]) + 0.01
+ ainit <- a <- thetaval
+ momentsY <- extraargs$momentsY
+ p <- (1/a) * abs(momentsY[1]) + 0.01
+ q <- (1/a) * abs(momentsY[2] - momentsY[1]) + 0.01
sum(c(w) * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
(p - 1)*log(y[, 1]) +
(q - 1)*log(y[, 2]-y[, 1]) - y[, 2] / a ))
- }
+ }
- a.grid = if (length( .iscale )) c( .iscale ) else
- c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
- extraargs = list(momentsY = momentsY)
- ainit = getMaxMin(a.grid, objfun=mcg2.loglik,
- y=y, x=x, w = w, maximize = TRUE,
- extraargs = extraargs)
- ainit = rep(if(is.Numeric( .iscale )) .iscale else ainit,
- length.out = n)
- pinit = (1/ainit) * abs(momentsY[1]) + 0.01
- qinit = (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
-
- pinit = rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit,
- length.out = n)
- qinit = rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit,
- length.out = n)
-
- etastart = cbind(theta2eta(ainit, .lscale),
- theta2eta(pinit, .lshape1),
- theta2eta(qinit, .lshape2))
- }
- }), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
- .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2,
- .iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
- .imethod = imethod ))),
+ a.grid <- if (length( .iscale )) c( .iscale ) else
+ c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
+ extraargs <- list(momentsY = momentsY)
+ ainit <- getMaxMin(a.grid, objfun = mcg2.loglik,
+ y = y, x = x, w = w, maximize = TRUE,
+ extraargs = extraargs)
+ ainit <- rep(if(is.Numeric( .iscale )) .iscale else ainit,
+ length.out = n)
+ pinit <- (1/ainit) * abs(momentsY[1]) + 0.01
+ qinit <- (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
+
+ pinit <- rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit,
+ length.out = n)
+ qinit <- rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit,
+ length.out = n)
+
+ etastart <-
+ cbind(theta2eta(ainit, .lscale),
+ theta2eta(pinit, .lshape1),
+ theta2eta(qinit, .lshape2))
+ }
+ }), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2,
+ .iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
+ .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- a = eta2theta(eta[, 1], .lscale , .escale )
- p = eta2theta(eta[, 2], .lshape1 , .eshape1 )
- q = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+ a <- eta2theta(eta[, 1], .lscale , .escale )
+ p <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ q <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
cbind("y1" = p*a,
"y2" = (p+q)*a)
}, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
.escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale ,
- "shape1" = .lshape1 ,
- "shape2" = .lshape2 )
- misc$earg = list("scale" = .escale ,
- "shape1" = .eshape1 ,
- "shape2" = .eshape2 )
-
- misc$ishape1 = .ishape1
- misc$ishape2 = .ishape2
- misc$iscale = .iscale
- misc$expected = TRUE
+ misc$link <- c("scale" = .lscale ,
+ "shape1" = .lshape1 ,
+ "shape2" = .lshape2 )
+
+ misc$earg <- list("scale" = .escale ,
+ "shape1" = .eshape1 ,
+ "shape2" = .eshape2 )
+
+ misc$ishape1 <- .ishape1
+ misc$ishape2 <- .ishape2
+ misc$iscale <- .iscale
+ misc$expected <- TRUE
misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
.escale = escale, .eshape1 = eshape1, .eshape2 = eshape2,
@@ -573,9 +573,9 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- a = eta2theta(eta[, 1], .lscale , .escale )
- p = eta2theta(eta[, 2], .lshape1 , .eshape1 )
- q = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+ a <- eta2theta(eta[, 1], .lscale , .escale )
+ p <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ q <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
@@ -586,13 +586,13 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
.escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("bivgamma.mckay"),
deriv = eval(substitute(expression({
- aparam = eta2theta(eta[, 1], .lscale , .escale )
- shape1 = eta2theta(eta[, 2], .lshape1 , .eshape1 )
- shape2 = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+ aparam <- eta2theta(eta[, 1], .lscale , .escale )
+ shape1 <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ shape2 <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
- dl.da = (-(shape1+shape2) + y[, 2] / aparam) / aparam
- dl.dshape1 = -log(aparam) - digamma(shape1) + log(y[, 1])
- dl.dshape2 = -log(aparam) - digamma(shape2) + log(y[, 2]-y[, 1])
+ dl.da <- (-(shape1+shape2) + y[, 2] / aparam) / aparam
+ dl.dshape1 <- -log(aparam) - digamma(shape1) + log(y[, 1])
+ dl.dshape2 <- -log(aparam) - digamma(shape2) + log(y[, 2]-y[, 1])
c(w) * cbind(dl.da * dtheta.deta(aparam, .lscale),
dl.dshape1 * dtheta.deta(shape1, .lshape1),
@@ -600,23 +600,23 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
}), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
.escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
- d11 = (shape1+shape2) / aparam^2
- d22 = trigamma(shape1)
- d33 = trigamma(shape2)
- d12 = 1 / aparam
- d13 = 1 / aparam
- d23 = 0
-
- wz = matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dtheta.deta(aparam, .lscale)^2 * d11
- wz[, iam(2, 2, M)] = dtheta.deta(shape1, .lshape1)^2 * d22
- wz[, iam(3, 3, M)] = dtheta.deta(shape2, .lshape2)^2 * d33
- wz[, iam(1, 2, M)] = dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape1, .lshape1) * d12
- wz[, iam(1, 3, M)] = dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape2, .lshape2) * d13
- wz[, iam(2, 3, M)] = dtheta.deta(shape1, .lshape1) *
- dtheta.deta(shape2, .lshape2) * d23
+ d11 <- (shape1+shape2) / aparam^2
+ d22 <- trigamma(shape1)
+ d33 <- trigamma(shape2)
+ d12 <- 1 / aparam
+ d13 <- 1 / aparam
+ d23 <- 0
+
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dtheta.deta(aparam, .lscale)^2 * d11
+ wz[, iam(2, 2, M)] <- dtheta.deta(shape1, .lshape1)^2 * d22
+ wz[, iam(3, 3, M)] <- dtheta.deta(shape2, .lshape2)^2 * d33
+ wz[, iam(1, 2, M)] <- dtheta.deta(aparam, .lscale) *
+ dtheta.deta(shape1, .lshape1) * d12
+ wz[, iam(1, 3, M)] <- dtheta.deta(aparam, .lscale) *
+ dtheta.deta(shape2, .lshape2) * d13
+ wz[, iam(2, 3, M)] <- dtheta.deta(shape1, .lshape1) *
+ dtheta.deta(shape2, .lshape2) * d23
c(w) * wz
}), list( .lscale = lscale, .lshape1 = lshape1,
@@ -632,57 +632,59 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
+
rfrank <- function(n, alpha) {
- if (!is.Numeric(n, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for argument 'n'")
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
if (!is.Numeric(alpha, positive = TRUE))
stop("bad input for argument 'alpha'")
- alpha = rep(alpha, length.out = n)
- U = runif(n)
- V = runif(n)
- T = alpha^U + (alpha - alpha^U) * V
- X = U
- index = abs(alpha - 1) < .Machine$double.eps
- Y = U
+ alpha <- rep(alpha, length.out = use.n)
+ U <- runif(use.n)
+ V <- runif(use.n)
+ T <- alpha^U + (alpha - alpha^U) * V
+ X <- U
+ index <- abs(alpha - 1) < .Machine$double.eps
+ Y <- U
if (any(!index))
- Y[!index] = logb(T[!index]/(T[!index]+(1-alpha[!index])*V[!index]),
- base = alpha[!index])
- ans = matrix(c(X,Y), nrow = n, ncol = 2)
+ Y[!index] <- logb(T[!index]/(T[!index]+(1-alpha[!index])*V[!index]),
+ base = alpha[!index])
+ ans <- matrix(c(X, Y), nrow = use.n, ncol = 2)
if (any(index)) {
- ans[index, 1] = runif(sum(index)) # Uniform density for alpha == 1
- ans[index, 2] = runif(sum(index))
+ ans[index, 1] <- runif(sum(index)) # Uniform density for alpha == 1
+ ans[index, 2] <- runif(sum(index))
}
ans
}
pfrank <- function(q1, q2, alpha) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
-
- L = max(length(q1), length(q2), length(alpha))
- alpha = rep(alpha, length.out = L)
- q1 = rep(q1, length.out = L)
- q2 = rep(q2, length.out = L)
-
- x=q1; y=q2
- index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
- (x <= 0 | y <= 0) | (x >= 1 & y >= 1) |
- (abs(alpha - 1) < .Machine$double.eps)
- ans = as.numeric(index)
- if (any(!index))
- ans[!index] = logb(1 + ((alpha[!index])^(x[!index]) - 1)*
- ((alpha[!index])^(y[!index]) - 1)/(alpha[!index] - 1),
- base=alpha[!index])
- ind2 = (abs(alpha - 1) < .Machine$double.eps)
- ans[ind2] = x[ind2] * y[ind2]
- ans[x >= 1 & y < 1] = y[x >= 1 & y < 1] # P(Y2 < q2) = q2
- ans[y >= 1 & x < 1] = x[y >= 1 & x < 1] # P(Y1 < q1) = q1
- ans[x <= 0 | y <= 0] = 0
- ans[x >= 1 & y >= 1] = 1
- ans
+ if (!is.Numeric(q1)) stop("bad input for 'q1'")
+ if (!is.Numeric(q2)) stop("bad input for 'q2'")
+ if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
+
+ L <- max(length(q1), length(q2), length(alpha))
+ alpha <- rep(alpha, length.out = L)
+ q1 <- rep(q1, length.out = L)
+ q2 <- rep(q2, length.out = L)
+
+ x <- q1; y <- q2
+ index <- (x >= 1 & y < 1) | (y >= 1 & x < 1) |
+ (x <= 0 | y <= 0) | (x >= 1 & y >= 1) |
+ (abs(alpha - 1) < .Machine$double.eps)
+ ans <- as.numeric(index)
+ if (any(!index))
+ ans[!index] <- logb(1 + ((alpha[!index])^(x[!index]) - 1)*
+ ((alpha[!index])^(y[!index]) - 1)/(alpha[!index] - 1),
+ base=alpha[!index])
+ ind2 <- (abs(alpha - 1) < .Machine$double.eps)
+ ans[ind2] <- x[ind2] * y[ind2]
+ ans[x >= 1 & y < 1] <- y[x >= 1 & y < 1] # P(Y2 < q2) = q2
+ ans[y >= 1 & x < 1] <- x[y >= 1 & x < 1] # P(Y1 < q1) = q1
+ ans[x <= 0 | y <= 0] <- 0
+ ans[x >= 1 & y >= 1] <- 1
+ ans
}
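The Frank copula helpers can likewise be sanity-checked directly; a small sketch, with the sample size and alpha chosen only for illustration (not part of the patch):

  ## Empirical vs. theoretical joint CDF at one point.
  library(VGAM)
  set.seed(1)
  uv <- rfrank(5000, alpha = 5)
  mean(uv[, 1] <= 0.5 & uv[, 2] <= 0.5)  # empirical P(Y1 <= 0.5, Y2 <= 0.5)
  pfrank(0.5, 0.5, alpha = 5)            # value implied by the copula CDF above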
@@ -692,37 +694,36 @@ dfrank <- function(x1, x2, alpha, log = FALSE) {
rm(log)
- if (!is.Numeric(x1)) stop("bad input for 'x1'")
- if (!is.Numeric(x2)) stop("bad input for 'x2'")
- if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
+ if (!is.Numeric(x1)) stop("bad input for 'x1'")
+ if (!is.Numeric(x2)) stop("bad input for 'x2'")
+ if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
- L = max(length(x1), length(x2), length(alpha))
- alpha = rep(alpha, length.out = L)
- x1 = rep(x1, length.out = L)
- x2 = rep(x2, length.out = L)
+ L <- max(length(x1), length(x2), length(alpha))
+ alpha <- rep(alpha, length.out = L)
+ x1 <- rep(x1, length.out = L)
+ x2 <- rep(x2, length.out = L)
- if (log.arg) {
- denom = alpha-1 + (alpha^x1 - 1) * (alpha^x2 - 1)
- denom = abs(denom) # Needed; Genest (1987) uses this too, eqn (4.1)
- log((alpha - 1) * log(alpha)) + (x1+x2)*log(alpha) - 2 * log(denom)
- } else {
- temp = (alpha - 1) + (alpha^x1 - 1) * (alpha^x2 - 1)
- index = (abs(alpha - 1) < .Machine$double.eps)
- ans = x1
- if (any(!index))
- ans[!index] = (alpha[!index] - 1) * log(alpha[!index]) *
- (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
- ans[x1 <= 0 | x2 <= 0 | x1 >= 1 | x2 >= 1] = 0
- ans[index] = 1
- ans
- }
+ if (log.arg) {
+ denom <- alpha-1 + (alpha^x1 - 1) * (alpha^x2 - 1)
+ denom <- abs(denom)
+ log((alpha - 1) * log(alpha)) + (x1+x2)*log(alpha) - 2 * log(denom)
+ } else {
+ temp <- (alpha - 1) + (alpha^x1 - 1) * (alpha^x2 - 1)
+ index <- (abs(alpha - 1) < .Machine$double.eps)
+ ans <- x1
+ if (any(!index))
+ ans[!index] <- (alpha[!index] - 1) * log(alpha[!index]) *
+ (alpha[!index])^(x1[!index]+x2[!index]) / (temp[!index])^2
+ ans[x1 <= 0 | x2 <= 0 | x1 >= 1 | x2 >= 1] <- 0
+ ans[index] <- 1
+ ans
+ }
}
-frank.control <- function(save.weight = TRUE, ...)
-{
+frank.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -773,33 +774,33 @@ frank.control <- function(save.weight = TRUE, ...)
c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- apar.init = rep(.iapar, length.out = n)
- etastart = cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
+ apar.init <- rep(.iapar, length.out = n)
+ etastart <- cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
}
}), list( .lapar = lapar, .eapar = eapar, .iapar = iapar))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- apar = eta2theta(eta, .lapar, earg = .eapar )
- fv.matrix = matrix(0.5, length(apar), 2)
+ apar <- eta2theta(eta, .lapar, earg = .eapar )
+ fv.matrix <- matrix(0.5, length(apar), 2)
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
fv.matrix
}, list( .lapar = lapar, .eapar = eapar ))),
last = eval(substitute(expression({
- misc$link = c("apar" = .lapar )
+ misc$link <- c("apar" = .lapar )
- misc$earg = list("apar" = .eapar )
+ misc$earg <- list("apar" = .eapar )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$pooled.weight = pooled.weight
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
+ misc$pooled.weight <- pooled.weight
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- apar = eta2theta(eta, .lapar, earg = .eapar )
+ apar <- eta2theta(eta, .lapar, earg = .eapar )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dfrank(x1 = y[, 1], x2 = y[, 2],
@@ -808,65 +809,67 @@ frank.control <- function(save.weight = TRUE, ...)
}, list( .lapar = lapar, .eapar = eapar ))),
vfamily = c("frank"),
deriv = eval(substitute(expression({
- apar = eta2theta(eta, .lapar, earg = .eapar )
- dapar.deta = dtheta.deta(apar, .lapar, earg = .eapar )
+ apar <- eta2theta(eta, .lapar, earg = .eapar )
+ dapar.deta <- dtheta.deta(apar, .lapar, earg = .eapar )
- de3 = deriv3(~ (log((apar - 1) * log(apar)) + (y1+y2)*log(apar) -
+ de3 <- deriv3(~ (log((apar - 1) * log(apar)) + (y1+y2)*log(apar) -
2 * log(apar-1 + (apar^y1 - 1) * (apar^y2 - 1))),
name = "apar", hessian = TRUE)
- denom = apar-1 + (apar^y[, 1] - 1) * (apar^y[, 2] - 1)
- tmp700 = 2*apar^(y[, 1]+y[, 2]) - apar^y[, 1] - apar^y[, 2]
- numerator = 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2] - 1) +
- y[, 2] * apar^(y[, 2] - 1) * (apar^y[, 1] - 1)
- Dl.dapar = 1/(apar - 1) + 1/(apar*log(apar)) + (y[, 1]+y[, 2])/apar -
- 2 * numerator / denom
+ denom <- apar-1 + (apar^y[, 1] - 1) * (apar^y[, 2] - 1)
+ tmp700 <- 2*apar^(y[, 1]+y[, 2]) - apar^y[, 1] - apar^y[, 2]
+ numerator <- 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2] - 1) +
+ y[, 2] * apar^(y[, 2] - 1) * (apar^y[, 1] - 1)
+ Dl.dapar <- 1/(apar - 1) + 1/(apar*log(apar)) +
+ (y[, 1]+y[, 2])/apar - 2 * numerator / denom
c(w) * Dl.dapar * dapar.deta
- }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
+ }), list( .lapar = lapar,
+ .eapar = eapar, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
if ( is.Numeric( .nsimEIM)) {
- pooled.weight = FALSE # For @last
+ pooled.weight <- FALSE # For @last
- run.mean = 0
+ run.mean <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = rfrank(n,alpha=apar)
- y1 = ysim[, 1]; y2 = ysim[, 2];
- eval.de3 = eval(de3)
- d2l.dthetas2 = attr(eval.de3, "hessian")
+ ysim <- rfrank(n,alpha=apar)
+ y1 <- ysim[, 1]; y2 <- ysim[, 2];
+ eval.de3 <- eval(de3)
+ d2l.dthetas2 <- attr(eval.de3, "hessian")
rm(ysim)
- temp3 = -d2l.dthetas2[, 1, 1] # M = 1
- run.mean = ((ii - 1) * run.mean + temp3) / ii
+ temp3 <- -d2l.dthetas2[, 1, 1] # M = 1
+ run.mean <- ((ii - 1) * run.mean + temp3) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(mean(run.mean), n, dimm(M)) else run.mean
- wz = wz * dapar.deta^2
+ wz <- wz * dapar.deta^2
c(w) * wz
} else {
- nump = apar^(y[, 1]+y[, 2]-2) * (2 * y[, 1] * y[, 2] +
- y[, 1]*(y[, 1] - 1) + y[, 2]*(y[, 2] - 1)) -
- y[, 1]*(y[, 1] - 1) * apar^(y[, 1]-2) -
- y[, 2]*(y[, 2] - 1) * apar^(y[, 2]-2)
- D2l.dapar2 = 1/(apar - 1)^2 + (1+log(apar))/(apar*log(apar))^2 +
- (y[, 1]+y[, 2])/apar^2 + 2 *
- (nump / denom - (numerator/denom)^2)
- d2apar.deta2 = d2theta.deta2(apar, .lapar)
- wz = c(w) * (dapar.deta^2 * D2l.dapar2 - Dl.dapar * d2apar.deta2)
- if (TRUE && intercept.only) {
- wz = cbind(wz)
- sumw = sum(w)
+ nump <- apar^(y[, 1]+y[, 2]-2) * (2 * y[, 1] * y[, 2] +
+ y[, 1]*(y[, 1] - 1) + y[, 2]*(y[, 2] - 1)) -
+ y[, 1]*(y[, 1] - 1) * apar^(y[, 1]-2) -
+ y[, 2]*(y[, 2] - 1) * apar^(y[, 2]-2)
+ D2l.dapar2 <- 1/(apar - 1)^2 + (1+log(apar))/(apar*log(apar))^2 +
+ (y[, 1]+y[, 2])/apar^2 + 2 *
+ (nump / denom - (numerator/denom)^2)
+ d2apar.deta2 <- d2theta.deta2(apar, .lapar)
+ wz <- c(w) * (dapar.deta^2 * D2l.dapar2 - Dl.dapar * d2apar.deta2)
+ if (TRUE && intercept.only) {
+ wz <- cbind(wz)
+ sumw <- sum(w)
for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[, iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else {
- pooled.weight = FALSE
- }
+ wz[,iii] <- sum(wz[, iii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
+ } else {
+ pooled.weight <- FALSE
+ }
wz
}
- }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))))
+ }), list( .lapar = lapar,
+ .eapar = eapar, .nsimEIM = nsimEIM ))))
}
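A minimal fitting sketch for the frank() family itself; the data are simulated and the object names are illustrative, assuming the default link for 'apar' (not part of the patch):

  ## Intercept-only Frank copula fit to simulated uniforms.
  library(VGAM)
  set.seed(2)
  fdata <- data.frame(rfrank(n = 1000, alpha = exp(2)))
  colnames(fdata) <- c("y1", "y2")
  fit <- vglm(cbind(y1, y2) ~ 1, frank, data = fdata)
  Coef(fit)                      # 'apar' estimate, should be near exp(2) ~= 7.4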
@@ -885,8 +888,8 @@ frank.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Gamma hyperbola bivariate distribution\n",
- "Links: ",
- namesof("theta", ltheta, etheta)),
+ "Links: ",
+ namesof("theta", ltheta, etheta)),
initialize = eval(substitute(expression({
if (any(y[, 1] <= 0) || any(y[, 2] <= 1))
stop("the response has values that are out of range")
@@ -909,30 +912,31 @@ frank.control <- function(save.weight = TRUE, ...)
c(namesof("theta", .ltheta, .etheta , short = TRUE))
if (!length(etastart)) {
- theta.init = if (length( .itheta)) {
+ theta.init <- if (length( .itheta)) {
rep( .itheta , length.out = n)
} else {
1 / (y[, 2] - 1 + 0.01)
}
- etastart =
+ etastart <-
cbind(theta2eta(theta.init, .ltheta , .etheta ))
}
}), list( .ltheta = ltheta, .etheta = etheta, .itheta = itheta))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .ltheta , .etheta )
+ theta <- eta2theta(eta, .ltheta , .etheta )
cbind(theta*exp(theta), 1+1/theta)
}, list( .ltheta = ltheta, .etheta = etheta ))),
last = eval(substitute(expression({
- misc$link = c("theta" = .ltheta )
- misc$earg = list("theta" = .etheta )
+ misc$link <- c("theta" = .ltheta )
- misc$expected = .expected
+ misc$earg <- list("theta" = .etheta )
+
+ misc$expected <- .expected
misc$multipleResponses <- FALSE
}), list( .ltheta = ltheta, .etheta = etheta, .expected = expected ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .ltheta , .etheta )
+ theta <- eta2theta(eta, .ltheta , .etheta )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * (-exp(-theta)*y[, 1]/theta - theta*y[, 2]))
@@ -940,23 +944,25 @@ frank.control <- function(save.weight = TRUE, ...)
}, list( .ltheta = ltheta, .etheta = etheta ))),
vfamily = c("gammahyp"),
deriv = eval(substitute(expression({
- theta = eta2theta(eta, .ltheta , .etheta )
- Dl.dtheta = exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
- DTHETA.deta = dtheta.deta(theta, .ltheta , .etheta )
+ theta <- eta2theta(eta, .ltheta , .etheta )
+ Dl.dtheta <- exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
+ DTHETA.deta <- dtheta.deta(theta, .ltheta , .etheta )
c(w) * Dl.dtheta * DTHETA.deta
}), list( .ltheta = ltheta, .etheta = etheta ))),
weight = eval(substitute(expression({
- temp300 = 2 + theta * (2 + theta)
+ temp300 <- 2 + theta * (2 + theta)
if ( .expected ) {
- D2l.dtheta2 = temp300 / theta^2
- wz = c(w) * DTHETA.deta^2 * D2l.dtheta2
+ D2l.dtheta2 <- temp300 / theta^2
+ wz <- c(w) * DTHETA.deta^2 * D2l.dtheta2
} else {
- D2l.dtheta2 = temp300 * y[, 1] * exp(-theta) / theta^3
- D2theta.deta2 = d2theta.deta2(theta, .ltheta )
- wz = c(w) * (DTHETA.deta^2 * D2l.dtheta2 - Dl.dtheta * D2theta.deta2)
+ D2l.dtheta2 <- temp300 * y[, 1] * exp(-theta) / theta^3
+ D2theta.deta2 <- d2theta.deta2(theta, .ltheta )
+ wz <- c(w) * (DTHETA.deta^2 * D2l.dtheta2 -
+ Dl.dtheta * D2theta.deta2)
}
wz
- }), list( .expected = expected, .ltheta = ltheta, .etheta = etheta ))))
+ }), list( .ltheta = ltheta,
+ .etheta = etheta, .expected = expected ))))
}
@@ -1011,73 +1017,73 @@ frank.control <- function(save.weight = TRUE, ...)
extra$dimnamesy2 = dimnames(y)[[2]]
if (!length(etastart)) {
- ainit = if (length(.iapar)) rep( .iapar , length.out = n) else {
- mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
- mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
- Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
+ ainit <- if (length(.iapar)) rep( .iapar , length.out = n) else {
+ mean1 <- if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
+ mean2 <- if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
+ Finit <- 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
((Finit+expm1(-mean1)+exp(-mean2)) / exp(-mean1-mean2) - 1) / (
expm1(-mean1) * expm1(-mean2))
}
- etastart =
+ etastart <-
theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- fv.matrix = matrix(1, length(alpha), 2)
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
+ fv.matrix <- matrix(1, length(alpha), 2)
if (length(extra$dimnamesy2))
dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
fv.matrix
}, list( .lapar = lapar, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c("apar" = .lapar )
+ misc$link <- c("apar" = .lapar )
- misc$earg = list(apar = .earg )
+ misc$earg <- list("apar" = .earg )
- misc$expected = FALSE
- misc$pooled.weight = pooled.weight
+ misc$expected <- FALSE
+ misc$pooled.weight <- pooled.weight
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- alpha[abs(alpha) < .tola0 ] = .tola0
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
+ alpha[abs(alpha) < .tola0 ] <- .tola0
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
- 4*alpha*exp(-y[, 1] - y[, 2]))
+ denom <- (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4*alpha*exp(-y[, 1] - y[, 2]))
sum(c(w) * (-y[, 1] - y[, 2] + log(denom)))
}
}, list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
vfamily = c("morgenstern"),
deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
- alpha[abs(alpha) < .tola0 ] = .tola0
- numerator = 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) +
- 4*exp(-y[, 1] - y[, 2])
- denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
- 4 *alpha*exp(-y[, 1] - y[, 2]))
- dl.dalpha = numerator / denom
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
+ alpha[abs(alpha) < .tola0 ] <- .tola0
+ numerator <- 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4*exp(-y[, 1] - y[, 2])
+ denom <- (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4 *alpha*exp(-y[, 1] - y[, 2]))
+ dl.dalpha <- numerator / denom
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+ dalpha.deta <- dtheta.deta(alpha, .lapar, earg = .earg )
c(w) * cbind(dl.dalpha * dalpha.deta)
}), list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
weight = eval(substitute(expression({
- d2l.dalpha2 = dl.dalpha^2
- d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
- wz = c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ d2l.dalpha2 <- dl.dalpha^2
+ d2alpha.deta2 <- d2theta.deta2(alpha, .lapar, earg = .earg )
+ wz <- c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
if (TRUE &&
intercept.only) {
- wz = cbind(wz)
- sumw = sum(w)
+ wz <- cbind(wz)
+ sumw <- sum(w)
for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[, iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[,iii] <- sum(wz[, iii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else {
- pooled.weight = FALSE
+ pooled.weight <- FALSE
}
wz
}), list( .lapar = lapar, .earg = earg ))))
@@ -1087,31 +1093,33 @@ frank.control <- function(save.weight = TRUE, ...)
rfgm <- function(n, alpha) {
- if (!is.Numeric(n, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for argument 'n'")
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
if (!is.Numeric(alpha))
stop("bad input for argument 'alpha'")
if (any(abs(alpha) > 1))
stop("argument 'alpha' has values out of range")
- y1 = V1 = runif(n)
- V2 = runif(n)
- temp = 2*y1 - 1
- A = alpha * temp - 1
- B = sqrt(1 - 2 * alpha * temp + (alpha*temp)^2 + 4 * alpha * V2 * temp)
- y2 = 2 * V2 / (B - A)
- matrix(c(y1, y2), nrow = n, ncol = 2)
+ y1 <- V1 <- runif(use.n)
+ V2 <- runif(use.n)
+ temp <- 2*y1 - 1
+ A <- alpha * temp - 1
+ B <- sqrt(1 - 2 * alpha * temp + (alpha*temp)^2 + 4 * alpha * V2 * temp)
+ y2 <- 2 * V2 / (B - A)
+ matrix(c(y1, y2), nrow = use.n, ncol = 2)
}
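
A minimal usage sketch for the rewritten rfgm() above (a sketch only, assuming the
function is reachable from an attached VGAM, e.g. after library(VGAM)):

  set.seed(123)
  yy <- rfgm(n = 1000, alpha = 0.7)    # 1000 x 2 matrix of FGM-copula draws
  stopifnot(all(yy > 0 & yy < 1))      # both margins lie in (0, 1)
  cor(yy[, 1], yy[, 2])                # roughly alpha / 3 for the FGM copula
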
dfgm <- function(x1, x2, alpha, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
+ if (!is.logical(log.arg <- log) ||
+ length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
-
if (!is.Numeric(alpha))
stop("bad input for 'alpha'")
if (any(abs(alpha) > 1))
@@ -1120,18 +1128,18 @@ dfgm <- function(x1, x2, alpha, log = FALSE) {
length( log.arg ) != 1 )
stop("bad input for argument 'log'")
- L = max(length(x1), length(x2), length(alpha))
- if (length(x1) != L) x1 = rep(x1, length.out = L)
- if (length(x2) != L) x2 = rep(x2, length.out = L)
- if (length(alpha) != L) alpha = rep(alpha, length.out = L)
- ans = 0 * x1
- xnok = (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
+ L <- max(length(x1), length(x2), length(alpha))
+ if (length(x1) != L) x1 <- rep(x1, length.out = L)
+ if (length(x2) != L) x2 <- rep(x2, length.out = L)
+ if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+ ans <- 0 * x1
+ xnok <- (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
if ( log.arg ) {
- ans[!xnok] = log1p(alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
- ans[xnok] = log(0)
+ ans[!xnok] <- log1p(alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
+ ans[xnok] <- log(0)
} else {
- ans[!xnok] = 1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
- ans[xnok] = 0
+ ans[!xnok] <- 1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
+ ans[xnok] <- 0
if (any(ans<0))
stop("negative values in the density (alpha out of range)")
}
@@ -1145,32 +1153,31 @@ pfgm <- function(q1, q2, alpha) {
if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
- L = max(length(q1), length(q2), length(alpha))
- if (length(q1) != L) q1 = rep(q1, length.out = L)
- if (length(q2) != L) q2 = rep(q2, length.out = L)
- if (length(alpha) != L) alpha = rep(alpha, length.out = L)
-
- x=q1; y=q2
- index = (x >= 1 & y < 1) |
- (y >= 1 & x < 1) |
- (x <= 0 | y <= 0) |
- (x >= 1 & y >= 1)
- ans = as.numeric(index)
+ L <- max(length(q1), length(q2), length(alpha))
+ if (length(q1) != L) q1 <- rep(q1, length.out = L)
+ if (length(q2) != L) q2 <- rep(q2, length.out = L)
+ if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+
+ x <- q1; y <- q2
+ index <- (x >= 1 & y < 1) |
+ (y >= 1 & x < 1) |
+ (x <= 0 | y <= 0) |
+ (x >= 1 & y >= 1)
+ ans <- as.numeric(index)
if (any(!index)) {
- ans[!index] = q1[!index] * q2[!index] * (1 + alpha[!index] *
- (1-q1[!index])*(1-q2[!index]))
+ ans[!index] <- q1[!index] * q2[!index] * (1 + alpha[!index] *
+ (1-q1[!index])*(1-q2[!index]))
}
- ans[x >= 1 & y<1] = y[x >= 1 & y<1] # P(Y2 < q2) = q2
- ans[y >= 1 & x<1] = x[y >= 1 & x<1] # P(Y1 < q1) = q1
- ans[x <= 0 | y <= 0] = 0
- ans[x >= 1 & y >= 1] = 1
+ ans[x >= 1 & y<1] <- y[x >= 1 & y<1] # P(Y2 < q2) = q2
+ ans[y >= 1 & x<1] <- x[y >= 1 & x<1] # P(Y1 < q1) = q1
+ ans[x <= 0 | y <= 0] <- 0
+ ans[x >= 1 & y >= 1] <- 1
ans
}
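
A quick consistency sketch for dfgm()/pfgm() as rewritten above: at alpha = 0 the FGM
copula reduces to independence, so the density is 1 on the unit square and the cdf is
the product of its arguments (assuming the functions are accessible, e.g. library(VGAM)):

  u <- runif(5); v <- runif(5)
  stopifnot(all.equal(dfgm(u, v, alpha = 0), rep(1, 5)))
  stopifnot(all.equal(pfgm(u, v, alpha = 0), u * v))
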
-fgm.control <- function(save.weight = TRUE, ...)
-{
+fgm.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -1200,8 +1207,8 @@ fgm.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Farlie-Gumbel-Morgenstern distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = earg )),
+ "Links: ",
+ namesof("apar", lapar, earg = earg )),
initialize = eval(substitute(expression({
if (any(y < 0) || any(y > 1))
stop("the response must have values in the unit square")
@@ -1223,84 +1230,85 @@ fgm.control <- function(save.weight = TRUE, ...)
namesof("apar", .lapar, earg = .earg , short = TRUE)
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- ainit = if (length( .iapar )) .iapar else {
- mean1 = if ( .imethod == 1) weighted.mean(y[, 1], w) else
- median(y[, 1])
- mean2 = if ( .imethod == 1) weighted.mean(y[, 2], w) else
- median(y[, 2])
- Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
+ ainit <- if (length( .iapar )) .iapar else {
+ mean1 <- if ( .imethod == 1) weighted.mean(y[, 1], w) else
+ median(y[, 1])
+ mean2 <- if ( .imethod == 1) weighted.mean(y[, 2], w) else
+ median(y[, 2])
+ Finit <- weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
(Finit / (mean1 * mean2) - 1) / ((1-mean1) * (1-mean2))
}
- ainit = min(0.95, max(ainit, -0.95))
+ ainit <- min(0.95, max(ainit, -0.95))
- etastart =
+ etastart <-
theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- fv.matrix = matrix(0.5, length(alpha), 2)
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
+ fv.matrix <- matrix(0.5, length(alpha), 2)
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
fv.matrix
}, list( .lapar = lapar, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c("apar" = .lapar )
+ misc$link <- c("apar" = .lapar )
- misc$earg = list(apar = .earg )
+ misc$earg <- list("apar" = .earg )
- misc$expected = FALSE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- FALSE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(c(w) * dfgm(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
+ sum(c(w) * dfgm(x1 = y[, 1],
+ x2 = y[, 2], alpha = alpha, log = TRUE))
}
}, list( .lapar = lapar, .earg = earg ))),
vfamily = c("fgm"),
deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+ dalpha.deta <- dtheta.deta(alpha, .lapar, earg = .earg )
- numerator = (1 - 2 * y[, 1]) * (1 - 2 * y[, 2])
- denom = 1 + alpha * numerator
+ numerator <- (1 - 2 * y[, 1]) * (1 - 2 * y[, 2])
+ denom <- 1 + alpha * numerator
- mytolerance = .Machine$double.eps
+ mytolerance <- .Machine$double.eps
bad <- (denom <= mytolerance) # Range violation
if (any(bad)) {
cat("There are some range violations in @deriv\n")
flush.console()
- denom[bad] = 2 * mytolerance
+ denom[bad] <- 2 * mytolerance
}
- dl.dalpha = numerator / denom
+ dl.dalpha <- numerator / denom
c(w) * cbind(dl.dalpha * dalpha.deta)
}), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
- run.var = 0
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = rfgm(n, alpha=alpha)
- numerator = (1 - 2 * ysim[, 1]) * (1 - 2 * ysim[, 2])
- denom = 1 + alpha * numerator
- dl.dalpha = numerator / denom
+ ysim <- rfgm(n, alpha=alpha)
+ numerator <- (1 - 2 * ysim[, 1]) * (1 - 2 * ysim[, 2])
+ denom <- 1 + alpha * numerator
+ dl.dalpha <- numerator / denom
rm(ysim)
- temp3 = dl.dalpha
- run.var = ((ii - 1) * run.var + temp3^2) / ii
+ temp3 <- dl.dalpha
+ run.var <- ((ii - 1) * run.var + temp3^2) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * dalpha.deta^2
+ wz <- wz * dalpha.deta^2
c(w) * wz
}), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))))
}
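
A hedged end-to-end sketch for the fgm() family defined above, fitted with vglm() on
data simulated by rfgm(); the two-column response and intercept-only formula follow the
usual VGAM convention for bivariate copula families, and the default link for 'apar' is
whatever the fgm() argument list (not shown in this hunk) declares:

  set.seed(1)
  fdata <- data.frame(rfgm(n = 500, alpha = 0.5))
  colnames(fdata) <- c("y1", "y2")
  fit <- vglm(cbind(y1, y2) ~ 1, family = fgm, data = fdata, trace = TRUE)
  coef(fit, matrix = TRUE)   # intercept on the link scale of 'apar'
  Coef(fit)                  # back-transformed estimate, should be near 0.5
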
@@ -1325,8 +1333,8 @@ fgm.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Gumbel's Type I bivariate distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = earg )),
+ "Links: ",
+ namesof("apar", lapar, earg = earg )),
initialize = eval(substitute(expression({
temp5 <-
@@ -1347,38 +1355,38 @@ fgm.control <- function(save.weight = TRUE, ...)
c(namesof("apar", .lapar , earg = .earg , short = TRUE))
if (!length(etastart)) {
- ainit = if (length( .iapar )) rep( .iapar, length.out = n) else {
- mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
- mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
- Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
+ ainit <- if (length( .iapar )) rep( .iapar, length.out = n) else {
+ mean1 <- if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
+ mean2 <- if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
+ Finit <- 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
(log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
}
- etastart =
+ etastart <-
theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
cbind(rep(1, len = length(alpha)),
rep(1, len = length(alpha)))
}, list( .lapar = lapar, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c("apar" = .lapar )
+ misc$link <- c("apar" = .lapar )
- misc$earg = list("apar" = .earg )
+ misc$earg <- list("apar" = .earg )
- misc$expected = FALSE
- misc$pooled.weight = pooled.weight
+ misc$expected <- FALSE
+ misc$pooled.weight <- pooled.weight
misc$multipleResponses <- FALSE
}), list( .lapar = lapar, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
- mytolerance = .Machine$double.xmin
+ denom <- (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
+ mytolerance <- .Machine$double.xmin
bad <- (denom <= mytolerance) # Range violation
if (any(bad)) {
cat("There are some range violations in @deriv\n")
@@ -1391,31 +1399,32 @@ fgm.control <- function(save.weight = TRUE, ...)
}, list( .lapar = lapar, .earg = earg ))),
vfamily = c("gumbelIbiv"),
deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
- numerator = (alpha*y[, 1] - 1)*y[, 2] + (alpha*y[, 2] - 1)*y[, 1] + 1
- denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
- denom = abs(denom)
+ alpha <- eta2theta(eta, .lapar, earg = .earg )
+ numerator <- (alpha*y[, 1] - 1)*y[, 2] +
+ (alpha*y[, 2] - 1)*y[, 1] + 1
+ denom <- (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
+ denom <- abs(denom)
- dl.dalpha = numerator / denom + y[, 1]*y[, 2]
+ dl.dalpha <- numerator / denom + y[, 1]*y[, 2]
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+ dalpha.deta <- dtheta.deta(alpha, .lapar, earg = .earg )
c(w) * cbind(dl.dalpha * dalpha.deta)
}), list( .lapar = lapar, .earg = earg ))),
weight = eval(substitute(expression({
- d2l.dalpha2 = (numerator/denom)^2 - 2*y[, 1]*y[, 2] / denom
- d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
- wz = c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ d2l.dalpha2 <- (numerator/denom)^2 - 2*y[, 1]*y[, 2] / denom
+ d2alpha.deta2 <- d2theta.deta2(alpha, .lapar, earg = .earg )
+ wz <- c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
if (TRUE &&
intercept.only) {
- wz = cbind(wz)
- sumw = sum(w)
+ wz <- cbind(wz)
+ sumw <- sum(w)
for(iii in 1:ncol(wz))
- wz[, iii] = sum(wz[, iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[, iii] <- sum(wz[, iii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else {
- pooled.weight = FALSE
+ pooled.weight <- FALSE
}
wz
}), list( .lapar = lapar, .earg = earg ))))
@@ -1428,54 +1437,53 @@ fgm.control <- function(save.weight = TRUE, ...)
pplack <- function(q1, q2, oratio) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
-
- L = max(length(q1), length(q2), length(oratio))
- if (length(q1) != L) q1 = rep(q1, length.out = L)
- if (length(q2) != L) q2 = rep(q2, length.out = L)
- if (length(oratio) != L) oratio = rep(oratio, length.out = L)
-
- x=q1; y=q2
- index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
- (x <= 0 | y <= 0) | (x >= 1 & y >= 1) |
- (abs(oratio - 1) < 1.0e-6) # .Machine$double.eps
- ans = as.numeric(index)
- if (any(!index)) {
- temp1 = 1 + (oratio[!index] - 1) * (q1[!index] + q2[!index])
- temp2 = temp1 - sqrt(temp1^2 - 4 * oratio[!index] *
- (oratio[!index] - 1) * q1[!index] * q2[!index])
- ans[!index] = 0.5 * temp2 / (oratio[!index] - 1)
- }
+ if (!is.Numeric(q1)) stop("bad input for 'q1'")
+ if (!is.Numeric(q2)) stop("bad input for 'q2'")
+ if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
+
+ L <- max(length(q1), length(q2), length(oratio))
+ if (length(q1) != L) q1 <- rep(q1, length.out = L)
+ if (length(q2) != L) q2 <- rep(q2, length.out = L)
+ if (length(oratio) != L) oratio <- rep(oratio, length.out = L)
+
+ x <- q1; y <- q2
+ index <- (x >= 1 & y < 1) | (y >= 1 & x < 1) |
+ (x <= 0 | y <= 0) | (x >= 1 & y >= 1) |
+ (abs(oratio - 1) < 1.0e-6) # .Machine$double.eps
+ ans <- as.numeric(index)
+ if (any(!index)) {
+ temp1 <- 1 + (oratio[!index] - 1) * (q1[!index] + q2[!index])
+ temp2 <- temp1 - sqrt(temp1^2 - 4 * oratio[!index] *
+ (oratio[!index] - 1) * q1[!index] * q2[!index])
+ ans[!index] <- 0.5 * temp2 / (oratio[!index] - 1)
+ }
- ind2 = (abs(oratio - 1) < 1.0e-6) # .Machine$double.eps
- ans[ind2] = x[ind2] * y[ind2]
- ans[x >= 1 & y<1] = y[x >= 1 & y<1] # P(Y2 < q2) = q2
- ans[y >= 1 & x<1] = x[y >= 1 & x<1] # P(Y1 < q1) = q1
- ans[x <= 0 | y <= 0] = 0
- ans[x >= 1 & y >= 1] = 1
- ans
+ ind2 <- (abs(oratio - 1) < 1.0e-6) # .Machine$double.eps
+ ans[ind2] <- x[ind2] * y[ind2]
+ ans[x >= 1 & y<1] <- y[x >= 1 & y<1] # P(Y2 < q2) = q2
+ ans[y >= 1 & x<1] <- x[y >= 1 & x<1] # P(Y1 < q1) = q1
+ ans[x <= 0 | y <= 0] <- 0
+ ans[x >= 1 & y >= 1] <- 1
+ ans
}
rplack <- function(n, oratio) {
- if (!is.Numeric(n, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for 'n'")
- if (!is.Numeric(oratio, positive = TRUE))
- stop("bad input for 'oratio'")
- if (length(oratio) != n) oratio = rep(oratio, length.out = n)
-
- y1 = U = runif(n)
- V = runif(n)
- Z = V * (1-V)
- y2 = (2*Z*(y1*oratio^2 + 1 - y1) + oratio * (1 - 2 * Z) -
- (1 - 2 * V) *
- sqrt(oratio * (oratio + 4*Z*y1*(1-y1)*(1-oratio)^2))) / (oratio +
- Z*(1-oratio)^2)
- matrix(c(y1, 0.5 * y2), nrow = n, ncol = 2)
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+
+ y1 <- U <- runif(use.n)
+ V <- runif(use.n)
+ Z <- V * (1-V)
+ y2 <- (2*Z*(y1*oratio^2 + 1 - y1) + oratio * (1 - 2 * Z) -
+ (1 - 2 * V) *
+ sqrt(oratio * (oratio + 4*Z*y1*(1-y1)*(1-oratio)^2))) / (oratio +
+ Z*(1-oratio)^2)
+ matrix(c(y1, 0.5 * y2), nrow = use.n, ncol = 2)
}
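
A short sketch for the rewritten rplack() (assuming it is exported by VGAM):

  set.seed(2)
  pp <- rplack(n = 2000, oratio = 4)   # oratio > 1 gives positive dependence
  stopifnot(all(pp > 0 & pp < 1))
  cor(pp[, 1], pp[, 2])                # expected to be clearly positive here
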
@@ -1486,33 +1494,21 @@ dplack <- function(x1, x2, oratio, log = FALSE) {
rm(log)
- if (!is.Numeric(oratio, positive = TRUE))
- stop("bad input for 'oratio'")
- L = max(length(x1), length(x2), length(oratio))
- if (length(x1) != L) x1 = rep(x1, length.out = L)
- if (length(x2) != L) x2 = rep(x2, length.out = L)
- if (length(oratio) != L) oratio = rep(oratio, length.out = L)
- if ( !is.logical( log.arg ) || length( log.arg ) != 1 )
- stop("bad input for argument 'log'")
-
- if ( log.arg ) {
- ans = log(oratio) + log1p((oratio - 1) *
- (x1+x2-2*x1*x2)) - 1.5 *
- log((1 + (x1+x2)*(oratio - 1))^2 - 4 * oratio * (oratio - 1)*x1*x2)
- ans[(x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1)] = log(0)
- } else {
- ans = oratio * ((oratio - 1) * (x1+x2-2*x1*x2) + 1) / ((1 +
- (x1+x2)*(oratio - 1))^2 - 4 * oratio * (oratio - 1)*x1*x2)^1.5
- ans[(x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1)] = 0
- }
- ans
+ ans <- log(oratio) + log1p((oratio - 1) *
+ (x1+x2 - 2*x1*x2)) - 1.5 *
+ log((1 + (x1+x2)*(oratio - 1))^2 -
+ 4 * oratio * (oratio - 1)*x1*x2)
+ ans[ # !is.na(x1) & !is.na(x2) & !is.na(oratio) &
+ ((x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1))] <- log(0)
+
+
+ if (log.arg) ans else exp(ans)
}
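
At oratio = 1 the Plackett copula is the independence copula, so the rewritten dplack()
and pplack() can be sanity-checked as follows (a sketch):

  u <- runif(4); v <- runif(4)
  stopifnot(all.equal(dplack(u, v, oratio = 1), rep(1, 4)))
  stopifnot(all.equal(pplack(u, v, oratio = 1), u * v))
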
-plackett.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+plackett.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
@@ -1559,50 +1555,51 @@ plackett.control <- function(save.weight = TRUE, ...)
namesof("oratio", .link , earg = .earg, short = TRUE)
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- orinit = if (length( .ioratio )) .ioratio else {
+ orinit <- if (length( .ioratio )) .ioratio else {
if ( .imethod == 2) {
- scorp = cor(y)[1, 2]
+ scorp <- cor(y)[1, 2]
if (abs(scorp) <= 0.1) 1 else
if (abs(scorp) <= 0.3) 3^sign(scorp) else
if (abs(scorp) <= 0.6) 5^sign(scorp) else
if (abs(scorp) <= 0.8) 20^sign(scorp) else 40^sign(scorp)
} else {
- y10 = weighted.mean(y[, 1], w)
- y20 = weighted.mean(y[, 2], w)
+ y10 <- weighted.mean(y[, 1], w)
+ y20 <- weighted.mean(y[, 2], w)
(0.5 + sum(w[(y[, 1] < y10) & (y[, 2] < y20)])) *
(0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] >= y20)])) / (
((0.5 + sum(w[(y[, 1] < y10) & (y[, 2] >= y20)])) *
(0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] < y20)]))))
}
}
- etastart = theta2eta(rep(orinit, length.out = n),
- .link , earg = .earg )
+ etastart <-
+ theta2eta(rep(orinit, length.out = n),
+ .link , earg = .earg )
}
- }), list( .ioratio=ioratio, .link = link, .earg = earg,
+ }), list( .ioratio = ioratio, .link = link, .earg = earg,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- oratio = eta2theta(eta, .link , earg = .earg )
- fv.matrix = matrix(0.5, length(oratio), 2)
+ oratio <- eta2theta(eta, .link , earg = .earg )
+ fv.matrix <- matrix(0.5, length(oratio), 2)
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(dimnames(eta)[[1]], extra$dimnamesy2)
fv.matrix
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(oratio = .link)
+ misc$link <- c(oratio = .link)
- misc$earg = list(oratio = .earg)
+ misc$earg <- list(oratio = .earg)
- misc$expected = FALSE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- FALSE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .link = link, .earg = earg,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- oratio = eta2theta(eta, .link , earg = .earg )
+ oratio <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dplack(x1 = y[, 1], x2 = y[, 2],
@@ -1611,42 +1608,43 @@ plackett.control <- function(save.weight = TRUE, ...)
}, list( .link = link, .earg = earg ))),
vfamily = c("plackett"),
deriv = eval(substitute(expression({
- oratio = eta2theta(eta, .link , earg = .earg )
- doratio.deta = dtheta.deta(oratio, .link , earg = .earg )
- y1 = y[, 1]
- y2 = y[, 2]
- de3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
- (y1+y2-2*y1*y2)) - 1.5 *
- log((1 + (y1+y2)*(oratio - 1))^2 - 4 * oratio * (oratio - 1)*y1*y2)),
- name = "oratio", hessian= FALSE)
- eval.de3 = eval(de3)
-
- dl.doratio = attr(eval.de3, "gradient")
+ oratio <- eta2theta(eta, .link , earg = .earg )
+ doratio.deta <- dtheta.deta(oratio, .link , earg = .earg )
+ y1 <- y[, 1]
+ y2 <- y[, 2]
+ de3 <- deriv3(~ (log(oratio) + log(1+(oratio - 1) *
+ (y1+y2-2*y1*y2)) - 1.5 *
+ log((1 + (y1+y2)*(oratio - 1))^2 -
+ 4 * oratio * (oratio - 1)*y1*y2)),
+ name = "oratio", hessian = FALSE)
+ eval.de3 <- eval(de3)
+
+ dl.doratio <- attr(eval.de3, "gradient")
c(w) * dl.doratio * doratio.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- sd3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
+ sd3 <- deriv3(~ (log(oratio) + log(1+(oratio - 1) *
(y1sim+y2sim-2*y1sim*y2sim)) - 1.5 *
log((1 + (y1sim+y2sim)*(oratio - 1))^2 -
4 * oratio * (oratio - 1)*y1sim*y2sim)),
name = "oratio", hessian= FALSE)
- run.var = 0
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = rplack(n, oratio=oratio)
- y1sim = ysim[, 1]
- y2sim = ysim[, 1]
- eval.sd3 = eval(sd3)
- dl.doratio = attr(eval.sd3, "gradient")
+ ysim <- rplack(n, oratio=oratio)
+ y1sim <- ysim[, 1]
+      y2sim <- ysim[, 2]   # second column of the simulated response
+ eval.sd3 <- eval(sd3)
+ dl.doratio <- attr(eval.sd3, "gradient")
rm(ysim, y1sim, y2sim)
- temp3 = dl.doratio
- run.var = ((ii - 1) * run.var + temp3^2) / ii
+ temp3 <- dl.doratio
+ run.var <- ((ii - 1) * run.var + temp3^2) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * doratio.deta^2
+ wz <- wz * doratio.deta^2
c(w) * wz
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))))
}
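
A hedged fitting sketch for the plackett() family above, on rplack()-simulated data;
standard vglm() usage is assumed, and Coef() back-transforms whichever link the
plackett() argument list (not shown in this hunk) declares for 'oratio':

  set.seed(3)
  pdata <- data.frame(rplack(n = 1000, oratio = exp(1)))
  colnames(pdata) <- c("y1", "y2")
  pfit <- vglm(cbind(y1, y2) ~ 1, family = plackett, data = pdata, trace = TRUE)
  Coef(pfit)   # fitted odds ratio, should be near exp(1)
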
@@ -1660,68 +1658,68 @@ damh <- function(x1, x2, alpha, log = FALSE) {
rm(log)
- if (!is.Numeric(x1)) stop("bad input for 'x1'")
- if (!is.Numeric(x2)) stop("bad input for 'x2'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
- L = max(length(x1), length(x2), length(alpha))
- alpha = rep(alpha, length.out = L)
- x1 = rep(x1, length.out = L)
- x2 = rep(x2, length.out = L)
- temp = 1-alpha*(1-x1)*(1-x2)
+
+ L <- max(length(x1), length(x2), length(alpha))
+ alpha <- rep(alpha, length.out = L)
+ x1 <- rep(x1, length.out = L)
+ x2 <- rep(x2, length.out = L)
+ temp <- 1 - alpha*(1-x1)*(1-x2)
+
if (log.arg) {
- ans = log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
- ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = log(0)
+ ans <- log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
+ ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] <- log(0)
} else {
- ans = (1-alpha+2*alpha*x1*x2/temp) / (temp^2)
- ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = 0
+ ans <- (1-alpha+2*alpha*x1*x2/temp) / (temp^2)
+ ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] <- 0
}
+ ans[abs(alpha) > 1] <- NA
ans
}
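
damh() at alpha = 0 is the independence density on the unit square, which gives a quick
check of the rewritten code (a sketch):

  u <- runif(3); v <- runif(3)
  stopifnot(all.equal(damh(u, v, alpha = 0), rep(1, 3)))
  stopifnot(all.equal(damh(u, v, alpha = 0, log = TRUE), rep(0, 3)))
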
pamh <- function(q1, q2, alpha) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
-
- L = max(length(q1), length(q2), length(alpha))
- if (length(q1) != L) q1 = rep(q1, length.out = L)
- if (length(q2) != L) q2 = rep(q2, length.out = L)
- if (length(alpha) != L) alpha = rep(alpha, length.out = L)
-
- x=q1; y=q2
- index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
- (x <= 0 | y<= 0) | (x >= 1 & y >= 1)
- ans = as.numeric(index)
- if (any(!index)) {
- ans[!index] = (q1[!index]*q2[!index]) / (1 -
- alpha[!index]*(1-q1[!index])*(1-q2[!index]))
- }
- ans[x >= 1 & y < 1] = y[x >= 1 & y < 1] # P(Y2 < q2) = q2
- ans[y >= 1 & x < 1] = x[y >= 1 & x < 1] # P(Y1 < q1) = q1
- ans[x <= 0 | y <= 0] = 0
- ans[x >= 1 & y >= 1] = 1
- ans
+ if (!is.Numeric(q1)) stop("bad input for 'q1'")
+ if (!is.Numeric(q2)) stop("bad input for 'q2'")
+ if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
+
+ L <- max(length(q1), length(q2), length(alpha))
+ if (length(q1) != L) q1 <- rep(q1, length.out = L)
+ if (length(q2) != L) q2 <- rep(q2, length.out = L)
+ if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+
+ x <- q1; y <- q2
+ index <- (x >= 1 & y < 1) | (y >= 1 & x < 1) |
+ (x <= 0 | y<= 0) | (x >= 1 & y >= 1)
+ ans <- as.numeric(index)
+ if (any(!index)) {
+ ans[!index] <- (q1[!index]*q2[!index]) / (1 -
+ alpha[!index]*(1-q1[!index])*(1-q2[!index]))
+ }
+ ans[x >= 1 & y < 1] <- y[x >= 1 & y < 1] # P(Y2 < q2) = q2
+ ans[y >= 1 & x < 1] <- x[y >= 1 & x < 1] # P(Y1 < q1) = q1
+ ans[x <= 0 | y <= 0] <- 0
+ ans[x >= 1 & y >= 1] <- 1
+ ans[abs(alpha) > 1] <- NA
+ ans
}
+
ramh <- function(n, alpha) {
- if (!is.Numeric(n, positive = TRUE, allowable.length = 1,
- integer.valued = TRUE))
- stop("bad input for 'n'")
- if (!is.Numeric(alpha))
- stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1))
- stop("'alpha' values out of range")
-
- U1 = V1 = runif(n)
- V2 = runif(n)
- b = 1-V1
- A = -alpha*(2*b*V2+1)+2*alpha^2*b^2*V2+1
- B = alpha^2*(4*b^2*V2-4*b*V2+1)+alpha*(4*V2-4*b*V2-2)+1
- U2 = (2*V2*(alpha*b - 1)^2)/(A+sqrt(B))
- matrix(c(U1,U2), nrow = n, ncol = 2)
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (any(abs(alpha) > 1))
+ stop("'alpha' values out of range")
+
+ U1 <- V1 <- runif(use.n)
+ V2 <- runif(use.n)
+ b <- 1-V1
+ A <- -alpha*(2*b*V2+1)+2*alpha^2*b^2*V2+1
+ B <- alpha^2*(4*b^2*V2-4*b*V2+1)+alpha*(4*V2-4*b*V2-2)+1
+ U2 <- (2*V2*(alpha*b - 1)^2)/(A+sqrt(B))
+ matrix(c(U1, U2), nrow = use.n, ncol = 2)
}
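
ramh() draws can be compared against pamh() at the component medians (a sketch; the
agreement is only approximate because the first quantity is a Monte Carlo estimate):

  set.seed(4)
  aa <- ramh(n = 5000, alpha = 0.8)
  mean(aa[, 1] <= 0.5 & aa[, 2] <= 0.5)   # empirical C(0.5, 0.5)
  pamh(0.5, 0.5, alpha = 0.8)             # exact value: 0.25 / 0.8 = 0.3125
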
@@ -1731,8 +1729,7 @@ amh.control <- function(save.weight = TRUE, ...) {
amh <- function(lalpha = "rhobit", ialpha = NULL,
- imethod = 1, nsimEIM = 250)
-{
+ imethod = 1, nsimEIM = 250) {
lalpha <- as.list(substitute(lalpha))
ealpha <- link2list(lalpha)
lalpha <- attr(ealpha, "function.name")
@@ -1755,8 +1752,8 @@ amh.control <- function(save.weight = TRUE, ...) {
new("vglmff",
blurb = c("Ali-Mikhail-Haq distribution\n",
- "Links: ",
- namesof("alpha", lalpha, earg = ealpha )),
+ "Links: ",
+ namesof("alpha", lalpha, earg = ealpha )),
initialize = eval(substitute(expression({
if (any(y < 0) || any(y > 1))
stop("the response must have values in the unit square")
@@ -1778,87 +1775,88 @@ amh.control <- function(save.weight = TRUE, ...) {
c(namesof("alpha", .lalpha, earg = .ealpha, short = TRUE))
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- ainit = if (length( .ialpha )) .ialpha else {
- mean1 = if ( .imethod == 1) weighted.mean(y[, 1], w) else
- median(y[, 1])
- mean2 = if ( .imethod == 1) weighted.mean(y[, 2], w) else
- median(y[, 2])
- Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
+ ainit <- if (length( .ialpha )) .ialpha else {
+ mean1 <- if ( .imethod == 1) weighted.mean(y[, 1], w) else
+ median(y[, 1])
+ mean2 <- if ( .imethod == 1) weighted.mean(y[, 2], w) else
+ median(y[, 2])
+ Finit <- weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
(1 - (mean1 * mean2 / Finit)) / ((1-mean1) * (1-mean2))
}
- ainit = min(0.95, max(ainit, -0.95))
- etastart =
+ ainit <- min(0.95, max(ainit, -0.95))
+ etastart <-
theta2eta(rep(ainit, length.out = n), .lalpha, earg = .ealpha )
}
}), list( .lalpha = lalpha, .ealpha = ealpha, .ialpha = ialpha,
.imethod = imethod))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
- fv.matrix = matrix(0.5, length(alpha), 2)
+ alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
+ fv.matrix <- matrix(0.5, length(alpha), 2)
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
fv.matrix
}, list( .lalpha = lalpha, .ealpha = ealpha ))),
last = eval(substitute(expression({
- misc$link = c("alpha" = .lalpha )
+ misc$link <- c("alpha" = .lalpha )
- misc$earg = list("alpha" = .ealpha )
+ misc$earg <- list("alpha" = .ealpha )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lalpha = lalpha,
.ealpha = ealpha, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
+ alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * damh(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
- }
+ "implemented yet") else {
+ sum(c(w) * damh(x1 = y[, 1], x2 = y[, 2],
+ alpha = alpha, log = TRUE))
+ }
}, list( .lalpha = lalpha, .ealpha = ealpha ))),
vfamily = c("amh"),
deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
+ alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
- dalpha.deta = dtheta.deta(alpha, .lalpha, earg = .ealpha )
+ dalpha.deta <- dtheta.deta(alpha, .lalpha, earg = .ealpha )
- y1 = y[, 1]
- y2 = y[, 2]
- de3 = deriv3(~ (log(1-alpha+(2*alpha*y1*y2/(1-alpha*(1-y1)*(1-y2))))-
+ y1 <- y[, 1]
+ y2 <- y[, 2]
+ de3 <- deriv3(~ (log(1-alpha+(2*alpha*y1*y2/(1-alpha*(1-y1)*(1-y2))))-
2*log(1-alpha*(1-y1)*(1-y2))) ,
name = "alpha", hessian= FALSE)
- eval.de3 = eval(de3)
+ eval.de3 <- eval(de3)
- dl.dalpha = attr(eval.de3, "gradient")
+ dl.dalpha <- attr(eval.de3, "gradient")
c(w) * dl.dalpha * dalpha.deta
}), list( .lalpha = lalpha, .ealpha = ealpha ))),
weight = eval(substitute(expression({
- sd3 = deriv3(~ (log(1-alpha+
- (2*alpha*y1sim*y2sim/(1-alpha*(1-y1sim)*(1-y2sim)))) -
- 2*log(1-alpha*(1-y1sim)*(1-y2sim))) ,
- name = "alpha", hessian= FALSE)
- run.var = 0
+ sd3 <- deriv3(~ (log(1-alpha+
+ (2*alpha*y1sim*y2sim/(1-alpha*(1-y1sim)*(1-y2sim)))) -
+ 2*log(1-alpha*(1-y1sim)*(1-y2sim))) ,
+ name = "alpha", hessian= FALSE)
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = ramh(n, alpha=alpha)
- y1sim = ysim[, 1]
- y2sim = ysim[, 1]
- eval.sd3 = eval(sd3)
- dl.alpha = attr(eval.sd3, "gradient")
- rm(ysim, y1sim, y2sim)
- temp3 = dl.dalpha
- run.var = ((ii - 1) * run.var + temp3^2) / ii
+ ysim <- ramh(n, alpha=alpha)
+ y1sim <- ysim[, 1]
+        y2sim <- ysim[, 2]   # second column of the simulated response
+ eval.sd3 <- eval(sd3)
+        dl.dalpha <- attr(eval.sd3, "gradient")
+ rm(ysim, y1sim, y2sim)
+ temp3 <- dl.dalpha
+ run.var <- ((ii - 1) * run.var + temp3^2) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * dalpha.deta^2
+ wz <- wz * dalpha.deta^2
c(w) * wz
}), list( .lalpha = lalpha,
@@ -1887,12 +1885,12 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
- temp5 = 1 - rho^2
- zedd1 = (x1 - mean1) / sd1
- zedd2 = (x2 - mean2) / sd2
- logpdf = -log(2 * pi) - log(sd1 ) - log(sd2) -
- 0.5 * log1p(-rho^2) +
- -(0.5 / temp5) * (zedd1^2 - 2 * rho * zedd1 * zedd2 + zedd2^2)
+ temp5 <- 1 - rho^2
+ zedd1 <- (x1 - mean1) / sd1
+ zedd2 <- (x2 - mean2) / sd2
+ logpdf <- -log(2 * pi) - log(sd1 ) - log(sd2) -
+ 0.5 * log1p(-rho^2) +
+ -(0.5 / temp5) * (zedd1^2 - 2 * rho * zedd1 * zedd2 + zedd2^2)
if (log.arg) logpdf else exp(logpdf)
}
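
With rho = 0 the rewritten dbinorm() should factor into two univariate normal densities;
a sketch of that check (assuming the 'rho' and 'log' arguments used in the body above):

  x1 <- 0.3; x2 <- -1.1
  stopifnot(all.equal(dbinorm(x1, x2, rho = 0), dnorm(x1) * dnorm(x2)))
  stopifnot(all.equal(dbinorm(x1, x2, rho = 0, log = TRUE),
                      dnorm(x1, log = TRUE) + dnorm(x2, log = TRUE)))
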
@@ -1933,14 +1931,15 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
- trivial1 = is.logical(eq.mean) && length(eq.mean) == 1 && !eq.mean
- trivial2 = is.logical(eq.sd ) && length(eq.sd ) == 1 && !eq.sd
+ trivial1 <- is.logical(eq.mean) && length(eq.mean) == 1 && !eq.mean
+ trivial2 <- is.logical(eq.sd ) && length(eq.sd ) == 1 && !eq.sd
if(!trivial1 && !trivial2)
stop("only one of 'eq.mean' and 'eq.sd' can be assigned a value")
if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
new("vglmff",
blurb = c("Bivariate normal distribution\n",
@@ -1956,10 +1955,10 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
temp8.s <- diag(5)[, -4]
temp8.s[4, 3] <- 1
constraints <- cm.vgam(temp8.m, x, .eq.mean,
- constraints, intercept.apply = TRUE)
+ constraints, apply.int = TRUE)
constraints <- cm.vgam(temp8.s, x, .eq.sd,
- constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints, apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero,
.eq.sd = eq.sd,
.eq.mean = eq.mean ))),
@@ -1986,29 +1985,30 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
namesof("rho", .lrho, earg = .erho, short = TRUE))
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- imean1 = rep(if (length( .imean1 )) .imean1 else
+ imean1 <- rep(if (length( .imean1 )) .imean1 else
weighted.mean(y[, 1], w = w), length.out = n)
- imean2 = rep(if (length( .imean2 )) .imean2 else
+ imean2 <- rep(if (length( .imean2 )) .imean2 else
weighted.mean(y[, 2], w = w), length.out = n)
- isd1 = rep(if (length( .isd1 )) .isd1 else sd(y[, 1]),
+ isd1 <- rep(if (length( .isd1 )) .isd1 else sd(y[, 1]),
length.out = n)
- isd2 = rep(if (length( .isd2 )) .isd2 else sd(y[, 2]),
+ isd2 <- rep(if (length( .isd2 )) .isd2 else sd(y[, 2]),
length.out = n)
- irho = rep(if (length( .irho )) .irho else cor(y[, 1], y[, 2]),
+ irho <- rep(if (length( .irho )) .irho else cor(y[, 1], y[, 2]),
length.out = n)
if ( .imethod == 2) {
- imean1 = abs(imean1) + 0.01
- imean2 = abs(imean2) + 0.01
+ imean1 <- abs(imean1) + 0.01
+ imean2 <- abs(imean2) + 0.01
}
- etastart = cbind(theta2eta(imean1, .lmean1, earg = .emean1),
- theta2eta(imean2, .lmean2, earg = .emean2),
- theta2eta(isd1, .lsd1, earg = .esd1),
- theta2eta(isd2, .lsd2, earg = .esd2),
- theta2eta(irho, .lrho, earg = .erho))
+ etastart <-
+ cbind(theta2eta(imean1, .lmean1, earg = .emean1),
+ theta2eta(imean2, .lmean2, earg = .emean2),
+ theta2eta(isd1, .lsd1, earg = .esd1),
+ theta2eta(isd2, .lsd2, earg = .esd2),
+ theta2eta(irho, .lrho, earg = .erho))
}
}), list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
@@ -2019,11 +2019,11 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
.isd1 = isd1, .isd2 = isd2,
.irho = irho ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mean1 = eta2theta(eta[, 1], .lmean1, earg = .emean1)
- mean2 = eta2theta(eta[, 2], .lmean2, earg = .emean2)
- fv.matrix = cbind(mean1, mean2)
+ mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
+ mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
+ fv.matrix <- cbind(mean1, mean2)
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
fv.matrix
} , list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
@@ -2031,19 +2031,19 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
.esd1 = esd1 , .esd2 = esd2 , .erho = erho ))),
last = eval(substitute(expression({
- misc$link = c("mean1" = .lmean1,
- "mean2" = .lmean2,
- "sd1" = .lsd1,
- "sd2" = .lsd2,
- "rho" = .lrho)
-
- misc$earg = list("mean1" = .emean1,
- "mean2" = .emean2,
- "sd1" = .esd1,
- "sd2" = .esd2,
- "rho" = .erho)
-
- misc$expected = TRUE
+ misc$link <- c("mean1" = .lmean1,
+ "mean2" = .lmean2,
+ "sd1" = .lsd1,
+ "sd2" = .lsd2,
+ "rho" = .lrho)
+
+ misc$earg <- list("mean1" = .emean1,
+ "mean2" = .emean2,
+ "sd1" = .esd1,
+ "sd2" = .esd2,
+ "rho" = .erho)
+
+ misc$expected <- TRUE
misc$multipleResponses <- FALSE
}) , list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
@@ -2051,11 +2051,11 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
.esd1 = esd1 , .esd2 = esd2 , .erho = erho ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mean1 = eta2theta(eta[, 1], .lmean1, earg = .emean1)
- mean2 = eta2theta(eta[, 2], .lmean2, earg = .emean2)
- sd1 = eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
- sd2 = eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
- Rho = eta2theta(eta[, 5], .lrho , earg = .erho )
+ mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
+ mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
+ sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
+ sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
+ Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
@@ -2070,35 +2070,35 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
.imethod = imethod ))),
vfamily = c("binormal"),
deriv = eval(substitute(expression({
- mean1 = eta2theta(eta[, 1], .lmean1, earg = .emean1)
- mean2 = eta2theta(eta[, 2], .lmean2, earg = .emean2)
- sd1 = eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
- sd2 = eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
- Rho = eta2theta(eta[, 5], .lrho , earg = .erho )
-
- zedd1 = (y[, 1] - mean1) / sd1
- zedd2 = (y[, 2] - mean2) / sd2
- temp5 = 1 - Rho^2
-
- SigmaInv = matrix(0, n, dimm(2))
- SigmaInv[, iam(1, 1, M = 2)] = 1 / ((sd1^2) * temp5)
- SigmaInv[, iam(2, 2, M = 2)] = 1 / ((sd2^2) * temp5)
- SigmaInv[, iam(1, 2, M = 2)] = -Rho / (sd1 * sd2 * temp5)
- dl.dmeans = mux22(t(SigmaInv), y - cbind(mean1, mean2), M = 2,
- as.matrix = TRUE)
- dl.dsd1 = -1 / sd1 + zedd1 * (zedd1 - Rho * zedd2) / (sd1 * temp5)
- dl.dsd2 = -1 / sd2 + zedd2 * (zedd2 - Rho * zedd1) / (sd2 * temp5)
- dl.drho = -Rho * (zedd1^2 - 2 * Rho * zedd1 * zedd2 +
+ mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
+ mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
+ sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
+ sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
+ Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
+
+ zedd1 <- (y[, 1] - mean1) / sd1
+ zedd2 <- (y[, 2] - mean2) / sd2
+ temp5 <- 1 - Rho^2
+
+ SigmaInv <- matrix(0, n, dimm(2))
+ SigmaInv[, iam(1, 1, M = 2)] <- 1 / ((sd1^2) * temp5)
+ SigmaInv[, iam(2, 2, M = 2)] <- 1 / ((sd2^2) * temp5)
+ SigmaInv[, iam(1, 2, M = 2)] <- -Rho / (sd1 * sd2 * temp5)
+ dl.dmeans <- mux22(t(SigmaInv), y - cbind(mean1, mean2), M = 2,
+ as.matrix = TRUE)
+ dl.dsd1 <- -1 / sd1 + zedd1 * (zedd1 - Rho * zedd2) / (sd1 * temp5)
+ dl.dsd2 <- -1 / sd2 + zedd2 * (zedd2 - Rho * zedd1) / (sd2 * temp5)
+ dl.drho <- -Rho * (zedd1^2 - 2 * Rho * zedd1 * zedd2 +
zedd2^2) / temp5^2 +
zedd1 * zedd2 / temp5 +
Rho / temp5
- dmean1.deta = dtheta.deta(mean1, .lmean1)
- dmean2.deta = dtheta.deta(mean2, .lmean2)
- dsd1.deta = dtheta.deta(sd1 , .lsd1 )
- dsd2.deta = dtheta.deta(sd2 , .lsd2 )
- drho.deta = dtheta.deta(Rho , .lrho )
- dthetas.detas = cbind(dmean1.deta,
+ dmean1.deta <- dtheta.deta(mean1, .lmean1)
+ dmean2.deta <- dtheta.deta(mean2, .lmean2)
+ dsd1.deta <- dtheta.deta(sd1 , .lsd1 )
+ dsd2.deta <- dtheta.deta(sd2 , .lsd2 )
+ drho.deta <- dtheta.deta(Rho , .lrho )
+ dthetas.detas <- cbind(dmean1.deta,
dmean2.deta,
dsd1.deta,
dsd2.deta,
@@ -2116,20 +2116,20 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
.imethod = imethod ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, dimm(M))
- wz[, iam(1, 1, M)] = SigmaInv[, iam(1, 1, M = 2)]
- wz[, iam(2, 2, M)] = SigmaInv[, iam(2, 2, M = 2)]
- wz[, iam(1, 2, M)] = SigmaInv[, iam(1, 2, M = 2)]
- wz[, iam(3, 3, M)] = (1 + 1 / temp5) / sd1^2
- wz[, iam(4, 4, M)] = (1 + 1 / temp5) / sd2^2
- wz[, iam(3, 4, M)] = -(Rho^2) / (temp5 * sd1 * sd2)
- wz[, iam(5, 5, M)] = 2 * (1 + 2 * Rho^2) / temp5^2 -
- (1 + Rho^2) / temp5^2
- wz[, iam(3, 5, M)] = -Rho / (sd1 * temp5)
- wz[, iam(4, 5, M)] = -Rho / (sd2 * temp5)
+ wz <- matrix(0.0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- SigmaInv[, iam(1, 1, M = 2)]
+ wz[, iam(2, 2, M)] <- SigmaInv[, iam(2, 2, M = 2)]
+ wz[, iam(1, 2, M)] <- SigmaInv[, iam(1, 2, M = 2)]
+ wz[, iam(3, 3, M)] <- (1 + 1 / temp5) / sd1^2
+ wz[, iam(4, 4, M)] <- (1 + 1 / temp5) / sd2^2
+ wz[, iam(3, 4, M)] <- -(Rho^2) / (temp5 * sd1 * sd2)
+ wz[, iam(5, 5, M)] <- 2 * (1 + 2 * Rho^2) / temp5^2 -
+ (1 + Rho^2) / temp5^2
+ wz[, iam(3, 5, M)] <- -Rho / (sd1 * temp5)
+ wz[, iam(4, 5, M)] <- -Rho / (sd2 * temp5)
for (ilocal in 1:M)
for (jlocal in ilocal:M)
- wz[, iam(ilocal, jlocal, M)] = wz[, iam(ilocal, jlocal, M)] *
+ wz[, iam(ilocal, jlocal, M)] <- wz[, iam(ilocal, jlocal, M)] *
dthetas.detas[, ilocal] *
dthetas.detas[, jlocal]
c(w) * wz
@@ -2166,8 +2166,8 @@ gumbelI <-
new("vglmff",
blurb=c("Gumbel's Type I Bivariate Distribution\n",
- "Links: ",
- namesof("a", la, earg = earg )),
+ "Links: ",
+ namesof("a", la, earg = earg )),
initialize=eval(substitute(expression({
if (!is.matrix(y) || ncol(y) != 2)
stop("the response must be a 2 column matrix")
@@ -2175,70 +2175,73 @@ gumbelI <-
if (any(y < 0))
stop("the response must have non-negative values only")
- predictors.names = c(namesof("a", .la, earg = .earg , short = TRUE))
+ predictors.names <-
+ c(namesof("a", .la, earg = .earg , short = TRUE))
if (!length(etastart)) {
- ainit = if (length( .ia )) rep( .ia, len = n) else {
- mean1 = if ( .imethod == 1) median(y[,1]) else mean(y[,1])
- mean2 = if ( .imethod == 1) median(y[,2]) else mean(y[,2])
- Finit = 0.01 + mean(y[,1] <= mean1 & y[,2] <= mean2)
- (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
+ ainit <- if (length( .ia )) rep( .ia, len = n) else {
+ mean1 <- if ( .imethod == 1) median(y[,1]) else mean(y[,1])
+ mean2 <- if ( .imethod == 1) median(y[,2]) else mean(y[,2])
+ Finit <- 0.01 + mean(y[,1] <= mean1 & y[,2] <= mean2)
+ (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
}
- etastart = theta2eta(rep(ainit, len = n), .la, earg = .earg )
- }
- }), list( .ia=ia, .la=la, .earg = earg, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .la, earg = .earg )
- cbind(rep(1, len = length(alpha)),
- rep(1, len = length(alpha)))
- }, list( .la=la ))),
- last = eval(substitute(expression({
- misc$link = c("a" = .la)
- misc$earg = list("a" = .earg)
- misc$expected = FALSE
- misc$pooled.weight = pooled.weight
- }), list( .la=la, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .la, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- denom = (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
- mytolerance = .Machine$double.xmin
- bad <- (denom <= mytolerance) # Range violation
- if (any(bad)) {
- cat("There are some range violations in @deriv\n")
- flush.console()
- denom[bad] = 2 * mytolerance
- }
- sum(w * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom)))
- }
- }, list( .la=la, .earg = earg ))),
- vfamily=c("gumbelI"),
- deriv=eval(substitute(expression({
- alpha = eta2theta(eta, .la, earg = .earg )
- numerator = (alpha*y[,1] - 1)*y[,2] + (alpha*y[,2] - 1)*y[,1] + 1
- denom = (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
- denom = abs(denom)
- dl.dalpha = numerator / denom + y[,1]*y[,2]
- dalpha.deta = dtheta.deta(alpha, .la, earg = .earg )
- c(w) * cbind(dl.dalpha * dalpha.deta)
- }), list( .la=la, .earg = earg ))),
- weight=eval(substitute(expression({
- d2l.dalpha2 = (numerator/denom)^2 - 2*y[,1]*y[,2] / denom
- d2alpha.deta2 = d2theta.deta2(alpha, .la, earg = .earg )
- wz = w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
- if (TRUE &&
- intercept.only) {
- wz = cbind(wz)
- sumw = sum(w)
- for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[,iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- wz
- }), list( .la=la, .earg = earg ))))
+ etastart <-
+ theta2eta(rep(ainit, len = n), .la, earg = .earg )
+ }
+ }), list( .ia=ia, .la = la, .earg = earg, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha <- eta2theta(eta, .la, earg = .earg )
+ cbind(rep(1, len = length(alpha)),
+ rep(1, len = length(alpha)))
+ }, list( .la = la ))),
+ last = eval(substitute(expression({
+ misc$link <- c("a" = .la )
+ misc$earg <- list("a" = .earg )
+
+ misc$expected <- FALSE
+ misc$pooled.weight <- pooled.weight
+ }), list( .la = la, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha <- eta2theta(eta, .la, earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ denom <- (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
+ mytolerance <- .Machine$double.xmin
+ bad <- (denom <= mytolerance) # Range violation
+ if (any(bad)) {
+          cat("There are some range violations in @loglikelihood\n")
+ flush.console()
+ denom[bad] <- 2 * mytolerance
+ }
+ sum(w * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom)))
+ }
+ }, list( .la = la, .earg = earg ))),
+ vfamily=c("gumbelI"),
+ deriv=eval(substitute(expression({
+ alpha <- eta2theta(eta, .la, earg = .earg )
+ numerator <- (alpha*y[,1] - 1)*y[,2] + (alpha*y[,2] - 1)*y[,1] + 1
+ denom <- (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
+ denom <- abs(denom)
+ dl.dalpha <- numerator / denom + y[,1]*y[,2]
+ dalpha.deta <- dtheta.deta(alpha, .la, earg = .earg )
+ c(w) * cbind(dl.dalpha * dalpha.deta)
+ }), list( .la = la, .earg = earg ))),
+ weight=eval(substitute(expression({
+ d2l.dalpha2 <- (numerator/denom)^2 - 2*y[,1]*y[,2] / denom
+ d2alpha.deta2 <- d2theta.deta2(alpha, .la, earg = .earg )
+ wz <- w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ if (TRUE &&
+ intercept.only) {
+ wz <- cbind(wz)
+ sumw <- sum(w)
+ for(iii in 1:ncol(wz))
+ wz[,iii] <- sum(wz[,iii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
+ } else
+ pooled.weight <- FALSE
+ wz
+ }), list( .la = la, .earg = earg ))))
}
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 6fbf55d..2608d7c 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -15,45 +15,45 @@
-process.categorical.data.vgam = expression({
+process.categorical.data.vgam <- expression({
- extra$y.integer = TRUE
+ extra$y.integer <- TRUE
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
if (!is.matrix(y)) {
- yf = as.factor(y)
- lev = levels(yf)
- llev = length(lev)
- nn = length(yf)
- y = matrix(0, nn, llev)
- y[cbind(1:nn,as.vector(unclass(yf)))] = 1
- dimnames(y) = list(names(yf), lev)
+ yf <- as.factor(y)
+ lev <- levels(yf)
+ llev <- length(lev)
+ nn <- length(yf)
+ y <- matrix(0, nn, llev)
+ y[cbind(1:nn,as.vector(unclass(yf)))] <- 1
+ dimnames(y) <- list(names(yf), lev)
if (llev <= 1)
stop("the response matrix does not have 2 or more columns")
} else {
- nn = nrow(y)
+ nn <- nrow(y)
}
- nvec = rowSums(y)
+ nvec <- rowSums(y)
if (min(y) < 0 || any(round(y) != y))
stop("the response must be non-negative counts (integers)")
if (!exists("delete.zero.colns") ||
(exists("delete.zero.colns") && delete.zero.colns)) {
- sumy2 = colSums(y)
+ sumy2 <- colSums(y)
if (any(index <- sumy2 == 0)) {
- y = y[,!index, drop = FALSE]
- sumy2 = sumy2[!index]
+ y <- y[,!index, drop = FALSE]
+ sumy2 <- sumy2[!index]
if (all(index) || ncol(y) <= 1)
stop("'y' matrix has 0 or 1 columns")
warning("Deleted ", sum(!index),
@@ -66,23 +66,23 @@ process.categorical.data.vgam = expression({
smiss <- sum(miss)
warning("Deleted ", smiss,
" rows of the response matrix due to zero counts")
- x = x[!miss,, drop = FALSE]
- y = y[!miss,, drop = FALSE]
- w = cbind(w)
- w = w[!miss,, drop = FALSE]
+ x <- x[!miss,, drop = FALSE]
+ y <- y[!miss,, drop = FALSE]
+ w <- cbind(w)
+ w <- w[!miss,, drop = FALSE]
- nvec = nvec[!miss]
- nn = nn - smiss
+ nvec <- nvec[!miss]
+ nn <- nn - smiss
}
- w = w * nvec
+ w <- w * nvec
- nvec[nvec == 0] = 1
- y = prop.table(y, 1) # Convert to proportions
+ nvec[nvec == 0] <- 1
+ y <- prop.table(y, 1) # Convert to proportions
if (length(mustart) + length(etastart) == 0) {
- mustart = y + (1 / ncol(y) - y) / nvec
+ mustart <- y + (1 / ncol(y) - y) / nvec
}
})
@@ -91,40 +91,48 @@ process.categorical.data.vgam = expression({
Deviance.categorical.data.vgam <-
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
-{
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (ncol(y) == 1 || ncol(mu) == 1)
- stop("'y' and 'mu' must have at least 2 columns")
- double.eps = .Machine$double.xmin # ^0.75
- devy = y
- nonz = (y != 0)
- devy[nonz] = y[nonz] * log(y[nonz])
+ if (ncol(y) == 1 || ncol(mu) == 1)
+ stop("arguments 'y' and 'mu' must have at least 2 columns")
- devmu = 0 * y # filler; y*log(mu) gives a warning (fixed up anyway).
- if (any(smallmu <- (mu * (1 - mu) < double.eps))) {
- warning("fitted values close to 0 or 1")
- smu = mu[smallmu]
- smy = y[smallmu]
- smu = ifelse(smu < double.eps, double.eps, smu)
- devmu[smallmu] = smy * log(smu)
- }
- devmu[!smallmu] = y[!smallmu] * log(mu[!smallmu])
- devi = 2 * (devy - devmu)
- if (residuals) {
- M = if (is.matrix(eta)) ncol(eta) else 1
- if (M > 1)
- return(NULL)
- devi = devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
- return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
- } else
- sum(w * devi)
+ double.eps <- sqrt( .Machine$double.xmin )
+
+
+ devy <- y
+ nonz <- (y != 0)
+ devy[nonz] <- y[nonz] * log(y[nonz])
+
+ devmu <- 0 * y # filler; y*log(mu) gives a warning (fixed up anyway).
+ if (any(smallmu <- (mu * (1 - mu) < double.eps))) {
+ warning("fitted values close to 0 or 1")
+ smu <- mu[smallmu]
+ smy <- y[smallmu]
+ smu <- ifelse(smu < double.eps, double.eps, smu)
+
+
+
+ devmu[smallmu] <- smy * log(smu)
+ }
+ devmu[!smallmu] <- y[!smallmu] * log(mu[!smallmu])
+
+ devi <- 2 * (devy - devmu)
+
+ if (residuals) {
+ M <- if (is.matrix(eta)) ncol(eta) else 1
+ if (M > 1)
+ return(NULL)
+ devi <- devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
+ return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
+ } else {
+ sum(w * devi)
+ }
}
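
The reworked deviance is zero whenever mu equals y cellwise, which gives a cheap sanity
check (a sketch; the function is internal, so it is reached through the namespace):

  yprop <- rbind(c(0.2, 0.3, 0.5),
                 c(0.1, 0.6, 0.3))
  dev0 <- VGAM:::Deviance.categorical.data.vgam(mu = yprop, y = yprop, w = c(4, 6))
  stopifnot(abs(dev0) < 1e-10)
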
@@ -139,8 +147,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
- x = as.matrix(x)
- prob = as.matrix(prob)
+ x <- as.matrix(x)
+ prob <- as.matrix(prob)
if (((K <- ncol(x)) <= 1) ||
ncol(prob) != K)
stop("arguments 'x' and 'prob' must be matrices with ",
@@ -156,13 +164,13 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
if (any(abs(size - rowSums(x)) > smallno))
stop("rowSums(x) does not agree with argument 'size'")
} else {
- size = round(rowSums(x))
+ size <- round(rowSums(x))
}
} else {
if (!length(size))
- size = round(rowSums(prob))
+ size <- round(rowSums(prob))
}
- logdensity = lgamma(size + 1) + rowSums(x * log(prob) - lgamma(x + 1))
+ logdensity <- lgamma(size + 1) + rowSums(x * log(prob) - lgamma(x + 1))
if (log.arg) logdensity else exp(logdensity)
}
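
For a single row of counts the rewritten dmultinomial() should agree with
stats::dmultinom() (a sketch; dmultinomial() is assumed to be exported by VGAM):

  xx <- rbind(c(1, 2, 3))
  pp <- rbind(c(0.2, 0.3, 0.5))
  all.equal(dmultinomial(xx, size = 6, prob = pp),
            dmultinom(c(1, 2, 3), size = 6, prob = c(0.2, 0.3, 0.5)))
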
@@ -176,8 +184,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
sratio <- function(link = "logit",
parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
-{
+ whitespace = FALSE) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -193,18 +200,18 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
new("vglmff",
blurb = c("Stopping ratio model\n\n",
- "Links: ",
- namesof(if (reverse)
- ifelse(whitespace, "P[Y = j+1|Y <= j+1]", "P[Y=j+1|Y<=j+1]") else
- ifelse(whitespace, "P[Y = j|Y >= j]", "P[Y=j|Y>=j]"),
- link, earg = earg), "\n",
- "Variance: ",
- ifelse(whitespace,
- "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
- "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
+ "Links: ",
+ namesof(if (reverse)
+ ifelse(whitespace, "P[Y = j+1|Y <= j+1]", "P[Y=j+1|Y<=j+1]") else
+ ifelse(whitespace, "P[Y = j|Y >= j]", "P[Y=j|Y>=j]"),
+ link, earg = earg), "\n",
+ "Variance: ",
+ ifelse(whitespace,
+ "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
+ "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
@@ -215,41 +222,41 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
- delete.zero.colns = TRUE
+ delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
- extra$wy.prod = TRUE
- M = ncol(y) - 1
+ extra$wy.prod <- TRUE
+ M <- ncol(y) - 1
- mynames = if ( .reverse )
+ mynames <- if ( .reverse )
paste("P[Y", .fillerChar, "=", .fillerChar, 2:(M+1), "|Y",
.fillerChar, "<=", .fillerChar, 2:(M+1), "]", sep = "") else
paste("P[Y", .fillerChar, "=", .fillerChar, 1:M, "|Y",
.fillerChar, ">=", .fillerChar, 1:M, "]", sep = "")
predictors.names <-
namesof(mynames, .link , short = TRUE, earg = .earg )
- y.names = paste("mu", 1:(M+1), sep = "")
+ y.names <- paste("mu", 1:(M+1), sep = "")
- extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
+ extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
if (!is.matrix(eta))
- eta = as.matrix(eta)
- fv.matrix =
+ eta <- as.matrix(eta)
+ fv.matrix <-
if ( .reverse ) {
- M = ncol(eta)
- djr = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(1 - djr[, M:1], "cumprod")[, M:1]
+ M <- ncol(eta)
+ djr <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(1 - djr[, M:1], "cumprod")[, M:1]
cbind(1, djr) * cbind(temp, 1)
} else {
- dj = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(1 - dj, "cumprod")
+ dj <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(1 - dj, "cumprod")
cbind(dj, 1) * cbind(1, temp)
}
if (length(extra$dimnamesy2))
@@ -258,30 +265,31 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
fv.matrix
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
- names(misc$link) = mynames
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for (ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$parameters = mynames
- misc$reverse = .reverse
- misc$fillerChar = .fillerChar
- misc$whitespace = .whitespace
+ misc$parameters <- mynames
+ misc$reverse <- .reverse
+ misc$fillerChar <- .fillerChar
+ misc$whitespace <- .whitespace
- extra = list() # kill what was used
+ extra <- list() # kill what was used
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- cump = tapplymat1(mu, "cumsum")
+ cump <- tapplymat1(mu, "cumsum")
if ( .reverse ) {
- djr = mu[, -1] / cump[, -1]
+ djr <- mu[, -1] / cump[, -1]
theta2eta(djr, .link , earg = .earg )
} else {
- M = ncol(mu) - 1
- dj = if (M == 1) mu[, 1] else
+ M <- ncol(mu) - 1
+ dj <- if (M == 1) mu[, 1] else
mu[, 1:M] / (1 - cbind(0, cump[, 1:(M-1)]))
theta2eta(dj, .link , earg = .earg )
}
@@ -290,15 +298,15 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -307,30 +315,30 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
vfamily = c("sratio", "vcategorical"),
deriv = eval(substitute(expression({
if (!length(extra$mymat)) {
- extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
- tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
+ extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
}
if ( .reverse ) {
- djr = eta2theta(eta, .link , earg = .earg )
- Mp1 = ncol(extra$mymat)
+ djr <- eta2theta(eta, .link , earg = .earg )
+ Mp1 <- ncol(extra$mymat)
c(w) * (y[, -1] / djr - extra$mymat[, -Mp1] / (1 - djr)) *
dtheta.deta(djr, .link , earg = .earg )
} else {
- dj = eta2theta(eta, .link , earg = .earg )
+ dj <- eta2theta(eta, .link , earg = .earg )
c(w) * (y[, -ncol(y)] / dj - extra$mymat[, -1] / (1 - dj)) *
dtheta.deta(dj, .link , earg = .earg )
}
}), list( .earg = earg, .link = link, .reverse = reverse) )),
weight = eval(substitute(expression({
if ( .reverse ) {
- cump = tapplymat1(mu, "cumsum")
- ddjr.deta = dtheta.deta(djr, .link , earg = .earg )
- wz = c(w) * ddjr.deta^2 *
+ cump <- tapplymat1(mu, "cumsum")
+ ddjr.deta <- dtheta.deta(djr, .link , earg = .earg )
+ wz <- c(w) * ddjr.deta^2 *
(mu[, -1] / djr^2 + cump[, 1:M] / (1 - djr)^2)
} else {
- ccump = tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
- ddj.deta = dtheta.deta(dj, .link , earg = .earg )
- wz = c(w) * ddj.deta^2 *
+ ccump <- tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
+ ddj.deta <- dtheta.deta(dj, .link , earg = .earg )
+ wz <- c(w) * ddj.deta^2 *
(mu[, 1:M] / dj^2 + ccump[, -1] / (1 - dj)^2)
}
@@ -343,8 +351,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
cratio <- function(link = "logit",
parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
-{
+ whitespace = FALSE) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -360,20 +367,20 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
new("vglmff",
blurb = c("Continuation ratio model\n\n",
- "Links: ",
- namesof(if (reverse)
- ifelse(whitespace, "P[Y < j+1|Y <= j+1]", "P[Y<j+1|Y<=j+1]") else
- ifelse(whitespace, "P[Y > j|Y >= j]", "P[Y>j|Y>=j]"),
- link, earg = earg),
- "\n",
- "Variance: ",
- ifelse(whitespace,
- "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
- "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
+ "Links: ",
+ namesof(if (reverse)
+ ifelse(whitespace, "P[Y < j+1|Y <= j+1]", "P[Y<j+1|Y<=j+1]") else
+ ifelse(whitespace, "P[Y > j|Y >= j]", "P[Y>j|Y>=j]"),
+ link, earg = earg),
+ "\n",
+ "Variance: ",
+ ifelse(whitespace,
+ "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
+ "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
@@ -384,76 +391,76 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
- delete.zero.colns = TRUE
+ delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
- M = ncol(y) - 1
+ M <- ncol(y) - 1
- mynames = if ( .reverse )
+ mynames <- if ( .reverse )
paste("P[Y", .fillerChar, "<", .fillerChar, 2:(M+1), "|Y",
.fillerChar, "<=", .fillerChar, 2:(M+1), "]", sep = "") else
paste("P[Y", .fillerChar, ">", .fillerChar, 1:M, "|Y",
.fillerChar, ">=", .fillerChar, 1:M, "]", sep = "")
predictors.names <-
namesof(mynames, .link , earg = .earg , short = TRUE)
- y.names = paste("mu", 1:(M+1), sep = "")
+ y.names <- paste("mu", 1:(M+1), sep = "")
- extra$mymat = if ( .reverse )
+ extra$mymat <- if ( .reverse )
tapplymat1(y, "cumsum") else
tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
if (!is.matrix(eta))
- eta = as.matrix(eta)
- fv.matrix = if ( .reverse ) {
- M = ncol(eta)
- djrs = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(djrs[, M:1], "cumprod")[, M:1]
+ eta <- as.matrix(eta)
+ fv.matrix <- if ( .reverse ) {
+ M <- ncol(eta)
+ djrs <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(djrs[, M:1], "cumprod")[, M:1]
cbind(1, 1 - djrs) * cbind(temp, 1)
} else {
- djs = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(djs, "cumprod")
+ djs <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(djs, "cumprod")
cbind(1 - djs,1) * cbind(1, temp)
}
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
- extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
+ extra$dimnamesy2)
fv.matrix
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
- names(misc$link) = mynames
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for (ii in 1:M)
- misc$earg[[ii]] = .earg
+ misc$earg[[ii]] <- .earg
- misc$parameters = mynames
- misc$reverse = .reverse
- misc$fillerChar = .fillerChar
- misc$whitespace = .whitespace
+ misc$parameters <- mynames
+ misc$reverse <- .reverse
+ misc$fillerChar <- .fillerChar
+ misc$whitespace <- .whitespace
- extra = list() # kill what was used
+ extra <- list() # kill what was used
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- cump = tapplymat1(mu, "cumsum")
+ cump <- tapplymat1(mu, "cumsum")
if ( .reverse ) {
- djrs = 1 - mu[, -1] / cump[, -1]
+ djrs <- 1 - mu[, -1] / cump[, -1]
theta2eta(djrs, .link , earg = .earg )
} else {
- M = ncol(mu) - 1
- djs = if (M == 1) 1 - mu[, 1] else
- 1 - mu[, 1:M] / (1 - cbind(0, cump[, 1:(M-1)]))
+ M <- ncol(mu) - 1
+ djs <- if (M == 1) 1 - mu[, 1] else
+ 1 - mu[, 1:M] / (1 - cbind(0, cump[, 1:(M-1)]))
theta2eta(djs, .link , earg = .earg )
}
}, list( .earg = earg, .link = link, .reverse = reverse) )),
@@ -461,15 +468,15 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -479,80 +486,81 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
vfamily = c("cratio", "vcategorical"),
deriv = eval(substitute(expression({
- if (!length(extra$mymat)) {
- extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
- tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
- }
- if ( .reverse ) {
- djrs = eta2theta(eta, .link , earg = .earg )
- Mp1 = ncol(extra$mymat)
- -c(w) * (y[, -1]/(1 - djrs) - extra$mymat[, -Mp1]/djrs) *
- dtheta.deta(djrs, .link , earg = .earg )
- } else {
- djs = eta2theta(eta, .link , earg = .earg )
- -c(w) * (y[, -ncol(y)]/(1 - djs) - extra$mymat[, -1]/djs) *
- dtheta.deta(djs, .link , earg = .earg )
- }
+ if (!length(extra$mymat)) {
+ extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
+ tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
+ }
+ if ( .reverse ) {
+ djrs <- eta2theta(eta, .link , earg = .earg )
+ Mp1 <- ncol(extra$mymat)
+ -c(w) * (y[, -1]/(1 - djrs) - extra$mymat[, -Mp1]/djrs) *
+ dtheta.deta(djrs, .link , earg = .earg )
+ } else {
+ djs <- eta2theta(eta, .link , earg = .earg )
+ -c(w) * (y[, -ncol(y)]/(1 - djs) - extra$mymat[, -1]/djs) *
+ dtheta.deta(djs, .link , earg = .earg )
+ }
}), list( .earg = earg, .link = link, .reverse = reverse) )),
weight = eval(substitute(expression({
- if ( .reverse ) {
- cump = tapplymat1(mu, "cumsum")
- ddjrs.deta = dtheta.deta(djrs, .link , earg = .earg )
- wz = c(w) * ddjrs.deta^2 *
- (mu[, -1] / (1 - djrs)^2 + cump[, 1:M] / djrs^2)
- } else {
- ccump = tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
- ddjs.deta = dtheta.deta(djs, .link , earg = .earg )
- wz = c(w) * ddjs.deta^2 *
- (mu[, 1:M] / (1 - djs)^2 + ccump[, -1] / djs^2)
- }
-
- wz
+ if ( .reverse ) {
+ cump <- tapplymat1(mu, "cumsum")
+ ddjrs.deta <- dtheta.deta(djrs, .link , earg = .earg )
+ wz <- c(w) * ddjrs.deta^2 *
+ (mu[, -1] / (1 - djrs)^2 + cump[, 1:M] / djrs^2)
+ } else {
+ ccump <- tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
+ ddjs.deta <- dtheta.deta(djs, .link , earg = .earg )
+ wz <- c(w) * ddjs.deta^2 *
+ (mu[, 1:M] / (1 - djs)^2 + ccump[, -1] / djs^2)
+ }
+ wz
}), list( .earg = earg, .link = link, .reverse = reverse ))))
}
-vglm.multinomial.deviance.control <- function(maxit = 21, panic = FALSE, ...)
-{
- if (maxit < 1) {
- warning("bad value of maxit; using 21 instead")
- maxit = 21
- }
- list(maxit=maxit, panic = as.logical(panic)[1])
+ vglm.multinomial.deviance.control <-
+ function(maxit = 21, panic = FALSE, ...) {
+ if (maxit < 1) {
+ warning("bad value of maxit; using 21 instead")
+ maxit <- 21
+ }
+ list(maxit = maxit, panic = as.logical(panic)[1])
}
-vglm.multinomial.control <- function(maxit = 21, panic = FALSE,
- criterion = c("aic1", "aic2", names( .min.criterion.VGAM )), ...)
-{
- if (mode(criterion) != "character" && mode(criterion) != "name")
- criterion = as.character(substitute(criterion))
- criterion = match.arg(criterion,
- c("aic1", "aic2", names( .min.criterion.VGAM )))[1]
-
- if (maxit < 1) {
- warning("bad value of maxit; using 21 instead")
- maxit = 21
- }
- list(maxit = maxit, panic = as.logical(panic)[1],
- criterion = criterion,
- min.criterion = c("aic1" = FALSE, "aic2" = TRUE,
- .min.criterion.VGAM))
+ vglm.multinomial.control <-
+ function(maxit = 21, panic = FALSE,
+ criterion = c("aic1", "aic2", names( .min.criterion.VGAM )),
+ ...) {
+ if (mode(criterion) != "character" && mode(criterion) != "name")
+ criterion <- as.character(substitute(criterion))
+ criterion <- match.arg(criterion,
+ c("aic1", "aic2", names( .min.criterion.VGAM )))[1]
+
+ if (maxit < 1) {
+ warning("bad value of maxit; using 21 instead")
+ maxit <- 21
+ }
+ list(maxit = maxit,
+ panic = as.logical(panic)[1],
+ criterion = criterion,
+ min.criterion = c("aic1" = FALSE, "aic2" = TRUE,
+ .min.criterion.VGAM))
}
-vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
- panic = TRUE, ...)
-{
- if (maxit < 1) {
- warning("bad value of maxit; using 200 instead")
- maxit = 200
- }
- list(maxit=maxit, trace=as.logical(trace)[1],
- panic = as.logical(panic)[1])
+ vglm.vcategorical.control <-
+ function(maxit = 30, trace = FALSE,
+ panic = TRUE, ...) {
+ if (maxit < 1) {
+ warning("bad value of maxit; using 200 instead")
+ maxit <- 200
+ }
+ list(maxit = maxit, trace = as.logical(trace)[1],
+ panic = as.logical(panic)[1])
}
@@ -565,6 +573,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
nointercept = NULL, refLevel = "last",
whitespace = FALSE)
{
+
if (length(refLevel) != 1)
stop("the length of 'refLevel' must be one")
@@ -594,7 +603,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
new("vglmff",
blurb = c("Multinomial logit model\n\n",
- "Links: ",
+ "Links: ",
if (refLevel < 0) {
ifelse(whitespace,
"log(mu[,j] / mu[,M+1]), j = 1:M,\n",
@@ -624,10 +633,10 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
- constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
- intercept.apply = FALSE)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ apply.int = FALSE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
}), list( .parallel = parallel, .zero = zero,
.nointercept = nointercept,
.refLevel = refLevel ))),
@@ -645,7 +654,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
))),
initialize = eval(substitute(expression({
- delete.zero.colns = TRUE
+ delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
M <- ncol(y)-1
@@ -653,6 +662,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
if (use.refLevel > (M+1))
stop("argument 'refLevel' has a value that is too high")
+
allbut.refLevel <- (1:(M+1))[-use.refLevel]
predictors.names <-
paste("log(mu[,", allbut.refLevel,
@@ -665,22 +675,16 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
.whitespace = whitespace ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
+
if (any(is.na(eta)))
warning("there are NAs in eta in slot inverse")
- M <- ncol(cbind(eta))
- if ( (.refLevel < 0) || (.refLevel == M+1)) {
- phat <- cbind(exp(eta), 1)
- } else if ( .refLevel == 1) {
- phat <- cbind(1, exp(eta))
- } else {
- use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
- etamat <- cbind(eta[, 1:( .refLevel - 1)], 0,
- eta[, ( .refLevel ):M])
- phat <- exp(etamat)
- }
- ans <- phat / as.vector(phat %*% rep(1, ncol(phat)))
+
+ ans <- mlogit(eta, refLevel = .refLevel , inverse = TRUE)
if (any(is.na(ans)))
- warning("there are NAs here in slot inverse")
+ warning("there are NAs here in slot linkinv")
+ if (min(ans) == 0 || max(ans) == 1)
+ warning("fitted probabilities numerically 0 or 1 occurred")
+
ans
}), list( .refLevel = refLevel )),
@@ -689,10 +693,9 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
misc$link <- "mlogit"
misc$earg <- list(mlogit = list(
- nointercept = .nointercept,
- parallel = .parallel ,
- refLevel = .refLevel ,
- zero = .zero ))
+ M = M,
+ refLevel = use.refLevel
+ ))
dy <- dimnames(y)
if (!is.null(dy[[2]]))
@@ -701,7 +704,8 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
misc$multipleResponses <- FALSE
misc$nointercept <- .nointercept
misc$parallel <- .parallel
- misc$refLevel <- .refLevel
+ misc$refLevel <- use.refLevel
+ misc$refLevel.orig <- .refLevel
misc$zero <- .zero
}), list( .refLevel = refLevel,
.nointercept = nointercept,
@@ -710,12 +714,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- if ( .refLevel < 0) {
- log(mu[, -ncol(mu)] / mu[, ncol(mu)])
- } else {
- use.refLevel <- if ( .refLevel < 0) ncol(mu) else .refLevel
- log(mu[, -( use.refLevel )] / mu[, use.refLevel ])
- }
+ mlogit(mu, refLevel = .refLevel )
}), list( .refLevel = refLevel )),
loglikelihood =
@@ -723,9 +722,9 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
+ y * w # Convert proportions to counts
nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ round(w)
smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
@@ -760,7 +759,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
myinc <- (index$col.index >= use.refLevel)
index$col.index[myinc] <- index$col.index[myinc] + 1
- wz <- -mu[,index$row] * mu[,index$col]
+ wz <- -mu[, index$row] * mu[, index$col]
wz[, 1:M] <- wz[, 1:M] + mu[, -use.refLevel ]
}
@@ -783,9 +782,8 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
cumulative <- function(link = "logit",
parallel = FALSE, reverse = FALSE,
mv = FALSE,
- intercept.apply = FALSE,
- whitespace = FALSE)
-{
+ apply.parint = FALSE,
+ whitespace = FALSE) {
link <- as.list(substitute(link))
@@ -808,45 +806,45 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
new("vglmff",
blurb = if ( mv )
c(paste("Multivariate cumulative", link, "model\n\n"),
- "Links: ",
- namesof(if (reverse)
- ifelse(whitespace, "P[Y1 >= j+1]", "P[Y1>=j+1]") else
- ifelse(whitespace, "P[Y1 <= j]", "P[Y1<=j]"),
- link, earg = earg),
- ", ...") else
- c(paste("Cumulative", link, "model\n\n"),
- "Links: ",
- namesof(if (reverse)
- ifelse(whitespace, "P[Y >= j+1]", "P[Y>=j+1]") else
- ifelse(whitespace, "P[Y <= j]", "P[Y<=j]"),
- link, earg = earg)),
+ "Links: ",
+ namesof(if (reverse)
+ ifelse(whitespace, "P[Y1 >= j+1]", "P[Y1>=j+1]") else
+ ifelse(whitespace, "P[Y1 <= j]", "P[Y1<=j]"),
+ link, earg = earg),
+ ", ...") else
+ c(paste("Cumulative", link, "model\n\n"),
+ "Links: ",
+ namesof(if (reverse)
+ ifelse(whitespace, "P[Y >= j+1]", "P[Y>=j+1]") else
+ ifelse(whitespace, "P[Y <= j]", "P[Y<=j]"),
+ link, earg = earg)),
constraints = eval(substitute(expression({
if ( .mv ) {
if ( !length(constraints) ) {
- Llevels = extra$Llevels
- NOS = extra$NOS
- Hk.matrix = kronecker(diag(NOS), matrix(1,Llevels-1,1))
- constraints = cm.vgam(Hk.matrix, x, .parallel, constraints,
- intercept.apply = .intercept.apply)
+ Llevels <- extra$Llevels
+ NOS <- extra$NOS
+ Hk.matrix <- kronecker(diag(NOS), matrix(1,Llevels-1,1))
+ constraints <- cm.vgam(Hk.matrix, x, .parallel, constraints,
+ apply.int = .apply.parint)
}
} else {
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
- intercept.apply = .intercept.apply)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ apply.int = .apply.parint)
}
}), list( .parallel = parallel, .mv = mv,
- .intercept.apply = intercept.apply ))),
+ .apply.parint = apply.parint ))),
deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
answer <-
if ( .mv ) {
- totdev = 0
- NOS = extra$NOS
- Llevels = extra$Llevels
+ totdev <- 0
+ NOS <- extra$NOS
+ Llevels <- extra$Llevels
for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels)
- totdev = totdev + Deviance.categorical.data.vgam(
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels)
+ totdev <- totdev + Deviance.categorical.data.vgam(
mu = mu[, aindex, drop = FALSE],
y = y[, aindex, drop = FALSE], w = w,
residuals = residuals,
@@ -938,43 +936,43 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
linkinv = eval(substitute( function(eta, extra = NULL) {
- answer =
+ answer <-
if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- fv.matrix = matrix(0, nrow(eta), NOS*Llevels)
+ NOS <- extra$NOS
+ Llevels <- extra$Llevels
+ fv.matrix <- matrix(0, nrow(eta), NOS*Llevels)
for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels)
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels)
if ( .reverse ) {
- ccump = cbind(1,
- eta2theta(eta[, cindex, drop = FALSE],
- .link , earg = .earg ))
- fv.matrix[,aindex] =
+ ccump <- cbind(1,
+ eta2theta(eta[, cindex, drop = FALSE],
+ .link , earg = .earg ))
+ fv.matrix[,aindex] <-
cbind(-tapplymat1(ccump, "diff"),
ccump[, ncol(ccump)])
} else {
- cump = cbind(eta2theta(eta[, cindex, drop = FALSE],
- .link ,
- earg = .earg ),
- 1)
- fv.matrix[,aindex] =
+ cump <- cbind(eta2theta(eta[, cindex, drop = FALSE],
+ .link ,
+ earg = .earg ),
+ 1)
+ fv.matrix[,aindex] <-
cbind(cump[, 1], tapplymat1(cump, "diff"))
}
}
fv.matrix
} else {
- fv.matrix =
+ fv.matrix <-
if ( .reverse ) {
- ccump = cbind(1, eta2theta(eta, .link , earg = .earg ))
+ ccump <- cbind(1, eta2theta(eta, .link , earg = .earg ))
cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
} else {
- cump = cbind(eta2theta(eta, .link , earg = .earg ), 1)
+ cump <- cbind(eta2theta(eta, .link , earg = .earg ), 1)
cbind(cump[, 1], tapplymat1(cump, "diff"))
}
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
- extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
+ extra$dimnamesy2)
fv.matrix
}
answer
@@ -984,26 +982,27 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
last = eval(substitute(expression({
if ( .mv ) {
- misc$link = .link
- misc$earg = list( .earg )
+ misc$link <- .link
+ misc$earg <- list( .earg )
} else {
- misc$link = rep( .link , length = M)
- names(misc$link) = mynames
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for (ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- .earg
}
- misc$fillerChar = .fillerChar
- misc$whitespace = .whitespace
+ misc$fillerChar <- .fillerChar
+ misc$whitespace <- .whitespace
- misc$parameters = mynames
- misc$reverse = .reverse
- misc$parallel = .parallel
- misc$mv = .mv
+ misc$parameters <- mynames
+ misc$reverse <- .reverse
+ misc$parallel <- .parallel
+ misc$mv <- .mv
}), list(
.reverse = reverse, .parallel = parallel,
.link = link, .earg = earg,
@@ -1011,23 +1010,23 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
.whitespace = whitespace ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- answer =
+ answer <-
if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- eta.matrix = matrix(0, nrow(mu), NOS*(Llevels-1))
+ NOS <- extra$NOS
+ Llevels <- extra$Llevels
+ eta.matrix <- matrix(0, nrow(mu), NOS*(Llevels-1))
for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels)
- cump = tapplymat1(as.matrix(mu[,aindex]), "cumsum")
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels)
+ cump <- tapplymat1(as.matrix(mu[,aindex]), "cumsum")
eta.matrix[,cindex] =
theta2eta(if ( .reverse ) 1-cump[, 1:(Llevels-1)] else
cump[, 1:(Llevels-1)], .link , earg = .earg )
}
eta.matrix
} else {
- cump = tapplymat1(as.matrix(mu), "cumsum")
- M = ncol(as.matrix(mu)) - 1
+ cump <- tapplymat1(as.matrix(mu), "cumsum")
+ M <- ncol(as.matrix(mu)) - 1
theta2eta(if ( .reverse ) 1-cump[, 1:M] else cump[, 1:M],
.link ,
earg = .earg )
@@ -1040,15 +1039,15 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -1056,21 +1055,21 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
},
vfamily = c("cumulative", "vcategorical"),
deriv = eval(substitute(expression({
- mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
- deriv.answer =
+ mu.use <- pmax(mu, .Machine$double.eps * 1.0e-0)
+ deriv.answer <-
if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- dcump.deta = resmat = matrix(0, n, NOS * (Llevels-1))
+ NOS <- extra$NOS
+ Llevels <- extra$Llevels
+ dcump.deta <- resmat <- matrix(0, n, NOS * (Llevels-1))
for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
- cump = eta2theta(eta[,cindex, drop = FALSE],
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels-1)
+ cump <- eta2theta(eta[,cindex, drop = FALSE],
.link , earg = .earg )
- dcump.deta[,cindex] = dtheta.deta(cump, .link , earg = .earg )
- resmat[,cindex] =
- (y[,aindex, drop = FALSE] / mu.use[,aindex, drop = FALSE] -
- y[, 1+aindex, drop = FALSE]/mu.use[, 1+aindex, drop = FALSE])
+ dcump.deta[,cindex] <- dtheta.deta(cump, .link , earg = .earg )
+ resmat[,cindex] <-
+ (y[, aindex, drop = FALSE] / mu.use[, aindex, drop = FALSE] -
+ y[, 1+aindex, drop = FALSE] / mu.use[, 1+aindex, drop = FALSE])
}
(if ( .reverse ) -c(w) else c(w)) * dcump.deta * resmat
} else {
@@ -1086,32 +1085,32 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
.mv = mv ))),
weight = eval(substitute(expression({
if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- wz = matrix(0, n, NOS*(Llevels-1)) # Diagonal elts only for a start
+ NOS <- extra$NOS
+ Llevels <- extra$Llevels
+ wz <- matrix(0, n, NOS*(Llevels-1)) # Diagonal elts only for a start
for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
- wz[,cindex] = c(w) * dcump.deta[,cindex, drop = FALSE]^2 *
- (1 / mu.use[, aindex, drop = FALSE] +
- 1 / mu.use[, 1+aindex, drop = FALSE])
+ cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex <- (iii-1)*(Llevels) + 1:(Llevels-1)
+ wz[,cindex] <- c(w) * dcump.deta[,cindex, drop = FALSE]^2 *
+ (1 / mu.use[, aindex, drop = FALSE] +
+ 1 / mu.use[, 1+aindex, drop = FALSE])
}
if (Llevels-1 > 1) {
- iii = 1
- oindex = (iii-1) * (Llevels-1) + 1:(Llevels-2)
- wz = cbind(wz, -c(w) *
- dcump.deta[, oindex] * dcump.deta[, 1+oindex])
+ iii <- 1
+ oindex <- (iii-1) * (Llevels-1) + 1:(Llevels-2)
+ wz <- cbind(wz, -c(w) *
+ dcump.deta[, oindex] * dcump.deta[, 1+oindex])
if (NOS > 1) {
- cptrwz = ncol(wz) # Like a pointer
- wz = cbind(wz, matrix(0, nrow(wz), (NOS-1) * (Llevels-1)))
+ cptrwz <- ncol(wz) # Like a pointer
+ wz <- cbind(wz, matrix(0, nrow(wz), (NOS-1) * (Llevels-1)))
for (iii in 2:NOS) {
- oindex = (iii-1)*(Llevels-1) + 1:(Llevels-2)
+ oindex <- (iii-1)*(Llevels-1) + 1:(Llevels-2)
wz[,cptrwz + 1 + (1:(Llevels-2))] =
-c(w) * dcump.deta[,oindex] *
dcump.deta[, 1+oindex]
- cptrwz = cptrwz + Llevels - 1 # Move it along a bit
+ cptrwz <- cptrwz + Llevels - 1 # Move it along a bit
}
}
@@ -1122,13 +1121,12 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
wz <- c(w) * dcump.deta^2 * (1/mu.use[, 1:M] + 1/mu.use[, -1])
if (M > 1)
wz <- cbind(wz,
- -c(w) * dcump.deta[, -M] *
- dcump.deta[, 2:M] / mu.use[, 2:M])
+ -c(w) * dcump.deta[, -M] *
+ dcump.deta[, 2:M] / mu.use[, 2:M])
}
wz
- }), list(
- .earg = earg, .link = link,
- .mv = mv ))))
+ }), list( .earg = earg, .link = link,
+ .mv = mv ))))
}
@@ -1146,8 +1144,7 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
acat <- function(link = "loge", parallel = FALSE,
- reverse = FALSE, zero = NULL, whitespace = FALSE)
-{
+ reverse = FALSE, zero = NULL, whitespace = FALSE) {
link <- as.list(substitute(link))
@@ -1169,14 +1166,14 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
ifelse(whitespace, "P[Y = j] / P[Y = j + 1]", "P[Y=j]/P[Y=j+1]") else
ifelse(whitespace, "P[Y = j + 1] / P[Y = j]", "P[Y=j+1]/P[Y=j]"),
link, earg = earg),
- "\n",
- "Variance: ",
- ifelse(whitespace,
- "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
- "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
+ "\n",
+ "Variance: ",
+ ifelse(whitespace,
+ "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
+ "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
@@ -1188,10 +1185,10 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
- delete.zero.colns = TRUE
+ delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
- M = ncol(y) - 1
- mynames = if ( .reverse )
+ M <- ncol(y) - 1
+ mynames <- if ( .reverse )
paste("P[Y", .fillerChar , "=",
1:M, "]", .fillerChar , "/", .fillerChar ,
"P[Y", .fillerChar , "=", .fillerChar , 2:(M+1), "]",
@@ -1202,67 +1199,68 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
predictors.names <-
namesof(mynames, .link , short = TRUE, earg = .earg )
- y.names = paste("mu", 1:(M+1), sep = "")
+ y.names <- paste("mu", 1:(M+1), sep = "")
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
if (!is.matrix(eta))
- eta = as.matrix(eta)
- M = ncol(eta)
- fv.matrix = if ( .reverse ) {
- zetar = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(zetar[, M:1], "cumprod")[, M:1, drop = FALSE]
+ eta <- as.matrix(eta)
+ M <- ncol(eta)
+ fv.matrix <- if ( .reverse ) {
+ zetar <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(zetar[, M:1], "cumprod")[, M:1, drop = FALSE]
cbind(temp, 1) / drop(1 + temp %*% rep(1, ncol(temp)))
} else {
- zeta = eta2theta(eta, .link , earg = .earg )
- temp = tapplymat1(zeta, "cumprod")
+ zeta <- eta2theta(eta, .link , earg = .earg )
+ temp <- tapplymat1(zeta, "cumprod")
cbind(1, temp) / drop(1 + temp %*% rep(1, ncol(temp)))
}
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
- extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
+ extra$dimnamesy2)
fv.matrix
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
- names(misc$link) = mynames
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for (ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$parameters = mynames
- misc$reverse = .reverse
- misc$fillerChar = .fillerChar
- misc$whitespace = .whitespace
+ misc$parameters <- mynames
+ misc$reverse <- .reverse
+ misc$fillerChar <- .fillerChar
+ misc$whitespace <- .whitespace
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- M = ncol(mu) - 1
- theta2eta(if ( .reverse ) mu[, 1:M] / mu[, -1] else
- mu[, -1] / mu[, 1:M],
+ M <- ncol(mu) - 1
+ theta2eta(if ( .reverse ) mu[, 1:M] / mu[, -1] else
+ mu[, -1] / mu[, 1:M],
.link , earg = .earg )
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -1278,12 +1276,11 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
score <- attr(d1, "gradient") / d1
- answer <-
- if ( .reverse ) {
- cumy = tapplymat1(y, "cumsum")
+ answer <- if ( .reverse ) {
+ cumy <- tapplymat1(y, "cumsum")
c(w) * dzeta.deta * (cumy[, 1:M] / zeta - score)
} else {
- ccumy = tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
+ ccumy <- tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
c(w) * dzeta.deta * (ccumy[, -1] / zeta - score)
}
@@ -1291,9 +1288,9 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
answer
}), list( .earg = earg, .link = link, .reverse = reverse) )),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(as.numeric(NA), n, dimm(M))
- hess = attr(d1, "hessian") / d1
+ hess <- attr(d1, "hessian") / d1
if (M > 1)
for (jay in 1:(M-1))
@@ -1302,39 +1299,38 @@ vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
(hess[, jay, kay] - score[, jay] * score[, kay]) *
dzeta.deta[, jay] * dzeta.deta[, kay]
if ( .reverse ) {
- cump = tapplymat1(mu, "cumsum")
- wz[, 1:M] = (cump[, 1:M] / zeta^2 - score^2) * dzeta.deta^2
+ cump <- tapplymat1(mu, "cumsum")
+ wz[, 1:M] <- (cump[, 1:M] / zeta^2 - score^2) * dzeta.deta^2
} else {
- ccump = tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
- wz[, 1:M] = (ccump[, -1] / zeta^2 - score^2) * dzeta.deta^2
+ ccump <- tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
+ wz[, 1:M] <- (ccump[, -1] / zeta^2 - score^2) * dzeta.deta^2
}
c(w) * wz
}), list( .earg = earg, .link = link, .reverse = reverse ))))
}
-acat.deriv <- function(zeta, reverse, M, n)
-{
+acat.deriv <- function(zeta, reverse, M, n) {
- alltxt = NULL
+ alltxt <- NULL
for (ii in 1:M) {
- index = if (reverse) ii:M else 1:ii
- vars = paste("zeta", index, sep = "")
- txt = paste(vars, collapse = "*")
- alltxt = c(alltxt, txt)
+ index <- if (reverse) ii:M else 1:ii
+ vars <- paste("zeta", index, sep = "")
+ txt <- paste(vars, collapse = "*")
+ alltxt <- c(alltxt, txt)
}
- alltxt = paste(alltxt, collapse = " + ")
- alltxt = paste(" ~ 1 +", alltxt)
- txt = as.formula(alltxt)
+ alltxt <- paste(alltxt, collapse = " + ")
+ alltxt <- paste(" ~ 1 +", alltxt)
+ txt <- as.formula(alltxt)
- allvars = paste("zeta", 1:M, sep = "")
- d1 = deriv3(txt, allvars, hessian = TRUE)
+ allvars <- paste("zeta", 1:M, sep = "")
+ d1 <- deriv3(txt, allvars, hessian = TRUE)
- zeta = as.matrix(zeta)
+ zeta <- as.matrix(zeta)
for (ii in 1:M)
assign(paste("zeta", ii, sep = ""), zeta[, ii])
- ans = eval(d1)
+ ans <- eval(d1)
ans
}
@@ -1358,44 +1354,44 @@ acat.deriv <- function(zeta, reverse, M, n)
new("vglmff",
blurb = c(paste("Bradley-Terry model (without ties)\n\n"),
- "Links: ",
- namesof("alpha's", "loge")),
+ "Links: ",
+ namesof("alpha's", "loge")),
initialize = eval(substitute(expression({
- are.ties = attr(y, "are.ties") # If Brat() was used
+ are.ties <- attr(y, "are.ties") # If Brat() was used
if (is.logical(are.ties) && are.ties)
stop("use bratt(), not brat(), when there are ties")
- try.index = 1:400
- M = (1:length(try.index))[(try.index+1)*(try.index) == ncol(y)]
+ try.index <- 1:400
+ M <- (1:length(try.index))[(try.index+1)*(try.index) == ncol(y)]
if (!is.finite(M))
stop("cannot determine 'M'")
- init.alpha = matrix( rep( .init.alpha , length.out = M),
+ init.alpha <- matrix(rep( .init.alpha , length.out = M),
n, M, byrow = TRUE)
etastart <- matrix(theta2eta(init.alpha, "loge",
earg = list(theta = NULL)),
n, M, byrow = TRUE)
- refgp = .refgp
+ refgp <- .refgp
if (!intercept.only)
warning("this function only works with intercept-only models")
- extra$ybrat.indices = .brat.indices(NCo = M+1, are.ties = FALSE)
- uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
+ extra$ybrat.indices <- .brat.indices(NCo = M+1, are.ties = FALSE)
+ uindex <- if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
predictors.names <-
namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE)
}), list( .refgp = refgp, .init.alpha=init.alpha ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- probs = NULL
- eta = as.matrix(eta) # in case M = 1
+ probs <- NULL
+ eta <- as.matrix(eta) # in case M = 1
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
- earg = list(theta = NULL)),
- .refvalue , .refgp )
- alpha1 = alpha[extra$ybrat.indices[, "rindex"]]
- alpha2 = alpha[extra$ybrat.indices[, "cindex"]]
- probs = rbind(probs, alpha1 / (alpha1 + alpha2))
+ alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
+ .refvalue , .refgp )
+ alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
+ alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
+ probs <- rbind(probs, alpha1 / (alpha1 + alpha2))
}
- dimnames(probs) = dimnames(eta)
+ dimnames(probs) <- dimnames(eta)
probs
}, list( .refgp = refgp, .refvalue = refvalue) )),
@@ -1416,15 +1412,15 @@ acat.deriv <- function(zeta, reverse, M, n)
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -1432,39 +1428,39 @@ acat.deriv <- function(zeta, reverse, M, n)
},
vfamily = c("brat"),
deriv = eval(substitute(expression({
- ans = NULL
- uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
- eta = as.matrix(eta) # in case M = 1
+ ans <- NULL
+ uindex <- if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
+ eta <- as.matrix(eta) # in case M = 1
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
- earg = list(theta = NULL)),
+ alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
- ymat = InverseBrat(y[ii,], NCo = M+1, diag = 0)
- answer = rep(0, len = M)
+ ymat <- InverseBrat(y[ii,], NCo = M+1, diag = 0)
+ answer <- rep(0, len = M)
for (aa in 1:(M+1)) {
- answer = answer + (1 - (aa == uindex)) *
- (ymat[uindex,aa] * alpha[aa] - ymat[aa,uindex] *
- alpha[uindex]) / (alpha[aa] + alpha[uindex])
+ answer <- answer + (1 - (aa == uindex)) *
+ (ymat[uindex,aa] * alpha[aa] - ymat[aa,uindex] *
+ alpha[uindex]) / (alpha[aa] + alpha[uindex])
}
- ans = rbind(ans, w[ii] * answer)
+ ans <- rbind(ans, w[ii] * answer)
}
- dimnames(ans) = dimnames(eta)
+ dimnames(ans) <- dimnames(eta)
ans
}), list( .refvalue = refvalue, .refgp = refgp) )),
weight = eval(substitute(expression({
- wz = matrix(0, n, dimm(M))
+ wz <- matrix(0, n, dimm(M))
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
- earg = list(theta = NULL)),
+ alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
- ymat = InverseBrat(y[ii,], NCo = M+1, diag = 0)
+ ymat <- InverseBrat(y[ii,], NCo = M+1, diag = 0)
for (aa in 1:(M+1)) {
- wz[ii, 1:M] = wz[ii, 1:M] + (1 - (aa == uindex)) *
- (ymat[aa,uindex] + ymat[uindex,aa]) * alpha[aa] *
- alpha[uindex] / (alpha[aa] + alpha[uindex])^2
+ wz[ii, 1:M] <- wz[ii, 1:M] + (1 - (aa == uindex)) *
+ (ymat[aa,uindex] + ymat[uindex,aa]) * alpha[aa] *
+ alpha[uindex] / (alpha[aa] + alpha[uindex])^2
}
if (M > 1) {
- ind5 = iam(1, 1, M, both = TRUE, diag = FALSE)
+ ind5 <- iam(1, 1, M, both = TRUE, diag = FALSE)
wz[ii,(M+1):ncol(wz)] =
-(ymat[cbind(uindex[ind5$row],uindex[ind5$col])] +
ymat[cbind(uindex[ind5$col],uindex[ind5$row])]) *
@@ -1472,7 +1468,7 @@ acat.deriv <- function(zeta, reverse, M, n)
(alpha[uindex[ind5$row]] + alpha[uindex[ind5$col]])^2
}
}
- wz = c(w) * wz
+ wz <- c(w) * wz
wz
}), list( .refvalue = refvalue, .refgp = refgp ))))
}
@@ -1500,27 +1496,27 @@ acat.deriv <- function(zeta, reverse, M, n)
new("vglmff",
blurb = c(paste("Bradley-Terry model (with ties)\n\n"),
- "Links: ",
- namesof("alpha's", "loge"), ", log(alpha0)"),
+ "Links: ",
+ namesof("alpha's", "loge"), ", log(alpha0)"),
initialize = eval(substitute(expression({
- try.index = 1:400
- M = (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
+ try.index <- 1:400
+ M <- (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
if (!is.Numeric(M, allowable.length = 1, integer.valued = TRUE))
stop("cannot determine 'M'")
- NCo = M # number of contestants
+ NCo <- M # Number of contestants
- are.ties = attr(y, "are.ties") # If Brat() was used
+ are.ties <- attr(y, "are.ties") # If Brat() was used
if (is.logical(are.ties)) {
if (!are.ties)
stop("use brat(), not bratt(), when there are no ties")
- ties = attr(y, "ties")
+ ties <- attr(y, "ties")
} else {
- are.ties = FALSE
- ties = 0 * y
+ are.ties <- FALSE
+ ties <- 0 * y
}
- init.alpha = rep( .init.alpha, len = NCo-1)
- ialpha0 = .i0
+ init.alpha <- rep( .init.alpha, len = NCo-1)
+ ialpha0 <- .i0
etastart <-
cbind(matrix(theta2eta(init.alpha,
"loge",
@@ -1529,14 +1525,14 @@ acat.deriv <- function(zeta, reverse, M, n)
theta2eta(rep(ialpha0, length.out = n),
"loge",
list(theta = NULL)))
- refgp = .refgp
+ refgp <- .refgp
if (!intercept.only)
warning("this function only works with intercept-only models")
- extra$ties = ties # Flat (1-row) matrix
- extra$ybrat.indices = .brat.indices(NCo=NCo, are.ties = FALSE)
- extra$tbrat.indices = .brat.indices(NCo=NCo, are.ties = TRUE) # unused
- extra$dnties = dimnames(ties)
- uindex = if (refgp == "last") 1:(NCo-1) else (1:(NCo))[-refgp ]
+ extra$ties <- ties # Flat (1-row) matrix
+ extra$ybrat.indices <- .brat.indices(NCo=NCo, are.ties = FALSE)
+ extra$tbrat.indices <- .brat.indices(NCo=NCo, are.ties = TRUE) # unused
+ extra$dnties <- dimnames(ties)
+ uindex <- if (refgp == "last") 1:(NCo-1) else (1:(NCo))[-refgp ]
predictors.names <- c(
namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE),
@@ -1546,26 +1542,26 @@ acat.deriv <- function(zeta, reverse, M, n)
.init.alpha=init.alpha ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- probs = qprobs = NULL
- M = ncol(eta)
+ probs <- qprobs <- NULL
+ M <- ncol(eta)
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M],
- "loge"),
- .refvalue , .refgp )
- alpha0 = loge(eta[ii, M], inverse = TRUE)
- alpha1 = alpha[extra$ybrat.indices[, "rindex"]]
- alpha2 = alpha[extra$ybrat.indices[, "cindex"]]
- probs = rbind(probs, alpha1 / (alpha1+alpha2+alpha0)) #
- qprobs = rbind(qprobs, alpha0 / (alpha1+alpha2+alpha0)) #
+ alpha <- .brat.alpha(eta2theta(eta[ii, -M],
+ "loge"),
+ .refvalue , .refgp )
+ alpha0 <- loge(eta[ii, M], inverse = TRUE)
+ alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
+ alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
+ probs <- rbind(probs, alpha1 / (alpha1+alpha2+alpha0)) #
+ qprobs <- rbind(qprobs, alpha0 / (alpha1+alpha2+alpha0)) #
}
if (length(extra$dnties))
- dimnames(qprobs) = extra$dnties
- attr(probs, "probtie") = qprobs
+ dimnames(qprobs) <- extra$dnties
+ attr(probs, "probtie") <- qprobs
probs
}, list( .refgp = refgp, .refvalue = refvalue) )),
last = eval(substitute(expression({
- misc$link = rep( "loge", length = M)
- names(misc$link) = c(paste("alpha", uindex, sep = ""), "alpha0")
+ misc$link <- rep( "loge", length = M)
+ names(misc$link) <- c(paste("alpha", uindex, sep = ""), "alpha0")
misc$earg <- vector("list", M)
@@ -1574,10 +1570,10 @@ acat.deriv <- function(zeta, reverse, M, n)
misc$earg[[ii]] <- list(theta = NULL)
- misc$refgp = .refgp
- misc$refvalue = .refvalue
- misc$alpha = alpha
- misc$alpha0 = alpha0
+ misc$refgp <- .refgp
+ misc$refvalue <- .refvalue
+ misc$alpha <- alpha
+ misc$alpha0 <- alpha0
}), list( .refgp = refgp, .refvalue = refvalue ))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
@@ -1588,85 +1584,85 @@ acat.deriv <- function(zeta, reverse, M, n)
},
vfamily = c("bratt"),
deriv = eval(substitute(expression({
- ans = NULL
- ties = extra$ties
- NCo = M
- uindex = if ( .refgp == "last") 1:(M-1) else (1:(M))[-( .refgp )]
- eta = as.matrix(eta)
+ ans <- NULL
+ ties <- extra$ties
+ NCo <- M
+ uindex <- if ( .refgp == "last") 1:(M-1) else (1:(M))[-( .refgp )]
+ eta <- as.matrix(eta)
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge",
+ alpha <- .brat.alpha(eta2theta(eta[ii, -M], "loge",
earg = list(theta = NULL)),
.refvalue, .refgp)
- alpha0 = loge(eta[ii, M], inverse = TRUE)
- ymat = InverseBrat(y[ii,], NCo = M, diag = 0)
- tmat = InverseBrat(ties[ii,], NCo = M, diag = 0)
- answer = rep(0, len=NCo-1) # deriv wrt eta[-M]
+ alpha0 <- loge(eta[ii, M], inverse = TRUE)
+ ymat <- InverseBrat(y[ii,], NCo = M, diag = 0)
+ tmat <- InverseBrat(ties[ii,], NCo = M, diag = 0)
+ answer <- rep(0, len=NCo-1) # deriv wrt eta[-M]
for (aa in 1:NCo) {
- Daj = alpha[aa] + alpha[uindex] + alpha0
- pja = alpha[uindex] / Daj
- answer = answer + alpha[uindex] *
- (-ymat[aa,uindex] + ymat[uindex,aa]*(1-pja)/pja -
- tmat[uindex,aa]) / Daj
+ Daj <- alpha[aa] + alpha[uindex] + alpha0
+ pja <- alpha[uindex] / Daj
+ answer <- answer + alpha[uindex] *
+ (-ymat[aa,uindex] + ymat[uindex,aa]*(1-pja)/pja -
+ tmat[uindex,aa]) / Daj
}
- deriv0 = 0 # deriv wrt eta[M]
+ deriv0 <- 0 # deriv wrt eta[M]
for (aa in 1:(NCo-1))
for (bb in (aa+1):NCo) {
- Dab = alpha[aa] + alpha[bb] + alpha0
- qab = alpha0 / Dab
- deriv0 = deriv0 + alpha0 *
- (-ymat[aa,bb] - ymat[bb,aa] +
- tmat[aa,bb]*(1-qab)/qab) / Dab
+ Dab <- alpha[aa] + alpha[bb] + alpha0
+ qab <- alpha0 / Dab
+ deriv0 <- deriv0 + alpha0 *
+ (-ymat[aa,bb] - ymat[bb,aa] +
+ tmat[aa,bb]*(1-qab)/qab) / Dab
}
- ans = rbind(ans, w[ii] * c(answer, deriv0))
+ ans <- rbind(ans, w[ii] * c(answer, deriv0))
}
- dimnames(ans) = dimnames(eta)
+ dimnames(ans) <- dimnames(eta)
ans
}), list( .refvalue = refvalue, .refgp = refgp) )),
weight = eval(substitute(expression({
- wz = matrix(0, n, dimm(M)) # includes diagonal
+ wz <- matrix(0, n, dimm(M)) # includes diagonal
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge",
+ alpha <- .brat.alpha(eta2theta(eta[ii, -M], "loge",
earg = list(theta = NULL)),
.refvalue, .refgp)
- alpha0 = loge(eta[ii, M], inverse = TRUE)
- ymat = InverseBrat(y[ii,], NCo = M, diag = 0)
- tmat = InverseBrat(ties[ii,], NCo = M, diag = 0)
+ alpha0 <- loge(eta[ii, M], inverse = TRUE)
+ ymat <- InverseBrat(y[ii,], NCo = M, diag = 0)
+ tmat <- InverseBrat(ties[ii,], NCo = M, diag = 0)
for (aa in 1:(NCo)) {
- Daj = alpha[aa] + alpha[uindex] + alpha0
- pja = alpha[uindex] / Daj
- nja = ymat[aa,uindex] + ymat[uindex,aa] + tmat[uindex,aa]
- wz[ii, 1:(NCo-1)] = wz[ii, 1:(NCo - 1)] +
- alpha[uindex]^2 * nja *
- (1 - pja) / (pja * Daj^2)
+ Daj <- alpha[aa] + alpha[uindex] + alpha0
+ pja <- alpha[uindex] / Daj
+ nja <- ymat[aa,uindex] + ymat[uindex,aa] + tmat[uindex,aa]
+ wz[ii, 1:(NCo-1)] <- wz[ii, 1:(NCo - 1)] +
+ alpha[uindex]^2 * nja *
+ (1 - pja) / (pja * Daj^2)
if (aa < NCo)
for (bb in (aa+1):(NCo)) {
- nab = ymat[aa,bb] + ymat[bb,aa] + tmat[bb,aa]
- Dab = alpha[aa] + alpha[bb] + alpha0
- qab = alpha0 / Dab
- wz[ii, NCo] = wz[ii,NCo] + alpha0^2 * nab *
+ nab <- ymat[aa,bb] + ymat[bb,aa] + tmat[bb,aa]
+ Dab <- alpha[aa] + alpha[bb] + alpha0
+ qab <- alpha0 / Dab
+ wz[ii, NCo] <- wz[ii,NCo] + alpha0^2 * nab *
(1-qab) / (qab * Dab^2)
}
}
if (NCo > 2) {
- ind5 = iam(1, 1, M = NCo, both = TRUE, diag = FALSE)
- alphajunk = c(alpha, junk=NA)
- mat4 = cbind(uindex[ind5$row],uindex[ind5$col])
- wz[ii,(M+1):ncol(wz)] = -(ymat[mat4] + ymat[mat4[, 2:1]] +
+ ind5 <- iam(1, 1, M = NCo, both = TRUE, diag = FALSE)
+ alphajunk <- c(alpha, junk=NA)
+ mat4 <- cbind(uindex[ind5$row],uindex[ind5$col])
+ wz[ii,(M+1):ncol(wz)] <- -(ymat[mat4] + ymat[mat4[, 2:1]] +
tmat[mat4]) * alphajunk[uindex[ind5$col]] *
alphajunk[uindex[ind5$row]] / (alpha0 +
alphajunk[uindex[ind5$row]] + alphajunk[uindex[ind5$col]])^2
}
for (sss in 1:length(uindex)) {
- jay = uindex[sss]
- naj = ymat[, jay] + ymat[jay,] + tmat[, jay]
- Daj = alpha[jay] + alpha + alpha0
+ jay <- uindex[sss]
+ naj <- ymat[, jay] + ymat[jay,] + tmat[, jay]
+ Daj <- alpha[jay] + alpha + alpha0
wz[ii,iam(sss, NCo, M = NCo, diag = TRUE)] =
-alpha[jay] * alpha0 * sum(naj / Daj^2)
}
}
- wz = c(w) * wz
+ wz <- c(w) * wz
wz
}), list( .refvalue = refvalue, .refgp = refgp ))))
}
@@ -1686,7 +1682,7 @@ acat.deriv <- function(zeta, reverse, M, n)
integer.valued = TRUE) ||
NCo < 2)
stop("bad input for 'NCo'")
- m = diag(NCo)
+ m <- diag(NCo)
if (are.ties) {
cbind(rindex = row(m)[col(m) < row(m)],
cindex = col(m)[col(m) < row(m)])
@@ -1706,39 +1702,39 @@ acat.deriv <- function(zeta, reverse, M, n)
string <- paste(fillerChar, string, fillerChar, sep = "")
- allargs = list(mat) # ,...
- callit = if (length(names(allargs))) names(allargs) else
+ allargs <- list(mat) # ,...
+ callit <- if (length(names(allargs))) names(allargs) else
as.character(1:length(allargs))
- ans = ans.ties = NULL
+ ans <- ans.ties <- NULL
for (ii in 1:length(allargs)) {
- m = allargs[[ii]]
+ m <- allargs[[ii]]
if (!is.matrix(m) || dim(m)[1] != dim(m)[2])
stop("m must be a square matrix")
- diag(ties) = 0
+ diag(ties) <- 0
if (!all(ties == t(ties)))
stop("ties must be a symmetric matrix")
- are.ties = any(ties > 0)
- diag(ties) = NA
+ are.ties <- any(ties > 0)
+ diag(ties) <- NA
- diag(m) = 0 # Could have been NAs
+ diag(m) <- 0 # Could have been NAs
if (any(is.na(m)))
stop("missing values not allowed (except on the diagonal)")
- diag(m) = NA
-
- dm = as.data.frame.table(m)
- dt = as.data.frame.table(ties)
- dm = dm[!is.na(dm$Freq),]
- dt = dt[!is.na(dt$Freq),]
- usethis1 = paste(dm[, 1], string[1], dm[, 2], sep = "")
- usethis2 = paste(dm[, 1], string[2], dm[, 2], sep = "")
- ans = rbind(ans, matrix(dm$Freq, nrow = 1))
- ans.ties = rbind(ans.ties, matrix(dt$Freq, nrow = 1))
+ diag(m) <- NA
+
+ dm <- as.data.frame.table(m)
+ dt <- as.data.frame.table(ties)
+ dm <- dm[!is.na(dm$Freq),]
+ dt <- dt[!is.na(dt$Freq),]
+ usethis1 <- paste(dm[, 1], string[1], dm[, 2], sep = "")
+ usethis2 <- paste(dm[, 1], string[2], dm[, 2], sep = "")
+ ans <- rbind(ans, matrix(dm$Freq, nrow = 1))
+ ans.ties <- rbind(ans.ties, matrix(dt$Freq, nrow = 1))
}
- dimnames(ans) = list(callit, usethis1)
- dimnames(ans.ties) = list(callit, usethis2)
- attr(ans, "ties") = ans.ties
- attr(ans, "are.ties") = are.ties
+ dimnames(ans) <- list(callit, usethis1)
+ dimnames(ans.ties) <- list(callit, usethis2)
+ attr(ans, "ties") <- ans.ties
+ attr(ans, "are.ties") <- are.ties
ans
}
@@ -1759,31 +1755,29 @@ InverseBrat <- function(yvec, NCo =
string <- paste(fillerChar, string, fillerChar, sep = "")
-
-
- ans = array(diag, c(NCo, NCo, multiplicity))
- yvec.orig = yvec
- yvec = c(yvec)
- ptr = 1
+ ans <- array(diag, c(NCo, NCo, multiplicity))
+ yvec.orig <- yvec
+ yvec <- c(yvec)
+ ptr <- 1
for (mul in 1:multiplicity)
for (i1 in 1:(NCo))
for (i2 in 1:(NCo))
if (i1 != i2) {
- ans[i2,i1,mul] = yvec[ptr]
- ptr = ptr + 1
+ ans[i2,i1,mul] <- yvec[ptr]
+ ptr <- ptr + 1
}
- ans = if (multiplicity > 1) ans else matrix(ans, NCo, NCo)
+ ans <- if (multiplicity > 1) ans else matrix(ans, NCo, NCo)
if (is.array(yvec.orig) || is.matrix(yvec.orig)) {
- names.yvec = dimnames(yvec.orig)[[2]]
- ii = strsplit(names.yvec, string[1])
- cal = NULL
+ names.yvec <- dimnames(yvec.orig)[[2]]
+ ii <- strsplit(names.yvec, string[1])
+ cal <- NULL
for (kk in c(NCo, 1:(NCo-1)))
- cal = c(cal, (ii[[kk]])[1])
+ cal <- c(cal, (ii[[kk]])[1])
if (multiplicity>1) {
- dimnames(ans) = list(cal, cal, dimnames(yvec.orig)[[1]])
+ dimnames(ans) <- list(cal, cal, dimnames(yvec.orig)[[1]])
} else {
- dimnames(ans) = list(cal, cal)
+ dimnames(ans) <- list(cal, cal)
}
}
ans
@@ -1798,21 +1792,22 @@ tapplymat1 <- function(mat,
if (!missing(function.arg))
- function.arg = as.character(substitute(function.arg))
- function.arg = match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
+ function.arg <- as.character(substitute(function.arg))
+ function.arg <- match.arg(function.arg,
+ c("cumsum", "diff", "cumprod"))[1]
- type = switch(function.arg, cumsum = 1, diff = 2, cumprod = 3,
- stop("function.arg not matched"))
+ type <- switch(function.arg, cumsum = 1, diff = 2, cumprod = 3,
+ stop("function.arg not matched"))
if (!is.matrix(mat))
- mat = as.matrix(mat)
- NR = nrow(mat)
- NC = ncol(mat)
- fred = dotC(name = "tapplymat1", mat=as.double(mat),
- as.integer(NR), as.integer(NC), as.integer(type))
-
- dim(fred$mat) = c(NR, NC)
- dimnames(fred$mat) = dimnames(mat)
+ mat <- as.matrix(mat)
+ NR <- nrow(mat)
+ NC <- ncol(mat)
+ fred <- dotC(name = "tapplymat1", mat=as.double(mat),
+ as.integer(NR), as.integer(NC), as.integer(type))
+
+ dim(fred$mat) <- c(NR, NC)
+ dimnames(fred$mat) <- dimnames(mat)
switch(function.arg,
cumsum = fred$mat,
diff = fred$mat[, -1, drop = FALSE],
@@ -1834,160 +1829,164 @@ tapplymat1 <- function(mat,
- fcutpoints = cutpoints[is.finite(cutpoints)]
- if (!is.Numeric(fcutpoints, integer.valued = TRUE) ||
- any(fcutpoints < 0))
- stop("'cutpoints' must have non-negative integer or Inf ",
- "values only")
- if (is.finite(cutpoints[length(cutpoints)]))
- cutpoints = c(cutpoints, Inf)
+ fcutpoints <- cutpoints[is.finite(cutpoints)]
+ if (!is.Numeric(fcutpoints, integer.valued = TRUE) ||
+ any(fcutpoints < 0))
+ stop("'cutpoints' must have non-negative integer or Inf ",
+ "values only")
+ if (is.finite(cutpoints[length(cutpoints)]))
+ cutpoints <- c(cutpoints, Inf)
- if (!is.logical(countdata) || length(countdata) != 1)
- stop("argument 'countdata' must be a single logical")
- if (countdata) {
- if (!is.Numeric(NOS, integer.valued = TRUE, positive = TRUE))
- stop("'NOS' must have integer values only")
- if (!is.Numeric(Levels, integer.valued = TRUE,
- positive = TRUE) ||
- any(Levels < 2))
- stop("'Levels' must have integer values (>= 2) only")
- Levels = rep(Levels, length=NOS)
- }
+ if (!is.logical(countdata) || length(countdata) != 1)
+ stop("argument 'countdata' must be a single logical")
+ if (countdata) {
+ if (!is.Numeric(NOS, integer.valued = TRUE, positive = TRUE))
+ stop("'NOS' must have integer values only")
+ if (!is.Numeric(Levels, integer.valued = TRUE,
+ positive = TRUE) ||
+ any(Levels < 2))
+ stop("'Levels' must have integer values (>= 2) only")
+ Levels <- rep(Levels, length = NOS)
+ }
- new("vglmff",
- blurb = c(paste("Ordinal Poisson model\n\n"),
- "Link: ", namesof("mu", link, earg = earg)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- initialize = eval(substitute(expression({
- orig.y = cbind(y) # Convert y into a matrix if necessary
- if ( .countdata ) {
- extra$NOS = M = NOS = .NOS
- extra$Levels = Levels = .Levels
- y.names = dimnames(y)[[2]] # Hopefully the user inputted them
- } else {
- if (any(w != 1) || ncol(cbind(w)) != 1)
- stop("the 'weights' argument must be a vector of all ones")
- extra$NOS = M = NOS = if (is.Numeric( .NOS )) .NOS else
- ncol(orig.y)
- Levels = rep( if (is.Numeric( .Levels )) .Levels else 0,
- len = NOS)
- if (!is.Numeric( .Levels ))
- for (iii in 1:NOS) {
- Levels[iii] = length(unique(sort(orig.y[,iii])))
- }
- extra$Levels = Levels
+ new("vglmff",
+ blurb = c(paste("Ordinal Poisson model\n\n"),
+ "Link: ", namesof("mu", link, earg = earg)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ orig.y <- cbind(y) # Convert y into a matrix if necessary
+ if ( .countdata ) {
+ extra$NOS <- M <- NOS <- .NOS
+ extra$Levels <- Levels <- .Levels
+ y.names <- dimnames(y)[[2]] # Hopefully the user inputted them
+ } else {
+ if (any(w != 1) || ncol(cbind(w)) != 1)
+ stop("the 'weights' argument must be a vector of all ones")
+ extra$NOS <- M <- NOS <- if (is.Numeric( .NOS )) .NOS else
+ ncol(orig.y)
+ Levels <- rep(if (is.Numeric( .Levels )) .Levels else 0,
+ len = NOS)
+ if (!is.Numeric( .Levels ))
+ for (iii in 1:NOS) {
+ Levels[iii] <- length(unique(sort(orig.y[,iii])))
}
+ extra$Levels <- Levels
+ }
- initmu = if (is.Numeric( .init.mu ))
- rep( .init.mu, len = NOS) else NULL
- cutpoints = rep( .cutpoints, len=sum(Levels))
- delete.zero.colns = FALSE
- use.y = if ( .countdata ) y else matrix(0, n, sum(Levels))
- use.etastart <- matrix(0, n, M)
- cptr = 1
- for (iii in 1:NOS) {
- y = factor(orig.y[,iii], levels=(1:Levels[iii]))
- if ( !( .countdata )) {
- eval(process.categorical.data.vgam) # Creates mustart and y
- use.y[,cptr:(cptr+Levels[iii]-1)] = y
- }
- use.etastart[,iii] <- if (is.Numeric(initmu))
- initmu[iii] else
- median(cutpoints[cptr:(cptr+Levels[iii]-1-1)])
- cptr = cptr + Levels[iii]
- }
- mustart = NULL # Overwrite it
- etastart <- theta2eta(use.etastart, .link , earg = .earg )
- y = use.y # n x sum(Levels)
- M = NOS
- for (iii in 1:NOS) {
- mu.names = paste("mu", iii, ".", sep = "")
+ initmu <- if (is.Numeric( .init.mu ))
+ rep( .init.mu, len = NOS) else NULL
+ cutpoints <- rep( .cutpoints, len = sum(Levels))
+ delete.zero.colns <- FALSE
+ use.y <- if ( .countdata ) y else matrix(0, n, sum(Levels))
+ use.etastart <- matrix(0, n, M)
+ cptr <- 1
+ for (iii in 1:NOS) {
+ y <- factor(orig.y[,iii], levels=(1:Levels[iii]))
+ if ( !( .countdata )) {
+ eval(process.categorical.data.vgam) # Creates mustart and y
+ use.y[,cptr:(cptr+Levels[iii]-1)] <- y
}
+ use.etastart[,iii] <- if (is.Numeric(initmu))
+ initmu[iii] else
+ median(cutpoints[cptr:(cptr+Levels[iii]-1-1)])
+ cptr <- cptr + Levels[iii]
+ }
+ mustart <- NULL # Overwrite it
+ etastart <- theta2eta(use.etastart, .link , earg = .earg )
+ y <- use.y # n x sum(Levels)
+ M <- NOS
+ for (iii in 1:NOS) {
+ mu.names <- paste("mu", iii, ".", sep = "")
+ }
- ncoly = extra$ncoly = sum(Levels)
- cp.vector = rep( .cutpoints, length=ncoly)
- extra$countdata = .countdata
- extra$cutpoints = cp.vector
- extra$n = n
- mynames = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- predictors.names <-
- namesof(mynames, .link , short = TRUE, earg = .earg )
- }), list( .link = link, .countdata = countdata, .earg = earg,
- .cutpoints=cutpoints, .NOS=NOS, .Levels=Levels,
- .init.mu = init.mu
- ))),
- linkinv = eval(substitute( function(eta, extra = NULL) {
- mu = eta2theta(eta, link= .link , earg = .earg ) # Poisson means
- mu = cbind(mu)
- mu
- }, list( .link = link, .earg = earg, .countdata = countdata ))),
- last = eval(substitute(expression({
- if ( .countdata ) {
- misc$link = .link
- misc$earg = list( .earg )
- } else {
- misc$link = rep( .link , length = M)
- names(misc$link) = mynames
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for (ii in 1:M) misc$earg[[ii]] = .earg
- }
- misc$parameters = mynames
- misc$countdata = .countdata
- misc$true.mu = FALSE # $fitted is not a true mu
- }), list( .link = link, .countdata = countdata, .earg = earg ))),
+ ncoly <- extra$ncoly <- sum(Levels)
+ cp.vector <- rep( .cutpoints, length=ncoly)
+ extra$countdata <- .countdata
+ extra$cutpoints <- cp.vector
+ extra$n <- n
+
+ mynames <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
+ }), list( .link = link, .countdata = countdata, .earg = earg,
+ .cutpoints=cutpoints, .NOS=NOS, .Levels=Levels,
+ .init.mu = init.mu
+ ))),
+ linkinv = eval(substitute( function(eta, extra = NULL) {
+ mu <- eta2theta(eta, link= .link , earg = .earg ) # Poisson means
+ mu <- cbind(mu)
+ mu
+ }, list( .link = link, .earg = earg, .countdata = countdata ))),
+ last = eval(substitute(expression({
+ if ( .countdata ) {
+ misc$link <- .link
+ misc$earg <- list( .earg )
+ } else {
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- .earg
+ }
+ misc$parameters <- mynames
+ misc$countdata <- .countdata
+ misc$true.mu = FALSE # $fitted is not a true mu
+ }), list( .link = link, .countdata = countdata, .earg = earg ))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- probs = ordpoissonProbs(extra, mu)
- index0 = y == 0
- probs[index0] = 1
- pindex0 = probs == 0
- probs[pindex0] = 1
+ probs <- ordpoissonProbs(extra, mu)
+ index0 <- y == 0
+ probs[index0] <- 1
+ pindex0 <- probs == 0
+ probs[pindex0] <- 1
sum(pindex0) * (-1.0e+10) + sum(w * y * log(probs))
}
},
vfamily = c("ordpoisson", "vcategorical"),
deriv = eval(substitute(expression({
- probs = ordpoissonProbs(extra, mu)
- probs.use = pmax(probs, .Machine$double.eps * 1.0e-0)
-
- cp.vector = extra$cutpoints
- NOS = extra$NOS
- Levels = extra$Levels
- resmat = matrix(0, n, M)
- dl.dprob = y / probs.use
- dmu.deta = dtheta.deta(mu, .link , earg = .earg )
- dprob.dmu = ordpoissonProbs(extra, mu, deriv = 1)
- cptr = 1
+ probs <- ordpoissonProbs(extra, mu)
+ probs.use <- pmax(probs, .Machine$double.eps * 1.0e-0)
+
+ cp.vector <- extra$cutpoints
+ NOS <- extra$NOS
+ Levels <- extra$Levels
+ resmat <- matrix(0, n, M)
+ dl.dprob <- y / probs.use
+ dmu.deta <- dtheta.deta(mu, .link , earg = .earg )
+ dprob.dmu <- ordpoissonProbs(extra, mu, deriv = 1)
+ cptr <- 1
for (iii in 1:NOS) {
for (kkk in 1:Levels[iii]) {
- resmat[,iii] = resmat[,iii] +
+ resmat[,iii] <- resmat[,iii] +
dl.dprob[,cptr] * dprob.dmu[,cptr]
- cptr = cptr + 1
+ cptr <- cptr + 1
}
}
- resmat = c(w) * resmat * dmu.deta
+ resmat <- c(w) * resmat * dmu.deta
resmat
}), list( .link = link, .earg = earg, .countdata=countdata ))),
weight = eval(substitute(expression({
- d2l.dmu2 = matrix(0, n, M) # Diagonal matrix
- cptr = 1
+ d2l.dmu2 <- matrix(0, n, M) # Diagonal matrix
+ cptr <- 1
for (iii in 1:NOS) {
for (kkk in 1:Levels[iii]) {
- d2l.dmu2[,iii] = d2l.dmu2[,iii] +
+ d2l.dmu2[,iii] <- d2l.dmu2[,iii] +
dprob.dmu[,cptr]^2 / probs.use[,cptr]
- cptr = cptr + 1
+ cptr <- cptr + 1
}
}
- wz = c(w) * d2l.dmu2 * dmu.deta^2
+ wz <- c(w) * d2l.dmu2 * dmu.deta^2
wz
}), list( .earg = earg, .link = link, .countdata = countdata ))))
}
@@ -1995,40 +1994,44 @@ tapplymat1 <- function(mat,
ordpoissonProbs <- function(extra, mu, deriv = 0) {
- cp.vector = extra$cutpoints
- NOS = extra$NOS
+ cp.vector <- extra$cutpoints
+ NOS <- extra$NOS
if (deriv == 1) {
- dprob.dmu = matrix(0, extra$n, extra$ncoly)
+ dprob.dmu <- matrix(0, extra$n, extra$ncoly)
} else {
- probs = matrix(0, extra$n, extra$ncoly)
+ probs <- matrix(0, extra$n, extra$ncoly)
}
- mu = cbind(mu)
- cptr = 1
+ mu <- cbind(mu)
+ cptr <- 1
for (iii in 1:NOS) {
if (deriv == 1) {
- dprob.dmu[,cptr] = -dpois(x = cp.vector[cptr], lambda = mu[,iii])
+ dprob.dmu[,cptr] <- -dpois(x = cp.vector[cptr], lambda = mu[,iii])
} else {
- probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii])
+ probs[,cptr] <- ppois(q = cp.vector[cptr], lambda = mu[,iii])
}
- cptr = cptr + 1
+ cptr <- cptr + 1
while(is.finite(cp.vector[cptr])) {
if (deriv == 1) {
- dprob.dmu[,cptr] = dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
- dpois(x = cp.vector[cptr], lambda = mu[,iii])
+ dprob.dmu[,cptr] <-
+ dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
+ dpois(x = cp.vector[cptr ], lambda = mu[,iii])
} else {
- probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii]) -
+ probs[,cptr] <-
+ ppois(q = cp.vector[cptr ], lambda = mu[,iii]) -
ppois(q = cp.vector[cptr-1], lambda = mu[,iii])
}
- cptr = cptr + 1
+ cptr <- cptr + 1
}
if (deriv == 1) {
- dprob.dmu[,cptr] = dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
- dpois(x = cp.vector[cptr], lambda = mu[,iii])
+ dprob.dmu[,cptr] <-
+ dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
+ dpois(x = cp.vector[cptr ], lambda = mu[,iii])
} else {
- probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii]) -
+ probs[,cptr] <-
+ ppois(q = cp.vector[cptr ], lambda = mu[,iii]) -
ppois(q = cp.vector[cptr-1], lambda = mu[,iii])
}
- cptr = cptr + 1
+ cptr <- cptr + 1
}
if (deriv == 1) dprob.dmu else probs
}
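For reference, the class probabilities that ordpoissonProbs() assembles above are plain differences of the Poisson distribution function at the supplied cutpoints. A standalone base-R sketch with made-up numbers:

lambda    <- 4
cutpoints <- c(1, 5, Inf)                        # classes {0,1}, {2,...,5}, {6,7,...}
probs <- c(ppois(1, lambda),                     # P(Y <= 1)
           ppois(5, lambda) - ppois(1, lambda),  # P(1 < Y <= 5)
           ppois(5, lambda, lower.tail = FALSE)) # P(Y > 5)
sum(probs)                                       # 1: the classes partition 0, 1, 2, ...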
@@ -2043,8 +2046,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
lscale = "loge", escale = list(),
parallel = FALSE, sparallel = TRUE,
reverse = FALSE,
- iscale = 1)
-{
+ iscale = 1) {
stop("sorry, not working yet")
link <- as.list(substitute(link))
@@ -2064,20 +2066,20 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
new("vglmff",
blurb = c(paste("Scaled cumulative", link, "model\n\n"),
- "Links: ",
- namesof(if (reverse) "P[Y>=j+1]" else "P[Y<=j]",
- link, earg = earg),
- ", ",
- namesof("scale_j", lscale, escale)),
+ "Links: ",
+ namesof(if (reverse) "P[Y>=j+1]" else "P[Y<=j]",
+ link, earg = earg),
+ ", ",
+ namesof("scale_j", lscale, escale)),
constraints = eval(substitute(expression({
J = M / 2
- constraints = cm.vgam(matrix(1,J,1), x, .parallel, constraints,
- intercept.apply = FALSE)
+ constraints <- cm.vgam(matrix(1,J,1), x, .parallel, constraints,
+ apply.int = FALSE)
constraints[["(Intercept)"]] = rbind(constraints[["(Intercept)"]],
matrix(0, J, ncol(constraints[["(Intercept)"]])))
- cm2 = cm.vgam(matrix(1,J,1), x, .sparallel, constraints = NULL,
- intercept.apply = FALSE)
+ cm2 <- cm.vgam(matrix(1,J,1), x, .sparallel, constraints = NULL,
+ apply.int = FALSE)
for (ii in 2:length(constraints))
constraints[[ii]] =
@@ -2085,9 +2087,9 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
matrix(0, J, ncol(constraints[[ii]]))),
rbind(matrix(0, J, ncol(cm2[[ii]])), cm2[[ii]]))
- for (ii in 1:length(constraints))
- constraints[[ii]] =
- (constraints[[ii]])[interleave.VGAM(M, M = 2),, drop = FALSE]
+ for (ii in 1:length(constraints))
+ constraints[[ii]] =
+ (constraints[[ii]])[interleave.VGAM(M, M = 2),, drop = FALSE]
}), list( .parallel = parallel, .sparallel=sparallel ))),
deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -2162,10 +2164,10 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
c(rep( .link , length = J),
rep( .lscale, length = J))[interleave.VGAM(M, M = 2)]
names(misc$link) = predictors.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for (ii in 1:J) misc$earg[[2*ii-1]] = .earg
- for (ii in 1:J) misc$earg[[2*ii ]] = .escale
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:J) misc$earg[[2*ii-1]] <- .earg
+ for (ii in 1:J) misc$earg[[2*ii ]] <- .escale
misc$parameters = mynames
misc$reverse = .reverse
misc$parallel = .parallel
@@ -2288,13 +2290,13 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
margeff <- function(object, subset = NULL) {
- ii = ii.save = subset
+ ii <- ii.save <- subset
if (!is(object, "vglm"))
stop("'object' is not a vglm() object")
if (!any(temp.logical <- is.element(c("multinomial","cumulative"),
                          object@family@vfamily)))
stop("'object' is not a 'multinomial' or 'cumulative' VGLM!")
- model.multinomial = temp.logical[1]
+ model.multinomial <- temp.logical[1]
if (is(object, "vgam"))
stop("'object' is a vgam() object")
  if (length(object@control$xij))
@@ -2302,19 +2304,19 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
  if (length(object@misc$form2))
stop("'object' contains 'form2' terms")
-  oassign = object@misc$orig.assign
+  oassign <- object@misc$orig.assign
if (any(unlist(lapply(oassign, length)) > 1))
warning("some terms in 'object' create more than one column of ",
"the LM design matrix")
-  nnn = object@misc$n
-  M = object@misc$M # ncol(B) # length(pvec) - 1
+  nnn <- object@misc$n
+  M <- object@misc$M # ncol(B) # length(pvec) - 1
if (model.multinomial) {
-    rlev = object@misc$refLevel
- cfit = coefvlm(object, matrix.out = TRUE)
- B = if (!length(rlev)) {
+    rlev <- object@misc$refLevel
+ cfit <- coefvlm(object, matrix.out = TRUE)
+ B <- if (!length(rlev)) {
cbind(cfit, 0)
} else {
if (rlev == M+1) { # Default
@@ -2325,46 +2327,46 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
cbind(cfit[, 1:(rlev-1)], 0, cfit[,rlev:M])
}
}
- ppp = nrow(B)
- pvec1 = fitted(object)[ 1,]
- colnames(B) = if (length(names(pvec1))) names(pvec1) else
- paste("mu", 1:(M+1), sep = "")
+ ppp <- nrow(B)
+ pvec1 <- fitted(object)[ 1,]
+ colnames(B) <- if (length(names(pvec1))) names(pvec1) else
+ paste("mu", 1:(M+1), sep = "")
if (is.null(ii)) {
- BB = array(B, c(ppp, M+1, nnn))
- pvec = c(t(fitted(object)))
- pvec = rep(pvec, each=ppp)
- temp1 = array(BB * pvec, c(ppp, M+1, nnn))
- temp2 = aperm(temp1, c(2,1,3)) # (M+1) x ppp x nnn
- temp2 = colSums(temp2) # ppp x nnn
- temp2 = array(rep(temp2, each=M+1), c(M+1, ppp, nnn))
- temp2 = aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
- temp3 = pvec
- ans = array((BB - temp2) * temp3, c(ppp, M+1, nnn),
- dimnames = list(dimnames(B)[[1]],
- dimnames(B)[[2]], dimnames(fitted(object))[[1]]))
+ BB <- array(B, c(ppp, M+1, nnn))
+ pvec <- c(t(fitted(object)))
+ pvec <- rep(pvec, each=ppp)
+ temp1 <- array(BB * pvec, c(ppp, M+1, nnn))
+ temp2 <- aperm(temp1, c(2,1,3)) # (M+1) x ppp x nnn
+ temp2 <- colSums(temp2) # ppp x nnn
+ temp2 <- array(rep(temp2, each=M+1), c(M+1, ppp, nnn))
+ temp2 <- aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
+ temp3 <- pvec
+ ans <- array((BB - temp2) * temp3, c(ppp, M+1, nnn),
+ dimnames = list(dimnames(B)[[1]],
+ dimnames(B)[[2]], dimnames(fitted(object))[[1]]))
ans
} else
if (is.numeric(ii) && (length(ii) == 1)) {
- pvec = fitted(object)[ii,]
- temp1 = B * matrix(pvec, ppp, M+1, byrow = TRUE)
- temp2 = matrix(rowSums(temp1), ppp, M+1)
- temp3 = matrix(pvec, nrow(B), M+1, byrow = TRUE)
+ pvec <- fitted(object)[ii,]
+ temp1 <- B * matrix(pvec, ppp, M+1, byrow = TRUE)
+ temp2 <- matrix(rowSums(temp1), ppp, M+1)
+ temp3 <- matrix(pvec, nrow(B), M+1, byrow = TRUE)
(B - temp2) * temp3
} else {
if (is.logical(ii))
- ii = (1:nnn)[ii]
+ ii <- (1:nnn)[ii]
- ans = array(0, c(ppp, M+1, length(ii)),
- dimnames = list(dimnames(B)[[1]],
- dimnames(B)[[2]],
- dimnames(fitted(object)[ii,])[[1]]))
+ ans <- array(0, c(ppp, M+1, length(ii)),
+ dimnames = list(dimnames(B)[[1]],
+ dimnames(B)[[2]],
+ dimnames(fitted(object)[ii,])[[1]]))
for (ilocal in 1:length(ii)) {
- pvec = fitted(object)[ii[ilocal],]
- temp1 = B * matrix(pvec, ppp, M+1, byrow = TRUE)
- temp2 = matrix(rowSums(temp1), ppp, M+1)
- temp3 = matrix(pvec, nrow(B), M+1, byrow = TRUE)
- ans[,,ilocal] = (B - temp2) * temp3
+ pvec <- fitted(object)[ii[ilocal],]
+ temp1 <- B * matrix(pvec, ppp, M+1, byrow = TRUE)
+ temp2 <- matrix(rowSums(temp1), ppp, M+1)
+ temp3 <- matrix(pvec, nrow(B), M+1, byrow = TRUE)
+ ans[,,ilocal] <- (B - temp2) * temp3
}
ans
}
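For a single observation, the multinomial branch above implements the usual logit marginal-effect identity d p_j / d x_k = p_j * (B[k, j] - sum_s p_s * B[k, s]). A standalone base-R sketch with made-up numbers (none of these values come from the package):

B     <- cbind(c(0.5, -1.0), c(-0.2, 0.3), 0)  # 2 covariates x 3 classes; last column = reference level
x     <- c(1, 2)                               # one row of the LM design matrix
eta   <- drop(crossprod(x, B))                 # linear predictors (0 for the reference class)
pvec  <- exp(eta) / sum(exp(eta))              # fitted class probabilities
temp1 <- B * matrix(pvec, nrow(B), 3, byrow = TRUE)
temp2 <- matrix(rowSums(temp1), nrow(B), 3)
(B - temp2) * matrix(pvec, nrow(B), 3, byrow = TRUE)  # 2 x 3 matrix of marginal effects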
@@ -2373,40 +2375,40 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
  if (is.logical(is.multivariateY <- object@misc$mv) &&
is.multivariateY)
stop("cannot handle cumulative(mv = TRUE)")
-  reverse = object@misc$reverse
-  linkfunctions = object@misc$link
-  all.eargs = object@misc$earg
- B = cfit = coefvlm(object, matrix.out = TRUE)
- ppp = nrow(B)
-
- hdot = lpmat = kronecker(predict(object), matrix(1, ppp, 1))
- resmat = cbind(hdot, 1)
+  reverse <- object@misc$reverse
+  linkfunctions <- object@misc$link
+  all.eargs <- object@misc$earg
+ B <- cfit <- coefvlm(object, matrix.out = TRUE)
+ ppp <- nrow(B)
+
+ hdot <- lpmat <- kronecker(predict(object), matrix(1, ppp, 1))
+ resmat <- cbind(hdot, 1)
for (jlocal in 1:M) {
- Cump = eta2theta(lpmat[,jlocal],
- link = linkfunctions[jlocal],
- earg = all.eargs[[jlocal]])
- hdot[, jlocal] = dtheta.deta(Cump,
- link = linkfunctions[jlocal],
- earg = all.eargs[[jlocal]])
+ Cump <- eta2theta(lpmat[,jlocal],
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ hdot[, jlocal] <- dtheta.deta(Cump,
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
}
- resmat[, 1] = ifelse(reverse, -1, 1) * hdot[, 1] * cfit[, 1]
+ resmat[, 1] <- ifelse(reverse, -1, 1) * hdot[, 1] * cfit[, 1]
if (M > 1) {
for (jlocal in 2:M)
- resmat[, jlocal] = ifelse(reverse, -1, 1) *
+ resmat[, jlocal] <- ifelse(reverse, -1, 1) *
(hdot[, jlocal ] * cfit[, jlocal ] -
hdot[, jlocal - 1] * cfit[, jlocal - 1])
}
- resmat[, M+1] = ifelse(reverse, 1, -1) * hdot[, M] * cfit[, M]
+ resmat[, M+1] <- ifelse(reverse, 1, -1) * hdot[, M] * cfit[, M]
- temp1 = array(resmat, c(ppp, nnn, M+1),
- dimnames = list(dimnames(B)[[1]],
- dimnames(fitted(object))[[1]],
- dimnames(fitted(object))[[2]]))
- temp1 = aperm(temp1, c(1, 3, 2)) # ppp x (M+1) x nnn
+ temp1 <- array(resmat, c(ppp, nnn, M+1),
+ dimnames = list(dimnames(B)[[1]],
+ dimnames(fitted(object))[[1]],
+ dimnames(fitted(object))[[2]]))
+ temp1 <- aperm(temp1, c(1, 3, 2)) # ppp x (M+1) x nnn
if (is.null(ii)) {
return(temp1)
@@ -2437,32 +2439,32 @@ prplot <- function(object,
  if (!any(object@family@vfamily == "cumulative"))
stop("'object' is not seem to be a VGAM categorical model object")
- control = prplot.control(...)
+ control <- prplot.control(...)
- object = plotvgam(object, plot.arg = FALSE, raw = FALSE) # , ...
+ object <- plotvgam(object, plot.arg = FALSE, raw = FALSE) # , ...
  if (length(names(object@preplot)) != 1)
stop("object needs to have only one term")
-  MM = object@misc$M
-  use.y = cbind((object@preplot[[1]])$y)
-  Constant = attr(object@preplot, "Constant")
+  MM <- object@misc$M
+  use.y <- cbind((object@preplot[[1]])$y)
+  Constant <- attr(object@preplot, "Constant")
if (is.numeric(Constant) && length(Constant) == ncol(use.y))
- use.y = use.y + matrix(Constant, nrow(use.y), ncol(use.y),
- byrow = TRUE)
+ use.y <- use.y + matrix(Constant, nrow(use.y), ncol(use.y),
+ byrow = TRUE)
for (ii in 1:MM) {
- use.y[, ii] = eta2theta(use.y[, ii],
-                             link = object@misc$link[[ii]],
-                             earg = object@misc$earg[[ii]])
+ use.y[, ii] <- eta2theta(use.y[, ii],
+                              link = object@misc$link[[ii]],
+                              earg = object@misc$earg[[ii]])
}
if (ncol(use.y) != MM) use.y = use.y[, 1:MM, drop = FALSE]
-  use.x = (object@preplot[[1]])$x
- myxlab = if (length(control$xlab))
+  use.x <- (object@preplot[[1]])$x
+ myxlab <- if (length(control$xlab))
    control$xlab else (object@preplot[[1]])$xlab
- mymain = if (MM <= 3)
+ mymain <- if (MM <= 3)
    paste(object@misc$parameters, collapse = ", ") else
    paste(object@misc$parameters[c(1, MM)], collapse = ",...,")
if (length(control$main)) mymain = control$main
diff --git a/R/family.censored.R b/R/family.censored.R
index 25cbdc1..d4fb289 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,6 +12,9 @@
+
+
+
cenpoisson <- function(link = "loge", imu = NULL) {
link <- as.list(substitute(link))
@@ -34,64 +37,65 @@
Is.integer.y = TRUE)
- centype = attr(y, "type")
+ centype <- attr(y, "type")
if (centype == "right") {
- temp = y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = rep(FALSE, len = n)
- extra$interval = rep(FALSE, len = n)
- init.mu = pmax(y[, 1], 1/8)
+ temp <- y[, 2]
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored <- rep(FALSE, len = n)
+ extra$interval <- rep(FALSE, len = n)
+ init.mu <- pmax(y[, 1], 1/8)
} else
if (centype == "left") {
- temp = y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = rep(FALSE, len = n)
- extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
- init.mu = pmax(y[, 1], 1/8)
+ temp <- y[, 2]
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- rep(FALSE, len = n)
+ extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$interval <- rep(FALSE, len = n)
+ init.mu <- pmax(y[, 1], 1/8)
} else
if (centype == "interval" ||
centype == "interval2") {
- temp = y[, 3]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
- extra$intervalcensored = ifelse(temp == 3, TRUE, FALSE)
- init.mu = pmax((y[, 1] + y[, 2])/2, 1/8) # for intervalcensored
+ temp <- y[, 3]
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored <- ifelse(temp == 2, TRUE, FALSE)
+ extra$intervalcensored <- ifelse(temp == 3, TRUE, FALSE)
+ init.mu <- pmax((y[, 1] + y[, 2])/2, 1/8) # for intervalcensored
if (any(extra$uncensored))
- init.mu[extra$uncensored] = pmax(y[extra$uncensored, 1], 1/8)
+ init.mu[extra$uncensored] <- pmax(y[extra$uncensored, 1], 1/8)
if (any(extra$rightcensored))
- init.mu[extra$rightcensored] = pmax(y[extra$rightcensored, 1], 1/8)
+ init.mu[extra$rightcensored] <- pmax(y[extra$rightcensored, 1], 1/8)
if (any(extra$leftcensored))
- init.mu[extra$leftcensored] = pmax(y[extra$leftcensored, 1], 1/8)
+ init.mu[extra$leftcensored] <- pmax(y[extra$leftcensored, 1], 1/8)
} else
if (centype == "counting") {
stop("type == 'counting' not compatible with cenpoisson()")
- init.mu = pmax(y[, 1], 1/8)
+ init.mu <- pmax(y[, 1], 1/8)
stop("currently not working")
} else
stop("response have to be in a class of SurvS4")
- if (length( .imu )) init.mu = 0 * y[, 1] + .imu
+ if (length( .imu )) init.mu <- 0 * y[, 1] + .imu
predictors.names <-
namesof("mu", .link, earg = .earg, short = TRUE)
if (!length(etastart))
- etastart = theta2eta(init.mu, link = .link, earg = .earg)
+ etastart <- theta2eta(init.mu, link = .link, earg = .earg)
}), list( .link = link, .earg = earg, .imu = imu))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
+ mu <- eta2theta(eta, link = .link, earg = .earg)
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$expected = FALSE
+ misc$expected <- FALSE
+
+ misc$link <- c("mu" = .link)
- misc$link = c("mu" = .link)
+ misc$earg <- list("mu" = .earg)
- misc$earg = list("mu" = .earg)
misc$multipleResponses <- FALSE
}), list( .link = link, .earg = earg ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
@@ -99,10 +103,10 @@
}, list( .link = link, .earg = earg ))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta,
extra = NULL) {
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$intervalcensored
+ cen0 <- extra$uncensored
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cenI <- extra$intervalcensored
if (residuals){
stop("loglikelihood residuals not implemented yet")
} else {
@@ -115,66 +119,66 @@
},
vfamily = "cenpoisson",
deriv = eval(substitute(expression({
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$intervalcensored
- lambda = eta2theta(eta, link = .link, earg = .earg)
+ cen0 <- extra$uncensored
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cenI <- extra$intervalcensored
+ lambda <- eta2theta(eta, link = .link, earg = .earg)
- dl.dlambda = (y[, 1] - lambda)/lambda # uncensored
+ dl.dlambda <- (y[, 1] - lambda)/lambda # uncensored
- yllim = yulim = y[, 1] # uncensored
+ yllim <- yulim <- y[, 1] # uncensored
if (any(cenU)) {
- yllim[cenU] = y[cenU, 1]
- densm1 = dpois(yllim-1, lambda)
- queue = ppois(yllim-1, lambda, lower.tail = FALSE)
- dl.dlambda[cenU] = densm1[cenU] / queue[cenU]
+ yllim[cenU] <- y[cenU, 1]
+ densm1 <- dpois(yllim-1, lambda)
+ queue <- ppois(yllim-1, lambda, lower.tail = FALSE)
+ dl.dlambda[cenU] <- densm1[cenU] / queue[cenU]
}
if (any(cenL)) {
- yulim[cenL] = y[cenL, 1] - 1
- densm0 = dpois(yulim, lambda)
- Queue = ppois(yulim, lambda) # Left tail probability
- dl.dlambda[cenL] = -densm0[cenL] / Queue[cenL]
+ yulim[cenL] <- y[cenL, 1] - 1
+ densm0 <- dpois(yulim, lambda)
+ Queue <- ppois(yulim, lambda) # Left tail probability
+ dl.dlambda[cenL] <- -densm0[cenL] / Queue[cenL]
}
if (any(cenI)) {
- yllim[cenI] = y[cenI, 1] + 1
- yulim[cenI] = y[cenI, 2]
- Queue1 = ppois(yllim-1, lambda)
- Queue2 = ppois(yulim, lambda)
- densm02 = dpois(yulim, lambda)
- densm12 = dpois(yllim-1, lambda)
- dl.dlambda[cenI] =
+ yllim[cenI] <- y[cenI, 1] + 1
+ yulim[cenI] <- y[cenI, 2]
+ Queue1 <- ppois(yllim-1, lambda)
+ Queue2 <- ppois(yulim, lambda)
+ densm02 <- dpois(yulim, lambda)
+ densm12 <- dpois(yllim-1, lambda)
+ dl.dlambda[cenI] <-
(-densm02[cenI]+densm12[cenI]) / (Queue2[cenI]-Queue1[cenI])
}
- dlambda.deta = dtheta.deta(theta=lambda, link = .link, earg = .earg)
+ dlambda.deta <- dtheta.deta(theta=lambda, link = .link, earg = .earg)
c(w) * dl.dlambda * dlambda.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(theta = lambda,
+ d2lambda.deta2 <- d2theta.deta2(theta = lambda,
link = .link, earg = .earg)
- d2l.dlambda2 = 1 / lambda # uncensored; Fisher scoring
+ d2l.dlambda2 <- 1 / lambda # uncensored; Fisher scoring
if (any(cenU)) {
- densm2 = dpois(yllim-2, lambda)
- d2l.dlambda2[cenU] = (dl.dlambda[cenU])^2 -
+ densm2 <- dpois(yllim-2, lambda)
+ d2l.dlambda2[cenU] <- (dl.dlambda[cenU])^2 -
(densm2[cenU]-densm1[cenU])/queue[cenU]
}
if (any(cenL)) {
- densm1 = dpois(yulim-1, lambda)
- d2l.dlambda2[cenL] = (dl.dlambda[cenL])^2 -
+ densm1 <- dpois(yulim-1, lambda)
+ d2l.dlambda2[cenL] <- (dl.dlambda[cenL])^2 -
(densm0[cenL]-densm1[cenL])/Queue[cenL]
}
if (any(cenI)) {
- densm03 = dpois(yulim-1, lambda)
- densm13 = dpois(yllim-2, lambda)
- d2l.dlambda2[cenI] = (dl.dlambda[cenI])^2 -
+ densm03 <- dpois(yulim-1, lambda)
+ densm13 <- dpois(yllim-2, lambda)
+ d2l.dlambda2[cenI] <- (dl.dlambda[cenI])^2 -
(densm13[cenI]-densm12[cenI]-densm03[cenI] +
densm02[cenI]) / (Queue2[cenI]-Queue1[cenI])
}
- wz = c(w) * ((dlambda.deta^2) * d2l.dlambda2)
+ wz <- c(w) * ((dlambda.deta^2) * d2l.dlambda2)
wz
}), list( .link = link, .earg = earg ))))
}
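The censored contributions that the cenpoisson() deriv and weight slots differentiate are ordinary Poisson tail probabilities; a small base-R illustration with made-up numbers:

lambda <- 3; yobs <- 5
dpois(yobs, lambda, log = TRUE)                            # uncensored:          log P(Y = y)
ppois(yobs - 1, lambda, lower.tail = FALSE, log.p = TRUE)  # right-censored at y: log P(Y >= y)
ppois(yobs - 1, lambda, log.p = TRUE)                      # left-censored at y:  log P(Y <= y - 1)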
@@ -184,8 +188,7 @@
if (FALSE)
cexpon <-
- ecexpon <- function(link = "loge", location = 0)
-{
+ ecexpon <- function(link = "loge", location = 0) {
if (!is.Numeric(location, allowable.length = 1))
stop("bad input for 'location'")
@@ -203,7 +206,7 @@ if (FALSE)
if (location == 0) "Exponential: mu^2" else
paste("(mu-", location, ")^2", sep = "")),
initialize = eval(substitute(expression({
- extra$location = .location
+ extra$location <- .location
if (any(y[, 1] <= extra$location))
stop("all responses must be greater than ", extra$location)
@@ -212,45 +215,45 @@ if (FALSE)
type <- attr(y, "type")
if (type == "right" || type == "left"){
- mu = y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8
+ mu <- y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8
}else
if (type == "interval"){
temp <- y[, 3]
- mu = ifelse(temp == 3, y[, 2] + (abs(y[, 2] - extra$location)
- < 0.001)/8,
- y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8)
+ mu <- ifelse(temp == 3,
+ y[, 2] + (abs(y[, 2] - extra$location) < 0.001) / 8,
+ y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8)
}
if (!length(etastart))
- etastart = theta2eta(1/(mu-extra$location), .link , .earg )
+ etastart <- theta2eta(1/(mu-extra$location), .link , .earg )
if (type == "right") {
temp <- y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = rep(FALSE, len = n)
- extra$interval = rep(FALSE, len = n)
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored <- rep(FALSE, len = n)
+ extra$interval <- rep(FALSE, len = n)
} else
if (type == "left") {
temp <- y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = rep(FALSE, len = n)
- extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- rep(FALSE, len = n)
+ extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$interval <- rep(FALSE, len = n)
} else
if (type == "counting") {
stop("type == 'counting' not recognized")
- extra$uncensored = rep(temp == 1, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
- extra$leftcensored = rep(FALSE, len = n)
- extra$rightcensored = rep(FALSE, len = n)
- extra$counting = ifelse(temp == 0, TRUE, FALSE)
+ extra$uncensored <- rep(temp == 1, TRUE, FALSE)
+ extra$interval <- rep(FALSE, len = n)
+ extra$leftcensored <- rep(FALSE, len = n)
+ extra$rightcensored <- rep(FALSE, len = n)
+ extra$counting <- ifelse(temp == 0, TRUE, FALSE)
} else
if (type == "interval") {
temp <- y[, 3]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
- extra$interval = ifelse(temp == 3, TRUE, FALSE)
+ extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored <- ifelse(temp == 2, TRUE, FALSE)
+ extra$interval <- ifelse(temp == 3, TRUE, FALSE)
} else
stop("'type' not recognized")
}), list( .location=location, .link = link ))),
@@ -258,20 +261,20 @@ if (FALSE)
extra$location + 1 / eta2theta(eta, .link , .earg ),
list( .link = link ) )),
last = eval(substitute(expression({
- misc$location = extra$location
- misc$link = c("rate" = .link)
+ misc$location <- extra$location
+ misc$link <- c("rate" = .link)
misc$multipleResponses <- FALSE
}), list( .link = link ))),
link = eval(substitute(function(mu, extra = NULL)
theta2eta(1/(mu-extra$location), .link , .earg ),
list( .link = link ) )),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- rate = 1 / (mu - extra$location)
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$interval
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rate <- 1 / (mu - extra$location)
+ cen0 <- extra$uncensored
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cenI <- extra$interval
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w[cenL] * log1p(-exp(-rate[cenL]*(y[cenL, 1]-extra$location)))) +
@@ -283,45 +286,45 @@ if (FALSE)
}, list( .link = link ))),
vfamily = c("ecexpon"),
deriv = eval(substitute(expression({
- rate = 1 / (mu - extra$location)
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$interval
- dl.drate = 1/rate - (y[, 1]-extra$location) # uncensored
- tmp200 = exp(-rate*(y[, 1]-extra$location))
- tmp200b = exp(-rate*(y[, 2]-extra$location)) # for interval censored
+ rate <- 1 / (mu - extra$location)
+ cen0 <- extra$uncensored
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cenI <- extra$interval
+ dl.drate <- 1/rate - (y[, 1]-extra$location) # uncensored
+ tmp200 <- exp(-rate*(y[, 1]-extra$location))
+ tmp200b <- exp(-rate*(y[, 2]-extra$location)) # for interval censored
if (any(cenL))
- dl.drate[cenL] = (y[cenL, 1]-extra$location) *
- tmp200[cenL] / (1 - tmp200[cenL])
+ dl.drate[cenL] <- (y[cenL, 1]-extra$location) *
+ tmp200[cenL] / (1 - tmp200[cenL])
if (any(cenU))
- dl.drate[cenU] = -(y[cenU, 1]-extra$location)
+ dl.drate[cenU] <- -(y[cenU, 1]-extra$location)
if (any(cenI))
- dl.drate[cenI] = ((y[cenI, 2]-extra$location)*tmp200b[cenI]-
+ dl.drate[cenI] <- ((y[cenI, 2]-extra$location)*tmp200b[cenI]-
(y[cenI, 1]-extra$location)*tmp200[cenI])/
(-tmp200b[cenI]+tmp200[cenI])
- drate.deta = dtheta.deta(rate, .link , .earg )
+ drate.deta <- dtheta.deta(rate, .link , .earg )
c(w) * dl.drate * drate.deta
}), list( .link = link ) )),
weight = eval(substitute(expression({
- A123 = ((mu-extra$location)^2) # uncensored d2l.drate2
- Lowpt = ifelse(cenL, y[, 1], extra$location)
- Lowpt = ifelse(cenI, y[, 1], Lowpt) #interval censored
- Upppt = ifelse(cenU, y[, 1], Inf)
- Upppt = ifelse(cenI, y[, 2], Upppt) #interval censored
- tmp300 = exp(-rate*(Lowpt - extra$location))
+ A123 <- ((mu-extra$location)^2) # uncensored d2l.drate2
+ Lowpt <- ifelse(cenL, y[, 1], extra$location)
+ Lowpt <- ifelse(cenI, y[, 1], Lowpt) #interval censored
+ Upppt <- ifelse(cenU, y[, 1], Inf)
+ Upppt <- ifelse(cenI, y[, 2], Upppt) #interval censored
+ tmp300 <- exp(-rate*(Lowpt - extra$location))
- d2l.drate2 = 0 * y[, 1]
- ind50 = Lowpt > extra$location
+ d2l.drate2 <- 0 * y[, 1]
+ ind50 <- Lowpt > extra$location
- d2l.drate2[ind50] = (Lowpt[ind50]-extra$location)^2 *
+ d2l.drate2[ind50] <- (Lowpt[ind50]-extra$location)^2 *
tmp300[ind50] / (1-tmp300[ind50])
- d2l.drate2 = d2l.drate2 + (exp(-rate*(Lowpt-extra$location)) -
+ d2l.drate2 <- d2l.drate2 + (exp(-rate*(Lowpt-extra$location)) -
exp(-rate*(Upppt-extra$location))) * A123
- wz = c(w) * (drate.deta^2) * d2l.drate2
+ wz <- c(w) * (drate.deta^2) * d2l.drate2
wz
}), list( .link = link ))))
}
@@ -331,8 +334,7 @@ if (FALSE)
cennormal1 <- function(lmu = "identity", lsd = "loge",
- imethod = 1, zero = 2)
-{
+ imethod = 1, zero = 2) {
lmu <- as.list(substitute(lmu))
@@ -357,7 +359,7 @@ if (FALSE)
namesof("sd", lsd, tag = TRUE), "\n",
"Conditional variance: sd^2"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
initialize = eval(substitute(expression({
@@ -371,9 +373,9 @@ if (FALSE)
if (!length(extra$leftcensored))
- extra$leftcensored = rep(FALSE, len = n)
+ extra$leftcensored <- rep(FALSE, len = n)
if (!length(extra$rightcensored))
- extra$rightcensored = rep(FALSE, len = n)
+ extra$rightcensored <- rep(FALSE, len = n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
@@ -382,14 +384,15 @@ if (FALSE)
namesof("sd", .lsd, earg =.esd, tag = FALSE))
if (!length(etastart)) {
- anyc = extra$leftcensored | extra$rightcensored
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
- junk = lm.wfit(x=cbind(x[!i11,]),y=y[!i11],w=w[!i11])
- sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
- etastart = cbind(mu = y,
- rep(theta2eta(sd.y.est, .lsd), length = n))
+ anyc <- extra$leftcensored | extra$rightcensored
+ i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
+ junk <- lm.wfit(x = cbind(x[!i11, ]),
+ y = y[!i11], w = w[!i11])
+ sd.y.est <- sqrt(sum(w[!i11] * junk$resid^2) / junk$df.residual)
+ etastart <- cbind(mu = y,
+ rep(theta2eta(sd.y.est, .lsd), length = n))
if (any(anyc))
- etastart[anyc, 1] = x[anyc,,drop = FALSE] %*% junk$coeff
+ etastart[anyc, 1] <- x[anyc,,drop = FALSE] %*% junk$coeff
}
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
@@ -398,28 +401,28 @@ if (FALSE)
eta2theta(eta[, 1], .lmu, earg = .emu)
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link = c("mu" = .lmu, "sd" = .lsd)
+ misc$link <- c("mu" = .lmu, "sd" = .lsd)
- misc$earg = list("mu" = .emu ,"sd" = .esd )
+ misc$earg <- list("mu" = .emu ,"sd" = .esd )
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- FALSE
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cen0 = !cenL & !cenU # uncensored obsns
-
- mum = eta2theta(eta[, 1], .lmu, earg = .emu )
- sdv = eta2theta(eta[, 2], .lsd, earg = .esd )
-
- Lower = ifelse(cenL, y, -Inf)
- Upper = ifelse(cenU, y, Inf)
- ell1 = -log(sdv[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sdv[cen0])^2
- ell2 = log1p(-pnorm((mum[cenL] - Lower[cenL])/sdv[cenL]))
- ell3 = log1p(-pnorm(( Upper[cenU] - mum[cenU])/sdv[cenU]))
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cen0 <- !cenL & !cenU # uncensored obsns
+
+ mum <- eta2theta(eta[, 1], .lmu, earg = .emu )
+ sdv <- eta2theta(eta[, 2], .lsd, earg = .esd )
+
+ Lower <- ifelse(cenL, y, -Inf)
+ Upper <- ifelse(cenU, y, Inf)
+ ell1 <- -log(sdv[cen0]) - 0.5 * ((y[cen0] - mum[cen0])/sdv[cen0])^2
+ ell2 <- log1p(-pnorm((mum[cenL] - Lower[cenL])/sdv[cenL]))
+ ell3 <- log1p(-pnorm(( Upper[cenU] - mum[cenU])/sdv[cenU]))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
@@ -427,39 +430,39 @@ if (FALSE)
.emu = emu, .esd = esd ))),
vfamily = c("cennormal1"),
deriv = eval(substitute(expression({
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cen0 = !cenL & !cenU # uncensored obsns
- Lower = ifelse(cenL, y, -Inf)
- Upper = ifelse(cenU, y, Inf)
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cen0 <- !cenL & !cenU # uncensored obsns
+ Lower <- ifelse(cenL, y, -Inf)
+ Upper <- ifelse(cenU, y, Inf)
- mum = eta2theta(eta[, 1], .lmu)
- sdv = eta2theta(eta[, 2], .lsd)
+ mum <- eta2theta(eta[, 1], .lmu)
+ sdv <- eta2theta(eta[, 2], .lsd)
- dl.dmu = (y-mum) / sdv^2
- dl.dsd = (((y-mum)/sdv)^2 - 1) / sdv
+ dl.dmu <- (y-mum) / sdv^2
+ dl.dsd <- (((y-mum)/sdv)^2 - 1) / sdv
- dmu.deta = dtheta.deta(mum, .lmu, earg = .emu )
- dsd.deta = dtheta.deta(sdv, .lsd, earg = .esd )
+ dmu.deta <- dtheta.deta(mum, .lmu, earg = .emu )
+ dsd.deta <- dtheta.deta(sdv, .lsd, earg = .esd )
if (any(cenL)) {
- mumL = mum - Lower
- temp21L = mumL[cenL] / sdv[cenL]
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- fred21 = phiL / (1 - PhiL)
- dl.dmu[cenL] = -fred21 / sdv[cenL]
- dl.dsd[cenL] = mumL[cenL] * fred21 / sdv[cenL]^2
+ mumL <- mum - Lower
+ temp21L <- mumL[cenL] / sdv[cenL]
+ PhiL <- pnorm(temp21L)
+ phiL <- dnorm(temp21L)
+ fred21 <- phiL / (1 - PhiL)
+ dl.dmu[cenL] <- -fred21 / sdv[cenL]
+ dl.dsd[cenL] <- mumL[cenL] * fred21 / sdv[cenL]^2
rm(fred21)
}
if (any(cenU)) {
- mumU = Upper - mum
- temp21U = mumU[cenU] / sdv[cenU]
- PhiU = pnorm(temp21U)
- phiU = dnorm(temp21U)
- fred21 = phiU / (1 - PhiU)
- dl.dmu[cenU] = fred21 / sdv[cenU] # Negated
- dl.dsd[cenU] = mumU[cenU] * fred21 / sdv[cenU]^2
+ mumU <- Upper - mum
+ temp21U <- mumU[cenU] / sdv[cenU]
+ PhiU <- pnorm(temp21U)
+ phiU <- dnorm(temp21U)
+ fred21 <- phiU / (1 - PhiU)
+ dl.dmu[cenU] <- fred21 / sdv[cenU] # Negated
+ dl.dsd[cenU] <- mumU[cenU] * fred21 / sdv[cenU]^2
rm(fred21)
}
c(w) * cbind(dl.dmu * dmu.deta,
@@ -467,48 +470,48 @@ if (FALSE)
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd ))),
weight = eval(substitute(expression({
- A1 = 1 - pnorm((mum - Lower) / sdv) # Lower
- A3 = 1 - pnorm(( Upper - mum) / sdv) # Upper
- A2 = 1 - A1 - A3 # Middle; uncensored
- wz = matrix(0, n, 3)
- wz[,iam(1, 1,M)] = A2 * 1 / sdv^2 # ed2l.dmu2
- wz[,iam(2, 2,M)] = A2 * 2 / sdv^2 # ed2l.dsd2
- mumL = mum - Lower
- temp21L = mumL / sdv
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- temp31L = ((1-PhiL) * sdv)^2
- wz.cenL11 = phiL * (phiL - (1-PhiL)*temp21L) / temp31L
- wz.cenL22 = mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
+ A1 <- 1 - pnorm((mum - Lower) / sdv) # Lower
+ A3 <- 1 - pnorm((Upper - mum) / sdv) # Upper
+ A2 <- 1 - A1 - A3 # Middle; uncensored
+ wz <- matrix(0, n, 3)
+ wz[,iam(1, 1,M)] <- A2 * 1 / sdv^2 # ed2l.dmu2
+ wz[,iam(2, 2,M)] <- A2 * 2 / sdv^2 # ed2l.dsd2
+ mumL <- mum - Lower
+ temp21L <- mumL / sdv
+ PhiL <- pnorm(temp21L)
+ phiL <- dnorm(temp21L)
+ temp31L <- ((1-PhiL) * sdv)^2
+ wz.cenL11 <- phiL * (phiL - (1-PhiL)*temp21L) / temp31L
+ wz.cenL22 <- mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
mumL * phiL / sdv) / (sdv * temp31L)
- wz.cenL12 = phiL * ((1-PhiL)*(temp21L^2 - 1) -
+ wz.cenL12 <- phiL * ((1-PhiL)*(temp21L^2 - 1) -
temp21L*phiL) / temp31L
- wz.cenL11[!is.finite(wz.cenL11)] = 0
- wz.cenL22[!is.finite(wz.cenL22)] = 0
- wz.cenL12[!is.finite(wz.cenL12)] = 0
- wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] + A1 * wz.cenL11
- wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] + A1 * wz.cenL22
- wz[,iam(1, 2,M)] = A1 * wz.cenL12
- mumU = Upper - mum # often Inf
- temp21U = mumU / sdv # often Inf
- PhiU = pnorm(temp21U) # often 1
- phiU = dnorm(temp21U) # often 0
- temp31U = ((1-PhiU) * sdv)^2 # often 0
- tmp8 = (1-PhiU)*temp21U
- wzcenU11 = phiU * (phiU - tmp8) / temp31U
- tmp9 = (1-PhiU) * (2 - temp21U^2)
- wzcenU22 = mumU * phiU * (tmp9 + mumU * phiU / sdv) / (sdv * temp31U)
- wzcenU12 = -phiU * ((1-PhiU)*(temp21U^2 - 1) -
+ wz.cenL11[!is.finite(wz.cenL11)] <- 0
+ wz.cenL22[!is.finite(wz.cenL22)] <- 0
+ wz.cenL12[!is.finite(wz.cenL12)] <- 0
+ wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] + A1 * wz.cenL11
+ wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] + A1 * wz.cenL22
+ wz[,iam(1, 2,M)] <- A1 * wz.cenL12
+ mumU <- Upper - mum # often Inf
+ temp21U <- mumU / sdv # often Inf
+ PhiU <- pnorm(temp21U) # often 1
+ phiU <- dnorm(temp21U) # often 0
+ temp31U <- ((1-PhiU) * sdv)^2 # often 0
+ tmp8 <- (1-PhiU)*temp21U
+ wzcenU11 <- phiU * (phiU - tmp8) / temp31U
+ tmp9 <- (1-PhiU) * (2 - temp21U^2)
+ wzcenU22 <- mumU * phiU * (tmp9 + mumU * phiU / sdv) / (sdv * temp31U)
+ wzcenU12 <- -phiU * ((1-PhiU)*(temp21U^2 - 1) -
temp21U*phiU) / temp31U
- wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when Upper==Inf
- wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when Upper==Inf
- wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when Upper==Inf
- wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] + A3 * wzcenU11
- wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] + A3 * wzcenU22
- wz[,iam(1, 2,M)] = wz[,iam(1, 2,M)] + A3 * wzcenU12
- wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] * dmu.deta^2
- wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] * dsd.deta^2
- wz[,iam(1, 2,M)] = wz[,iam(1, 2,M)] * dmu.deta * dsd.deta
+ wzcenU11[!is.finite(wzcenU11)] <- 0 # Needed when Upper==Inf
+ wzcenU22[!is.finite(wzcenU22)] <- 0 # Needed when Upper==Inf
+ wzcenU12[!is.finite(wzcenU12)] <- 0 # Needed when Upper==Inf
+ wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] + A3 * wzcenU11
+ wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] + A3 * wzcenU22
+ wz[,iam(1, 2,M)] <- wz[,iam(1, 2,M)] + A3 * wzcenU12
+ wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] * dmu.deta^2
+ wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] * dsd.deta^2
+ wz[,iam(1, 2,M)] <- wz[,iam(1, 2,M)] * dmu.deta * dsd.deta
c(w) * wz
}), list( .lmu = lmu, .lsd = lsd ))))
}
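In cennormal1() the censored pieces ell2 and ell3 above are normal tail probabilities, while ell1 is the uncensored log-density with the constant -0.5*log(2*pi) dropped; a base-R illustration with made-up numbers:

mum <- 1; sdv <- 2; yobs <- 0.5
pnorm(yobs, mean = mum, sd = sdv, log.p = TRUE)                      # ell2: log P(Y <= y) for left-censored y
pnorm(yobs, mean = mum, sd = sdv, lower.tail = FALSE, log.p = TRUE)  # ell3: log P(Y >= y) for right-censored y
-log(sdv) - 0.5 * ((yobs - mum) / sdv)^2                             # ell1: uncensored piece (constant omitted)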
@@ -542,33 +545,34 @@ if (FALSE)
stop("cannot handle left-censored data")
if (!length(extra$rightcensored))
- extra$rightcensored = rep(FALSE, len = n)
+ extra$rightcensored <- rep(FALSE, len = n)
predictors.names <-
namesof("scale", .lscale, earg = .escale, tag = FALSE)
if (!length(etastart)) {
- a.init = (y+1/8) / sqrt(pi/2)
- etastart = theta2eta(a.init, .lscale, earg = .escale )
+ a.init <- (y+1/8) / sqrt(pi/2)
+ etastart <- theta2eta(a.init, .lscale, earg = .escale )
}
}), list( .lscale = lscale, .escale = escale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale )
+ Scale <- eta2theta(eta, .lscale, earg = .escale )
Scale * sqrt(pi/2)
}, list( .lscale = lscale, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale)
- misc$earg = list("scale" = .escale)
+ misc$link <- c("scale" = .lscale)
- misc$oim = .oim
+ misc$earg <- list("scale" = .escale)
+
+ misc$oim <- .oim
}), list( .lscale = lscale, .escale = escale,
.oim = oim ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale )
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Scale <- eta2theta(eta, .lscale, earg = .escale )
- cen0 = !extra$rightcensored # uncensored obsns
- cenU = extra$rightcensored
+ cen0 <- !extra$rightcensored # uncensored obsns
+ cenU <- extra$rightcensored
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
@@ -576,34 +580,34 @@ if (FALSE)
0.5*(y[cen0]/Scale[cen0])^2)) -
sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
}, list( .lscale = lscale,
- .escale = escale ))),
+ .escale = escale ))),
vfamily = c("cenrayleigh"),
deriv = eval(substitute(expression({
- cen0 = !extra$rightcensored # uncensored obsns
- cenU = extra$rightcensored
+ cen0 <- !extra$rightcensored # uncensored obsns
+ cenU <- extra$rightcensored
- Scale = eta2theta(eta, .lscale, earg = .escale )
+ Scale <- eta2theta(eta, .lscale, earg = .escale )
- dl.dScale = ((y/Scale)^2 - 2) / Scale
+ dl.dScale <- ((y/Scale)^2 - 2) / Scale
- dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale )
- dl.dScale[cenU] = y[cenU]^2 / Scale[cenU]^3
+ dScale.deta <- dtheta.deta(Scale, .lscale, earg = .escale )
+ dl.dScale[cenU] <- y[cenU]^2 / Scale[cenU]^3
c(w) * dl.dScale * dScale.deta
}), list( .lscale = lscale,
.escale = escale ))),
weight = eval(substitute(expression({
- ned2l.dScale2 = 4 / Scale^2
- wz = dScale.deta^2 * ned2l.dScale2
+ ned2l.dScale2 <- 4 / Scale^2
+ wz <- dScale.deta^2 * ned2l.dScale2
if ( .oim ) {
- d2l.dScale2 = 3 * (y[cenU])^2 / (Scale[cenU])^4
- d2Scale.deta2 = d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
- wz[cenU] = (dScale.deta[cenU])^2 * d2l.dScale2 -
- dl.dScale[cenU] * d2Scale.deta2
+ d2l.dScale2 <- 3 * (y[cenU])^2 / (Scale[cenU])^4
+ d2Scale.deta2 <- d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
+ wz[cenU] <- (dScale.deta[cenU])^2 * d2l.dScale2 -
+ dl.dScale[cenU] * d2Scale.deta2
} else {
- ned2l.dScale2[cenU] = 6 / (Scale[cenU])^2
- wz[cenU] = (dScale.deta[cenU])^2 * ned2l.dScale2[cenU]
+ ned2l.dScale2[cenU] <- 6 / (Scale[cenU])^2
+ wz[cenU] <- (dScale.deta[cenU])^2 * ned2l.dScale2[cenU]
}
c(w) * wz
@@ -623,8 +627,7 @@ if (FALSE)
ishape = NULL, iscale = NULL,
nrfs = 1,
probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = -2)
-{
+ imethod = 1, zero = -2) {
@@ -640,10 +643,12 @@ if (FALSE)
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
+
if (!is.Numeric(probs.y, positive = TRUE) ||
length(probs.y) < 2 ||
max(probs.y) >= 1)
@@ -655,9 +660,11 @@ if (FALSE)
nrfs > 1)
stop("bad input for argument 'nrfs'")
+
if (length(ishape))
if (!is.Numeric(ishape, positive = TRUE))
stop("argument 'ishape' values must be positive")
+
if (length(iscale))
if (!is.Numeric(iscale, positive = TRUE))
stop("argument 'iscale' values must be positive")
@@ -708,15 +715,14 @@ if (FALSE)
"don't use SurvS4()")
- mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
interleave.VGAM(M, M = Musual)]
-
Shape.init <- matrix(if(length( .ishape )) .ishape else 0 + NA,
n, ncoly, byrow = TRUE)
Scale.init <- matrix(if(length( .iscale )) .iscale else 0 + NA,
@@ -727,12 +733,13 @@ if (FALSE)
!length( .iscale )) {
for (ilocal in 1:ncoly) {
- anyc <- FALSE # extra$leftcensored | extra$rightcensored
- i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
+ anyc <- FALSE # extra$leftcensored | extra$rightcensored
+ i11 <- if ( .imethod == 1) anyc else FALSE # Can be all data
probs.y <- .probs.y
xvec <- log(-log1p(-probs.y))
- fit0 <- lsfit(x = xvec, y = log(quantile(y[!i11, ilocal],
- probs = probs.y )))
+ fit0 <- lsfit(x = xvec,
+ y = log(quantile(y[!i11, ilocal],
+ probs = probs.y )))
if (!is.Numeric(Shape.init[, ilocal]))
@@ -805,7 +812,7 @@ if (FALSE)
.imethod = imethod,
.nrfs = nrfs ) )),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
@@ -822,14 +829,14 @@ if (FALSE)
Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
dl.dshape <- 1 / Shape + log(y / Scale) -
- log(y / Scale) * (y / Scale)^Shape
+ log(y / Scale) * (y / Scale)^Shape
dl.dscale <- (Shape / Scale) * (-1.0 + (y / Scale)^Shape)
dshape.deta <- dtheta.deta(Shape, .lshape, earg = .eshape )
dscale.deta <- dtheta.deta(Scale, .lscale, earg = .escale )
- myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
- cbind(dshape.deta, dscale.deta)
+ myderiv <- c(w) * cbind(dl.dshape * dshape.deta,
+ dl.dscale * dscale.deta)
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ) )),
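The analytic scores dl.dshape and dl.dscale rearranged above can be checked numerically against dweibull(); a quick base-R check with made-up numbers:

Shape <- 2; Scale <- 3; yobs <- 2.5; eps <- 1e-6
analytic  <- (Shape / Scale) * (-1 + (yobs / Scale)^Shape)           # dl.dscale as coded above
numerical <- (dweibull(yobs, Shape, Scale + eps, log = TRUE) -
              dweibull(yobs, Shape, Scale - eps, log = TRUE)) / (2 * eps)
c(analytic, numerical)                                               # agree to rounding error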
@@ -841,20 +848,14 @@ if (FALSE)
ned2l.dscale <- (Shape / Scale)^2
ned2l.dshapescale <- (EulerM-1) / Scale
- wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
+ wz <- array(c(c(w) * ned2l.dshape * dshape.deta^2,
+ c(w) * ned2l.dscale * dscale.deta^2,
+ c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
- ind11 <- ind22 <- ind12 <- NULL
- for (ii in 1:(M / Musual)) {
- ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
- ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
- ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
- }
- wz[, ind11] <- ned2l.dshape * dshape.deta^2
- wz[, ind22] <- ned2l.dscale * dscale.deta^2
- wz[, ind12] <- ned2l.dshapescale * dscale.deta * dshape.deta
-
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ wz
}), list( .eshape = eshape, .nrfs = nrfs ))))
}
@@ -870,88 +871,87 @@ setOldClass(c("SurvS4", "Surv"))
SurvS4 <-
function (time, time2, event, type = c("right", "left", "interval",
- "counting", "interval2"), origin = 0)
-{
- nn <- length(time)
- ng <- nargs()
- if (missing(type)) {
- if (ng == 1 || ng == 2)
- type <- "right" else if (ng == 3)
- type <- "counting" else stop("Invalid number of arguments")
- } else {
- type <- match.arg(type)
- ng <- ng - 1
- if (ng != 3 && (type == "interval" || type == "counting"))
- stop("Wrong number of args for this type of survival data")
- if (ng != 2 && (type == "right" || type == "left" ||
- type == "interval2"))
- stop("Wrong number of args for this type of survival data")
- }
- who <- !is.na(time)
- if (ng == 1) {
- if (!is.numeric(time))
- stop("Time variable is not numeric")
- ss <- cbind(time, 1)
- dimnames(ss) <- list(NULL, c("time", "status"))
- } else if (type == "right" || type == "left") {
- if (!is.numeric(time))
- stop("Time variable is not numeric")
- if (length(time2) != nn)
- stop("Time and status are different lengths")
- if (is.logical(time2))
- status <- 1 * time2 else if (is.numeric(time2)) {
- who2 <- !is.na(time2)
- if (max(time2[who2]) == 2)
- status <- time2 - 1 else status <- time2
- if (any(status[who2] != 0 & status[who2] != 1))
+ "counting", "interval2"), origin = 0) {
+ nn <- length(time)
+ ng <- nargs()
+ if (missing(type)) {
+ if (ng == 1 || ng == 2)
+ type <- "right" else if (ng == 3)
+ type <- "counting" else stop("Invalid number of arguments")
+ } else {
+ type <- match.arg(type)
+ ng <- ng - 1
+ if (ng != 3 && (type == "interval" || type == "counting"))
+ stop("Wrong number of args for this type of survival data")
+ if (ng != 2 && (type == "right" || type == "left" ||
+ type == "interval2"))
+ stop("Wrong number of args for this type of survival data")
+ }
+ who <- !is.na(time)
+ if (ng == 1) {
+ if (!is.numeric(time))
+ stop("Time variable is not numeric")
+ ss <- cbind(time, 1)
+ dimnames(ss) <- list(NULL, c("time", "status"))
+ } else if (type == "right" || type == "left") {
+ if (!is.numeric(time))
+ stop("Time variable is not numeric")
+ if (length(time2) != nn)
+ stop("Time and status are different lengths")
+ if (is.logical(time2))
+ status <- 1 * time2 else if (is.numeric(time2)) {
+ who2 <- !is.na(time2)
+ if (max(time2[who2]) == 2)
+ status <- time2 - 1 else status <- time2
+ if (any(status[who2] != 0 & status[who2] != 1))
+ stop("Invalid status value")
+ } else stop("Invalid status value")
+ ss <- cbind(time, status)
+ dimnames(ss) <- list(NULL, c("time", "status"))
+ } else if (type == "counting") {
+ if (length(time2) != nn)
+ stop("Start and stop are different lengths")
+ if (length(event) != nn)
+ stop("Start and event are different lengths")
+ if (!is.numeric(time))
+ stop("Start time is not numeric")
+ if (!is.numeric(time2))
+ stop("Stop time is not numeric")
+ who3 <- who & !is.na(time2)
+ if (any(time[who3] >= time2[who3]))
+ stop("Stop time must be > start time")
+ if (is.logical(event))
+ status <- 1 * event else if (is.numeric(event)) {
+ who2 <- !is.na(event)
+ if (max(event[who2]) == 2)
+ status <- event - 1 else status <- event
+ if (any(status[who2] != 0 & status[who2] != 1))
stop("Invalid status value")
- } else stop("Invalid status value")
- ss <- cbind(time, status)
- dimnames(ss) <- list(NULL, c("time", "status"))
- } else if (type == "counting") {
- if (length(time2) != nn)
- stop("Start and stop are different lengths")
- if (length(event) != nn)
- stop("Start and event are different lengths")
- if (!is.numeric(time))
- stop("Start time is not numeric")
- if (!is.numeric(time2))
- stop("Stop time is not numeric")
- who3 <- who & !is.na(time2)
- if (any(time[who3] >= time2[who3]))
- stop("Stop time must be > start time")
- if (is.logical(event))
- status <- 1 * event else if (is.numeric(event)) {
- who2 <- !is.na(event)
- if (max(event[who2]) == 2)
- status <- event - 1 else status <- event
- if (any(status[who2] != 0 & status[who2] != 1))
- stop("Invalid status value")
- } else stop("Invalid status value")
- ss <- cbind(time - origin, time2 - origin, status)
+ } else stop("Invalid status value")
+ ss <- cbind(time - origin, time2 - origin, status)
+ } else {
+ if (type == "interval2") {
+ event <- ifelse(is.na(time), 2, ifelse(is.na(time2),
+ 0, ifelse(time == time2, 1, 3)))
+ if (any(time[event == 3] > time2[event == 3]))
+ stop("Invalid interval: start > stop")
+ time <- ifelse(event != 2, time, time2)
+ type <- "interval"
} else {
- if (type == "interval2") {
- event <- ifelse(is.na(time), 2, ifelse(is.na(time2),
- 0, ifelse(time == time2, 1, 3)))
- if (any(time[event == 3] > time2[event == 3]))
- stop("Invalid interval: start > stop")
- time <- ifelse(event != 2, time, time2)
- type <- "interval"
- } else {
- temp <- event[!is.na(event)]
- if (!is.numeric(temp))
- stop("Status indicator must be numeric")
- if (length(temp) > 0 && any(temp != floor(temp) |
- temp < 0 | temp > 3))
- stop("Status indicator must be 0, 1, 2 or 3")
- }
- status <- event
- ss <- cbind(time, ifelse(!is.na(event) & event == 3,
- time2, 1), status)
+ temp <- event[!is.na(event)]
+ if (!is.numeric(temp))
+ stop("Status indicator must be numeric")
+ if (length(temp) > 0 && any(temp != floor(temp) |
+ temp < 0 | temp > 3))
+ stop("Status indicator must be 0, 1, 2 or 3")
}
- attr(ss, "type") <- type
- class(ss) <- "SurvS4"
- ss
+ status <- event
+ ss <- cbind(time, ifelse(!is.na(event) & event == 3,
+ time2, 1), status)
+ }
+ attr(ss, "type") <- type
+ class(ss) <- "SurvS4"
+ ss
}
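Usage of the re-indented constructor is unchanged: a right-censored response is a two-column matrix carrying a "type" attribute. A short sketch, assuming the package is attached:

stime  <- c(4, 7, 7, 9)
status <- c(1, 1, 0, 1)          # 0 = right-censored, 1 = event observed
yy <- SurvS4(stime, status)
attr(yy, "type")                 # "right"
class(yy)                        # "SurvS4"; setIs() below also treats it as a matrix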
@@ -963,13 +963,12 @@ is.SurvS4 <- function(x) inherits(x, "SurvS4")
-setIs(class1 = "SurvS4", class2 = "matrix") # Forces vglm()@y to be a matrix
+setIs(class1 = "SurvS4",
+ class2 = "matrix") # Forces vglm()@y to be a matrix
-as.character.SurvS4 <-
-function (x, ...)
-{
+as.character.SurvS4 <- function (x, ...) {
class(x) <- NULL
type <- attr(x, "type")
@@ -996,7 +995,7 @@ function (x, ...)
-"[.SurvS4" <- function(x, i,j, drop = FALSE) {
+"[.SurvS4" <- function(x, i, j, drop = FALSE) {
if (missing(j)) {
temp <- class(x)
type <- attr(x, "type")
@@ -1040,4 +1039,346 @@ setMethod("show", "SurvS4",
+pgamma.deriv.unscaled <- function(q, shape) {
+
+
+
+
+ gam0 <- exp(lgamma(shape) +
+ pgamma(q = q, shape = shape, log.p = TRUE))
+
+ I.sq <- pgamma(q = q, shape = shape)
+
+ alld <- pgamma.deriv(q = q, shape = shape) # 6-coln matrix
+ tmp3 <- alld[, 3] / I.sq # RHS of eqn (4.5) of \cite{wing:1989}
+
+ G1s <- digamma(shape) + tmp3 # eqn (4.9)
+ gam1 <- gam0 * G1s
+
+
+ dG1s <- trigamma(shape) + alld[, 4] / I.sq - tmp3^2 # eqn (4.13)
+ G2s <- dG1s + G1s^2 # eqn (4.12)
+
+ gam2 <- gam0 * G2s
+
+ cbind("0" = gam0,
+ "1" = gam1,
+ "2" = gam2)
+}
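Column "0" returned above is the unscaled lower incomplete gamma function, and columns "1" and "2" appear to be its first and second derivatives with respect to shape, per the Wingo (1989) equation references in the comments. A quick base-R identity check for column "0":

q <- 2.5; shape <- 1.7
exp(lgamma(shape) + pgamma(q, shape, log.p = TRUE))  # gam0 as computed above
gamma(shape) * pgamma(q, shape)                      # identical: gamma(shape) * P(shape, q)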
+
+
+
+
+
+
+ truncweibull <-
+ function(lower.limit = 1e-5,
+ lAlpha = "loge", lBetaa = "loge",
+ iAlpha = NULL, iBetaa = NULL,
+ nrfs = 1,
+ probs.y = c(0.2, 0.5, 0.8),
+ imethod = 1, zero = -2) {
+
+
+
+
+
+
+
+
+
+ lAlpha <- as.list(substitute(lAlpha))
+ eAlpha <- link2list(lAlpha)
+ lAlpha <- attr(eAlpha, "function.name")
+
+ lBetaa <- as.list(substitute(lBetaa))
+ eBetaa <- link2list(lBetaa)
+ lBetaa <- attr(eBetaa, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (!is.Numeric(probs.y, positive = TRUE) ||
+ length(probs.y) < 2 ||
+ max(probs.y) >= 1)
+ stop("bad input for argument 'probs.y'")
+
+
+ if (!is.Numeric(nrfs, allowable.length = 1) ||
+ nrfs < 0 ||
+ nrfs > 1)
+ stop("bad input for argument 'nrfs'")
+
+
+ if (length(iAlpha))
+ if (!is.Numeric(iAlpha, positive = TRUE))
+ stop("argument 'iAlpha' values must be positive")
+
+ if (length(iBetaa))
+ if (!is.Numeric(iBetaa, positive = TRUE))
+ stop("argument 'iBetaa' values must be positive")
+
+
+ new("vglmff",
+  blurb = c("Truncated Weibull distribution\n\n",
+ "Links: ",
+ namesof("Alpha", lAlpha, earg = eAlpha), ", ",
+ namesof("Betaa", lBetaa, earg = eBetaa), "\n",
+ if (length( lower.limit ) < 5)
+ paste("Truncation point(s): ",
+ lower.limit, sep = ", ") else
+ ""),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ lower.limit = .lower.limit ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lower.limit = lower.limit
+ ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+ extra$lower.limit <- matrix( .lower.limit , n, ncoly, byrow = TRUE)
+
+ if (any(y < extra$lower.limit)) {
+ stop("some response values less than argument 'lower.limit'")
+ }
+
+
+
+ if (is.SurvS4(y))
+ stop("only uncensored observations are allowed; ",
+ "don't use SurvS4()")
+
+
+ mynames1 <- paste("Alpha", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("Betaa", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lAlpha , earg = .eAlpha , tag = FALSE),
+ namesof(mynames2, .lBetaa , earg = .eBetaa , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+ Alpha.init <- matrix(if (length( .iAlpha )) .iAlpha else 0 + NA,
+ n, ncoly, byrow = TRUE)
+ Betaa.init <- matrix(if (length( .iBetaa )) .iBetaa else 0 + NA,
+ n, ncoly, byrow = TRUE)
+
+ if (!length(etastart)) {
+ if (!length( .iAlpha ) ||
+ !length( .iBetaa )) {
+ for (ilocal in 1:ncoly) {
+
+ anyc <- FALSE # extra$leftcensored | extra$rightcensored
+ i11 <- if ( .imethod == 1) anyc else FALSE # Can be all data
+ probs.y <- .probs.y
+ xvec <- log(-log1p(-probs.y))
+ fit0 <- lsfit(x = xvec,
+ y = log(quantile(y[!i11, ilocal],
+ probs = probs.y )))
+ aaa.init <- 1 / fit0$coef["X"]
+ bbb.init <- exp(fit0$coef["Intercept"])
+
+ if (!is.Numeric(Betaa.init[, ilocal]))
+ Betaa.init[, ilocal] <- aaa.init
+ if (!is.Numeric(Alpha.init[, ilocal]))
+ Alpha.init[, ilocal] <- (1 / bbb.init)^aaa.init
+ } # ilocal
+ } else {
+ Alpha.init <- rep( .iAlpha , length = n)
+ Betaa.init <- rep( .iBetaa , length = n)
+ }
+
+ etastart <-
+ cbind(theta2eta(Alpha.init, .lAlpha , earg = .eAlpha ),
+ theta2eta(Betaa.init, .lBetaa , earg = .eBetaa ))[,
+ interleave.VGAM(M, M = Musual)]
+ }
+ }), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+ .eBetaa = eBetaa, .eAlpha = eAlpha,
+ .iBetaa = iBetaa, .iAlpha = iAlpha,
+ .lower.limit = lower.limit,
+ .probs.y = probs.y,
+ .imethod = imethod ) )),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Alpha <- eta2theta(eta[, c(TRUE, FALSE)], .lAlpha , earg = .eAlpha )
+ Betaa <- eta2theta(eta[, c(FALSE, TRUE)], .lBetaa , earg = .eBetaa )
+
+
+ aTb <- Alpha * extra$lower.limit^Betaa
+ wingo3 <- pgamma.deriv.unscaled(q = aTb, shape = 1 + 1 / Betaa)
+ exp.aTb <- exp(aTb)
+
+ (gamma(1 + 1 / Betaa) - wingo3[, 1]) *
+ exp.aTb / Alpha^(1 / Betaa)
+ }, list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+ .eBetaa = eBetaa, .eAlpha = eAlpha,
+ .lower.limit = lower.limit) )),
+ last = eval(substitute(expression({
+
+ aaa.hat <- Betaa
+ regnotok <- any(aaa.hat <= 2)
+ if (any(aaa.hat <= 1)) {
+ warning("MLE regularity conditions are violated",
+ "(Betaa <= 1) at the final iteration: ",
+ "MLEs are not consistent")
+ } else if (any(1 < aaa.hat & aaa.hat < 2)) {
+ warning("MLE regularity conditions are violated",
+ "(1 < Betaa < 2) at the final iteration: ",
+ "MLEs exist but are not asymptotically normal")
+ } else if (any(2 == aaa.hat)) {
+ warning("MLE regularity conditions are violated",
+ "(Betaa == 2) at the final iteration: ",
+ "MLEs exist and are normal and asymptotically ",
+ "efficient but with a slower convergence rate than when ",
+ "Betaa > 2")
+ }
+
+
+
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lAlpha , length = ncoly),
+ rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .eAlpha
+ misc$earg[[Musual*ii ]] <- .eBetaa
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+
+
+ misc$nrfs <- .nrfs
+ misc$RegCondOK <- !regnotok # Save this for later
+
+
+
+ }), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+ .eBetaa = eBetaa, .eAlpha = eAlpha,
+ .imethod = imethod,
+ .lower.limit = lower.limit,
+ .nrfs = nrfs ) )),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Alpha <- eta2theta(eta[, c(TRUE, FALSE)], .lAlpha , earg = .eAlpha )
+ Betaa <- eta2theta(eta[, c(FALSE, TRUE)], .lBetaa , earg = .eBetaa )
+ Shape <- Betaa
+ Scale <- 1 / Alpha^(1/Betaa)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * (dweibull(x = y, shape = Shape,
+ scale = Scale, log = TRUE) -
+ pweibull(q = extra$lower.limit, shape = Shape,
+ scale = Scale, log.p = TRUE,
+ lower.tail = FALSE)))
+ }
+ }, list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+ .eBetaa = eBetaa, .eAlpha = eAlpha,
+ .lower.limit = lower.limit ) )),
+
+ vfamily = c("truncweibull"),
+
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ Alpha <- eta2theta(eta[, c(TRUE, FALSE)], .lAlpha , earg = .eAlpha )
+ Betaa <- eta2theta(eta[, c(FALSE, TRUE)], .lBetaa , earg = .eBetaa )
+
+ Shape <- Betaa
+ Scale <- 1 / Alpha^(1/Betaa)
+ TTT <- extra$lower.limit
+ dl.dAlpha <- 1 / Alpha - y^Betaa + TTT^Betaa
+ dl.dBetaa <- (1 / Betaa) + log(y) -
+ Alpha * (y^Betaa * log(y) -
+ TTT^Betaa * log(TTT))
+
+ dAlpha.deta <- dtheta.deta(Alpha, .lAlpha, earg = .eAlpha )
+ dBetaa.deta <- dtheta.deta(Betaa, .lBetaa, earg = .eBetaa )
+
+ myderiv <- c(w) * cbind(dl.dAlpha * dAlpha.deta,
+ dl.dBetaa * dBetaa.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+ .eBetaa = eBetaa, .eAlpha = eAlpha,
+ .lower.limit = lower.limit ) )),
+
+
+ weight = eval(substitute(expression({
+ aTb <- Alpha * TTT^Betaa
+ exp.aTb <- exp(aTb)
+ TblogT <- (TTT^Betaa) * log(TTT)
+ wingo3 <- pgamma.deriv.unscaled(q = aTb,
+ shape = 2) # 3-cols
+
+
+ Eyblogy <- (exp.aTb * (digamma(2) - wingo3[, 2]) -
+ (aTb + 1) * log(Alpha)) / (Alpha * Betaa)
+
+
+
+ Eyblog2y <- (exp.aTb * (digamma(2)^2 + trigamma(2) -
+ wingo3[, 3]) - 2 * log(Alpha) *
+ (digamma(2) - wingo3[, 2])) / (Alpha * Betaa^2) +
+ (log(Alpha)^2) * (aTb + 1) / (Alpha * Betaa^2)
+
+ ned2l.daa <- 1 / Alpha^2
+ ned2l.dab <- Eyblogy - TblogT
+ ned2l.dbb <- (1 / Betaa)^2 + Alpha * Eyblog2y -
+ aTb * (log(TTT))^2
+
+
+
+
+
+ wz <- array(c(c(w) * ned2l.daa * dAlpha.deta^2,
+ c(w) * ned2l.dbb * dBetaa.deta^2,
+ c(w) * ned2l.dab * dBetaa.deta * dAlpha.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
+ wz
+ }), list( .nrfs = nrfs ))))
+}
+
+
+
+
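As an illustration of the new truncweibull() family (a sketch only; the simulated data, parameter values and seed below are made up), note that Betaa is the ordinary Weibull shape and Alpha = scale^(-Betaa):

  # Simulate left-truncated Weibull responses by inverse-CDF sampling.
  set.seed(1)
  nn <- 1000; Shape <- 2; Scale <- 3; lower <- 1
  uu <- runif(nn, min = pweibull(lower, shape = Shape, scale = Scale), max = 1)
  wdata <- data.frame(y = qweibull(uu, shape = Shape, scale = Scale))

  fit <- vglm(y ~ 1, truncweibull(lower.limit = lower), data = wdata, trace = TRUE)
  coef(fit, matrix = TRUE)   # intercepts on the loge scale of Alpha and Betaa
  Coef(fit)                  # compare with c(Alpha = Scale^(-Shape), Betaa = Shape)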
diff --git a/R/family.circular.R b/R/family.circular.R
index 0d985fd..e2bb415 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -16,15 +16,18 @@ dcard <- function(x, mu, rho, log = FALSE) {
rm(log)
- L = max(length(x), length(mu), length(rho))
- x = rep(x, len = L); mu = rep(mu, len = L); rho = rep(rho, len = L);
- logdensity = rep(log(0), len = L)
- xok = (x > 0) & (x < (2*pi))
- logdensity[xok] = -log(2*pi) + log1p(2 * rho[xok] * cos(x[xok]-mu[xok]))
- logdensity[mu <= 0] = NaN
- logdensity[mu >= 2*pi] = NaN
- logdensity[rho <= -0.5] = NaN
- logdensity[rho >= 0.5] = NaN
+ L <- max(length(x), length(mu), length(rho))
+ x <- rep(x, len = L);
+ mu <- rep(mu, len = L);
+ rho <- rep(rho, len = L);
+ logdensity <- rep(log(0), len = L)
+ xok <- (x > 0) & (x < (2*pi))
+ logdensity[xok] <- -log(2*pi) + log1p(2 * rho[xok] *
+ cos(x[xok]-mu[xok]))
+ logdensity[mu <= 0] <- NaN
+ logdensity[mu >= 2*pi] <- NaN
+ logdensity[rho <= -0.5] <- NaN
+ logdensity[rho >= 0.5] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
@@ -34,9 +37,9 @@ pcard <- function(q, mu, rho) {
stop("'mu' must be between 0 and 2*pi inclusive")
if (!is.Numeric(rho) || max(abs(rho) > 0.5))
stop("'rho' must be between -0.5 and 0.5 inclusive")
- ans = (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
- ans[q >= (2*pi)] = 1
- ans[q <= 0] = 0
+ ans <- (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
+ ans[q >= (2*pi)] <- 1
+ ans[q <= 0] <- 0
ans
}
@@ -49,24 +52,24 @@ qcard <- function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
stop("'rho' must be between -0.5 and 0.5 inclusive")
if (!is.Numeric(p, positive = TRUE) || any(p > 1))
stop("'p' must be between 0 and 1")
- nn = max(length(p), length(mu), length(rho))
- p = rep(p, len=nn)
- mu = rep(mu, len=nn)
- rho = rep(rho, len=nn)
+ nn <- max(length(p), length(mu), length(rho))
+ p <- rep(p, len=nn)
+ mu <- rep(mu, len=nn)
+ rho <- rep(rho, len=nn)
- oldans = 2 * pi * p
+ oldans <- 2 * pi * p
for(its in 1:maxits) {
- ans = oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
- 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
- index = (ans <= 0) | (ans > 2*pi)
- if (any(index)) {
- ans[index] = runif (sum(index), 0, 2*pi)
- }
+ ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
+ 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
+ index <- (ans <= 0) | (ans > 2*pi)
+ if (any(index)) {
+ ans[index] <- runif (sum(index), 0, 2*pi)
+ }
if (max(abs(ans - oldans)) < tolerance) break;
if (its == maxits) {warning("did not converge"); break}
- oldans = ans
+ oldans <- ans
}
ans
}
@@ -82,16 +85,15 @@ rcard <- function(n, mu, rho, ...) {
integer.valued = TRUE, allowable.length = 1))
stop("argument 'n' must be a single positive integer")
- mu = rep(mu, len = n)
- rho = rep(rho, len = n)
+ mu <- rep(mu, len = n)
+ rho <- rep(rho, len = n)
qcard(runif (n), mu = mu, rho = rho, ...)
}
-cardioid.control <- function(save.weight = TRUE, ...)
-{
+cardioid.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
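Since qcard() inverts pcard() by Newton-Raphson, a quick round-trip check of the cardioid p/q/r functions is (a sketch; tolerances are indicative only):

  pvec <- seq(0.05, 0.95, by = 0.05)
  qq <- qcard(pvec, mu = pi, rho = 0.3)
  max(abs(pcard(qq, mu = pi, rho = 0.3) - pvec))   # close to 0 (default tolerance 1e-7)

  set.seed(2)
  ys <- rcard(500, mu = pi, rho = 0.3)   # rcard() feeds uniforms through qcard()
  range(ys)                              # all values inside (0, 2*pi)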
@@ -101,8 +103,7 @@ cardioid.control <- function(save.weight = TRUE, ...)
lmu = elogit(min = 0, max = 2*pi),
lrho = elogit(min = -0.5, max = 0.5),
imu = NULL, irho = 0.3,
- nsimEIM = 100, zero = NULL)
-{
+ nsimEIM = 100, zero = NULL) {
lmu <- as.list(substitute(lmu))
emu <- link2list(lmu)
@@ -128,14 +129,14 @@ cardioid.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Cardioid distribution\n\n",
- "Links: ",
- namesof("mu", lmu, earg = emu, tag = FALSE), ", ",
- namesof("rho", lrho, earg = erho, tag = FALSE), "\n",
- "Mean: ",
- "pi + (rho/pi) *",
- "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
+ "Links: ",
+ namesof("mu", lmu, earg = emu, tag = FALSE), ", ",
+ namesof("rho", lrho, earg = erho, tag = FALSE), "\n",
+ "Mean: ",
+ "pi + (rho/pi) *",
+ "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -158,43 +159,45 @@ cardioid.control <- function(save.weight = TRUE, ...)
namesof("rho", .lrho, earg = .erho, tag = FALSE))
if (!length(etastart)) {
- rho.init = rep(if (length(.irho)) .irho else 0.3, length=n)
+ rho.init <- rep(if (length(.irho)) .irho else 0.3, length=n)
cardioid.Loglikfun <- function(mu, y, x, w, extraargs) {
- rho = extraargs$irho
+ rho <- extraargs$irho
sum(w * (-log(2*pi) + log1p(2*rho*cos(y-mu))))
}
- mu.grid = seq(0.1, 6.0, len=19)
- mu.init = if (length( .imu )) .imu else
- getMaxMin(mu.grid, objfun=cardioid.Loglikfun, y=y, x=x, w=w,
+ mu.grid <- seq(0.1, 6.0, len=19)
+ mu.init <- if (length( .imu )) .imu else
+ getMaxMin(mu.grid, objfun = cardioid.Loglikfun,
+ y = y, x = x, w = w,
extraargs = list(irho = rho.init))
- mu.init = rep(mu.init, length=length(y))
- etastart = cbind(theta2eta( mu.init, .lmu, earg = .emu),
- theta2eta(rho.init, .lrho, earg = .erho))
+ mu.init <- rep(mu.init, length=length(y))
+ etastart <-
+ cbind(theta2eta( mu.init, .lmu, earg = .emu),
+ theta2eta(rho.init, .lrho, earg = .erho))
}
}), list( .lmu = lmu, .lrho = lrho,
.imu = imu, .irho = irho,
.emu = emu, .erho = erho ))),
linkinv = eval(substitute(function(eta, extra = NULL){
- mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
pi + (rho/pi) *
((2*pi-mu)*sin(2*pi-mu) + cos(2*pi-mu) - mu*sin(mu) - cos(mu))
}, list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho ))),
last = eval(substitute(expression({
- misc$link = c("mu" = .lmu, "rho" = .lrho)
+ misc$link <- c("mu" = .lmu, "rho" = .lrho)
- misc$earg = list("mu" = .emu, "rho" = .erho)
+ misc$earg <- list("mu" = .emu, "rho" = .erho)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
}), list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dcard(x = y, mu = mu, rho = rho, log = TRUE))
@@ -203,37 +206,37 @@ cardioid.control <- function(save.weight = TRUE, ...)
.emu = emu, .erho=erho ))),
vfamily = c("cardioid"),
deriv = eval(substitute(expression({
- mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
- dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
- drho.deta = dtheta.deta(rho, link = .lrho, earg = .erho)
+ dmu.deta <- dtheta.deta(mu, link = .lmu, earg = .emu)
+ drho.deta <- dtheta.deta(rho, link = .lrho, earg = .erho)
- dl.dmu = 2 * rho * sin(y-mu) / (1 + 2 * rho * cos(y-mu))
- dl.drho = 2 * cos(y-mu) / (1 + 2 * rho * cos(y-mu))
+ dl.dmu <- 2 * rho * sin(y-mu) / (1 + 2 * rho * cos(y-mu))
+ dl.drho <- 2 * cos(y-mu) / (1 + 2 * rho * cos(y-mu))
c(w) * cbind(dl.dmu * dmu.deta,
dl.drho * drho.deta)
}), list( .lmu = lmu, .lrho=lrho,
.emu = emu, .erho=erho, .nsimEIM=nsimEIM ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- ysim = rcard(n, mu=mu, rho=rho)
- dl.dmu = 2 * rho * sin(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
- dl.drho = 2 * cos(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
+ ysim <- rcard(n, mu=mu, rho=rho)
+ dl.dmu <- 2 * rho * sin(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
+ dl.drho <- 2 * cos(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
rm(ysim)
- temp3 = cbind(dl.dmu, dl.drho)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.dmu, dl.drho)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- dtheta.detas = cbind(dmu.deta, drho.deta)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ dtheta.detas <- cbind(dmu.deta, drho.deta)
+ wz <- wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
c(w) * wz
}), list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho, .nsimEIM = nsimEIM ))))
@@ -270,13 +273,13 @@ cardioid.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Von Mises distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: location"),
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
@@ -295,24 +298,24 @@ cardioid.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init = mean(y)
- rat10 = sqrt((sum(w*cos(y )))^2 + sum(w*sin(y))^2) / sum(w)
- scale.init = sqrt(1 - rat10)
+ locat.init <- mean(y)
+ rat10 <- sqrt((sum(w*cos(y )))^2 + sum(w*sin(y))^2) / sum(w)
+ scale.init <- sqrt(1 - rat10)
} else {
- locat.init = median(y)
- scale.init = sqrt(sum(w*abs(y - locat.init)) / sum(w))
+ locat.init <- median(y)
+ scale.init <- sqrt(sum(w*abs(y - locat.init)) / sum(w))
}
- locat.init = if (length( .ilocat ))
+ locat.init <- if (length( .ilocat ))
rep( .ilocat , len=n) else
rep(locat.init, len=n)
- scale.init = if (length( .iscale ))
+ scale.init <- if (length( .iscale ))
rep( .iscale , len = n) else rep(1, len = n)
- etastart = cbind(
+ etastart <- cbind(
theta2eta(locat.init, .llocat, earg = .elocat),
theta2eta(scale.init, .lscale, earg = .escale))
}
- y = y %% (2*pi) # Coerce after initial values have been computed
+ y <- y %% (2*pi) # Coerce after initial values have been computed
}), list( .imethod = imethod, .ilocat = ilocat,
.escale = escale, .elocat = elocat,
.lscale = lscale, .llocat = llocat,
@@ -322,8 +325,8 @@ cardioid.control <- function(save.weight = TRUE, ...)
}, list( .escale = escale, .lscale = lscale,
.llocat = llocat, .elocat = elocat ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat, scale = .lscale)
- misc$earg = list(location = .elocat, scale = .escale )
+ misc$link <- c(location = .llocat , scale = .lscale )
+ misc$earg <- list(location = .elocat , scale = .escale )
@@ -331,8 +334,8 @@ cardioid.control <- function(save.weight = TRUE, ...)
.elocat = elocat, .escale = escale ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ Scale <- eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
@@ -342,27 +345,28 @@ cardioid.control <- function(save.weight = TRUE, ...)
.llocat = llocat, .elocat = elocat ))),
vfamily = c("vonmises"),
deriv = eval(substitute(expression({
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ Scale <- eta2theta(eta[, 2], .lscale, earg = .escale)
- tmp6 = mbesselI0(x=Scale, deriv=2)
- dl.dlocat = Scale * sin(y - locat)
- dl.dscale = cos(y - locat) - tmp6[, 2] / tmp6[, 1]
+ tmp6 <- mbesselI0(x = Scale, deriv = 2)
+ dl.dlocat <- Scale * sin(y - locat)
+ dl.dscale <- cos(y - locat) - tmp6[, 2] / tmp6[, 1]
- dlocat.deta = dtheta.deta(locat, .llocat ,
+ dlocat.deta <- dtheta.deta(locat, .llocat ,
earg = .elocat )
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dscale.deta <- dtheta.deta(Scale, .lscale, earg = .escale)
c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
.llocat = llocat, .elocat = elocat ))),
weight = eval(substitute(expression({
- d2l.dlocat2 = Scale * tmp6[, 2] / tmp6[, 1]
- d2l.dscale2 = tmp6[, 3] / tmp6[, 1] - (tmp6[, 2] / tmp6[, 1])^2
- wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
- wz[,iam(1, 1, M)] = d2l.dlocat2 * dlocat.deta^2
- wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ ned2l.dlocat2 <- Scale * tmp6[, 2] / tmp6[, 1]
+ ned2l.dscale2 <- tmp6[, 3] / tmp6[, 1] - (tmp6[, 2] / tmp6[, 1])^2
+
+ wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz[,iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[,iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .escale = escale, .elocat = elocat,
.lscale = lscale, .llocat = llocat ))))
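A sketch of fitting the vonmises() family; the small rejection sampler below for von Mises variates is only an illustration and is not part of the package:

  rvm <- function(n, location = 0, kappa = 1) {
    # Accept/reject from a uniform proposal on [0, 2*pi).
    out <- numeric(0)
    while (length(out) < n) {
      cand <- runif(n, 0, 2 * pi)
      keep <- runif(n) < exp(kappa * (cos(cand - location) - 1))
      out <- c(out, cand[keep])
    }
    out[1:n]
  }

  set.seed(3)
  vdata <- data.frame(y = rvm(1000, location = 2, kappa = 1.5))
  fit <- vglm(y ~ 1, vonmises, data = vdata, trace = TRUE)
  Coef(fit)   # estimates of location and scale (the concentration parameter)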
diff --git a/R/family.exp.R b/R/family.exp.R
index 624f572..db1fe11 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,11 +12,12 @@
+
qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
- ppp = p
- vsmallno = sqrt(.Machine$double.eps)
- smallno = 0.10
+ ppp <- p
+ vsmallno <- sqrt(.Machine$double.eps)
+ smallno <- 0.10
if (any(min >= max))
stop("argument 'min' has values greater or equal ",
"to argument 'max'")
@@ -24,16 +25,16 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
Tol_nr > 0.10)
stop("argument 'Tol_nr' is not a single positive value, ",
"or is too large")
- nrok = ppp >= vsmallno & ppp <= 1.0 - vsmallno & is.finite(ppp)
+ nrok <- ppp >= vsmallno & ppp <= 1.0 - vsmallno & is.finite(ppp)
- eee = qbeta(ppp, shape1 = 3, shape2 = 3)
- eee[ppp < smallno] = sqrt(ppp[ppp < smallno])
- eee[ppp > 1.0 - smallno] = 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - smallno])
+ eee <- qbeta(ppp, shape1 = 3, shape2 = 3)
+ eee[ppp < smallno] <- sqrt(ppp[ppp < smallno])
+ eee[ppp > 1.0 - smallno] <- 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - smallno])
for(iii in 1:Maxit_nr) {
realdiff <- (peunif(eee[nrok]) - ppp[nrok]) / deunif(eee[nrok])
- eee[nrok] = eee[nrok] - realdiff
+ eee[nrok] <- eee[nrok] - realdiff
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
@@ -41,12 +42,12 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
if (max(abs(peunif(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
- eee[ppp < vsmallno] = sqrt( ppp[ppp < vsmallno])
- eee[ppp > 1.0 - vsmallno] = 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - vsmallno])
- eee[ppp == 0] = 0
- eee[ppp == 1] = 1
- eee[ppp < 0] = NA
- eee[ppp > 1] = NA
+ eee[ppp < vsmallno] <- sqrt( ppp[ppp < vsmallno])
+ eee[ppp > 1.0 - vsmallno] <- 1.0 - sqrt(1.0 - ppp[ppp > 1.0 - vsmallno])
+ eee[ppp == 0] <- 0
+ eee[ppp == 1] <- 1
+ eee[ppp < 0] <- NA
+ eee[ppp > 1] <- NA
min + eee * (max - min)
}
@@ -58,16 +59,16 @@ peunif <- function(q, min = 0, max = 1, log = FALSE) {
if (any(min >= max))
stop("argument 'min' has values greater or equal to argument 'max'")
- eee = (q - min) / (max - min)
+ eee <- (q - min) / (max - min)
if (log.arg) {
- logGofy = 2 * log(eee) - log1p(2 * eee * (eee - 1))
- logGofy[eee < 0] = -Inf
- logGofy[eee > 1] = 0.0
+ logGofy <- 2 * log(eee) - log1p(2 * eee * (eee - 1))
+ logGofy[eee < 0] <- -Inf
+ logGofy[eee > 1] <- 0.0
logGofy
} else {
- Gofy = eee^2 / (1 + 2 * eee * (eee - 1))
- Gofy[eee < 0] = 0.0
- Gofy[eee > 1] = 1.0
+ Gofy <- eee^2 / (1 + 2 * eee * (eee - 1))
+ Gofy[eee < 0] <- 0.0
+ Gofy[eee > 1] <- 1.0
Gofy
}
}
@@ -81,17 +82,17 @@ deunif <- function(x, min = 0, max = 1, log = FALSE) {
if (any(min >= max))
stop("argument 'min' has values greater or equal to argument 'max'")
- eee = (x - min) / (max - min)
+ eee <- (x - min) / (max - min)
if (log.arg) {
- ans = log(2) + log(eee) + log1p(-eee) -
- 2.0 * log(2*eee*(1-eee) - 1) - log(max - min)
- ans[eee <= 0.0] = log(0.0)
- ans[eee >= 1.0] = log(0.0)
+ ans <- log(2) + log(eee) + log1p(-eee) -
+ 2.0 * log(2*eee*(1-eee) - 1) - log(max - min)
+ ans[eee <= 0.0] <- log(0.0)
+ ans[eee >= 1.0] <- log(0.0)
} else {
gunif <- function(y)
as.numeric(y >= 0 & y <= 1) * 2*y*(1-y) / (2*y*(1-y) - 1)^2
- ans = gunif(eee) / (max - min)
+ ans <- gunif(eee) / (max - min)
}
ans
}
@@ -100,12 +101,8 @@ deunif <- function(x, min = 0, max = 1, log = FALSE) {
reunif <- function(n, min = 0, max = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- qeunif(runif(use.n), min = min, max = max)
+ qeunif(runif(n), min = min, max = max)
}
@@ -114,21 +111,21 @@ reunif <- function(n, min = 0, max = 1) {
qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10,
Tol_nr = 1.0e-6) {
- ppp = p
+ ppp <- p
if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
Tol_nr > 0.10)
stop("argument 'Tol_nr' is not a single ",
"positive value, or is too large")
- nrok = is.finite(ppp)
+ nrok <- is.finite(ppp)
- eee = qnorm(ppp, sd = 2/3)
+ eee <- qnorm(ppp, sd = 2/3)
- gnorm = function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
+ gnorm <- function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
for(iii in 1:Maxit_nr) {
realdiff <- (penorm(eee[nrok]) - ppp[nrok]) / gnorm(eee[nrok])
- eee[nrok] = eee[nrok] - realdiff
+ eee[nrok] <- eee[nrok] - realdiff
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
@@ -136,10 +133,10 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10,
if (max(abs(penorm(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
- eee[ppp == 0] = -Inf
- eee[ppp == 1] = Inf
- eee[ppp < 0] = NA
- eee[ppp > 1] = NA
+ eee[ppp == 0] <- -Inf
+ eee[ppp == 1] <- Inf
+ eee[ppp < 0] <- NA
+ eee[ppp > 1] <- NA
eee * ifelse(sd >= 0, sd, NaN) + mean
}
@@ -149,17 +146,17 @@ penorm <- function(q, mean = 0, sd = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- eee = (q - mean) / sd
- tmp1 = -dnorm(eee) - eee * pnorm(eee)
+ eee <- (q - mean) / sd
+ tmp1 <- -dnorm(eee) - eee * pnorm(eee)
if (log.arg) {
- logGofy = log(tmp1) - log(2 * tmp1 + eee)
- logGofy[eee <= -Inf] = -Inf
- logGofy[eee >= Inf] = 0.0
+ logGofy <- log(tmp1) - log(2 * tmp1 + eee)
+ logGofy[eee <= -Inf] <- -Inf
+ logGofy[eee >= Inf] <- 0.0
logGofy
} else {
- Gofy = tmp1 / (2 * tmp1 + eee)
- Gofy[eee <= -Inf] = 0.0
- Gofy[eee >= Inf] = 1.0
+ Gofy <- tmp1 / (2 * tmp1 + eee)
+ Gofy[eee <= -Inf] <- 0.0
+ Gofy[eee >= Inf] <- 1.0
Gofy
}
}
@@ -170,14 +167,14 @@ denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- eee = (x - mean) / sd
+ eee <- (x - mean) / sd
if (log.arg) {
- ans = dnorm(eee, log = TRUE) -
- 2.0 * log(eee * (1-2*pnorm(eee)) - 2*dnorm(eee)) - log(sd)
+ ans <- dnorm(eee, log = TRUE) -
+ 2.0 * log(eee * (1-2*pnorm(eee)) - 2*dnorm(eee)) - log(sd)
} else {
- gnorm = function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
- ans = gnorm(eee) / sd
- ans[sd <= 0.0] = NaN
+ gnorm <- function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
+ ans <- gnorm(eee) / sd
+ ans[sd <= 0.0] <- NaN
}
ans
}
@@ -186,12 +183,8 @@ denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
renorm <- function(n, mean = 0, sd = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- qenorm(runif(use.n), mean = mean, sd = sd)
+ qenorm(runif(n), mean = mean, sd = sd)
}
@@ -201,26 +194,26 @@ renorm <- function(n, mean = 0, sd = 1) {
qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
- ppp = p
- vsmallno = sqrt(.Machine$double.eps)
+ ppp <- p
+ vsmallno <- sqrt(.Machine$double.eps)
if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
Tol_nr > 0.10)
stop("argument 'Tol_nr' is not a single positive value, or ",
"is too large")
- nrok = ppp >= vsmallno & is.finite(ppp)
+ nrok <- ppp >= vsmallno & is.finite(ppp)
- eee = qf(1.0 * ppp, df1 = 4.0, df2 = 44)
+ eee <- qf(1.0 * ppp, df1 = 4.0, df2 = 44)
if ( any(rangex <- ppp < 0.8) )
- eee[rangex] = qrayleigh(ppp[rangex], scale = 0.8)
+ eee[rangex] <- qrayleigh(ppp[rangex], scale = 0.8)
- eee[ppp < vsmallno] = sqrt(ppp[ppp < vsmallno])
+ eee[ppp < vsmallno] <- sqrt(ppp[ppp < vsmallno])
for(iii in 1:Maxit_nr) {
realdiff <- (peexp(eee[nrok]) - ppp[nrok]) / deexp(eee[nrok])
- eee[nrok] = eee[nrok] - realdiff
+ eee[nrok] <- eee[nrok] - realdiff
if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol_nr )) break
if (iii == Maxit_nr) warning("did not converge")
}
@@ -228,11 +221,11 @@ qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
if (max(abs(peexp(eee[nrok]) - ppp[nrok])) > Tol_nr)
warning("did not converge on the second check")
- eee[ppp < vsmallno] = sqrt(ppp[ppp < vsmallno])
- eee[ppp == 0] = 0
- eee[ppp == 1] = Inf
- eee[ppp < 0] = NaN
- eee[ppp > 1] = NaN
+ eee[ppp < vsmallno] <- sqrt(ppp[ppp < vsmallno])
+ eee[ppp == 0] <- 0
+ eee[ppp == 1] <- Inf
+ eee[ppp < 0] <- NaN
+ eee[ppp > 1] <- NaN
eee / rate
}
@@ -242,18 +235,18 @@ peexp <- function(q, rate = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- eee = q * rate
+ eee <- q * rate
if (log.arg) {
- tmp1 = -expm1(-eee) - eee
- logGofy = log1p(- eee - exp(-eee)) - log(2 * tmp1 + eee - 1.0)
- logGofy[eee < 0] = log(0.0)
- logGofy[eee >= Inf] = log(1.0)
+ tmp1 <- -expm1(-eee) - eee
+ logGofy <- log1p(- eee - exp(-eee)) - log(2 * tmp1 + eee - 1.0)
+ logGofy[eee < 0] <- log(0.0)
+ logGofy[eee >= Inf] <- log(1.0)
logGofy
} else {
- tmp1 = -expm1(-eee) - eee
- Gofy = tmp1 / (2 * tmp1 + eee - 1.0)
- Gofy[eee < 0] = 0.0
- Gofy[eee >= Inf] = 1.0
+ tmp1 <- -expm1(-eee) - eee
+ Gofy <- tmp1 / (2 * tmp1 + eee - 1.0)
+ Gofy[eee < 0] <- 0.0
+ Gofy[eee >= Inf] <- 1.0
Gofy
}
}
@@ -267,15 +260,15 @@ deexp <- function(x, rate = 1, log = FALSE) {
if (any(rate <= 0))
stop("argument 'rate' must have positive values")
- eee = x * rate
+ eee <- x * rate
if (log.arg) {
- ans = log(eee) - eee + 2.0 * log((1-x) - 2 * exp(-x)) + log(rate)
+ ans <- log(eee) - eee + 2.0 * log((1-x) - 2 * exp(-x)) + log(rate)
} else {
- gexp = function(y)
+ gexp <- function(y)
as.numeric(y >= 0) * y * exp(-y) / ((1-y) - 2 * exp(-y))^2
- ans = gexp(eee) * rate
- ans[rate <= 0.0] = NaN
+ ans <- gexp(eee) * rate
+ ans[rate <= 0.0] <- NaN
}
ans
}
@@ -283,11 +276,7 @@ deexp <- function(x, rate = 1, log = FALSE) {
reexp <- function(n, rate = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- qeexp(runif(use.n), rate = rate)
+ qeexp(runif(n), rate = rate)
}
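The q-functions above invert their p-counterparts by Newton-Raphson (the code even re-checks the inversion), so a basic consistency check is (a sketch):

  pvec <- seq(0.01, 0.99, by = 0.01)
  max(abs(peunif(qeunif(pvec)) - pvec))                            # near 0
  max(abs(penorm(qenorm(pvec, mean = 1, sd = 2),
                 mean = 1, sd = 2) - pvec))                        # near 0
  max(abs(peexp(qeexp(pvec, rate = 0.5), rate = 0.5) - pvec))      # near 0

  # The corresponding densities should integrate to 1 over their support:
  integrate(deunif, lower = 0, upper = 1)$value
  integrate(denorm, lower = -Inf, upper = Inf)$value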
@@ -358,8 +347,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
llocation = "identity", lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
- zero = 2)
-{
+ zero = 2) {
@@ -461,9 +449,9 @@ rkoenker <- function(n, location = 0, scale = 1) {
for(ii in 1:length( .percentile )) {
y.use <- if (ncoly > 1) y[, ii] else y
mu <- cbind(mu)
- extra$percentile[ii] = 100 * weighted.mean(y.use <= mu[, ii], w)
+ extra$percentile[ii] <- 100 * weighted.mean(y.use <= mu[, ii], w)
}
- names(extra$percentile) = colnames(mu)
+ names(extra$percentile) <- colnames(mu)
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.imethod = imethod, .percentile = percentile ))),
@@ -497,12 +485,12 @@ rkoenker <- function(n, location = 0, scale = 1) {
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
weight = eval(substitute(expression({
- ed2l.dlocat2 <- 0.3 / Scale^2
- ed2l.dscale2 <- 2.0 / (3 * Scale^2)
+ ned2l.dlocat2 <- 0.3 / Scale^2
+ ned2l.dscale2 <- 2.0 / (3 * Scale^2)
- wz <- matrix(-10, n, M) # Diagonal EIM
- wz[, iam(1, 1, M = M)] <- ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2, 2, M = M)] <- ed2l.dscale2 * dscale.deta^2
+ wz <- matrix(-10, n, M) # Diagonal EIM
+ wz[, iam(1, 1, M = M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2, M = M)] <- ned2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .llocat = llocat, .lscale = lscale,
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 1ee4b0e..14ee420 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -33,17 +33,18 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
if (length(shape) != use.n)
shape <- rep(shape, length.out = use.n)
if (length(location) != use.n)
- location <- rep(location, length.out = use.n);
+ location <- rep(location, length.out = use.n)
if (length(scale) != use.n)
scale <- rep(scale, length.out = use.n)
- scase <- abs(shape) < sqrt(.Machine$double.eps)
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
nscase <- sum(scase)
if (use.n - nscase)
ans[!scase] <- location[!scase] + scale[!scase] *
((-log(runif(use.n - nscase)))^(-shape[!scase]) -1) / shape[!scase]
if (nscase)
- ans[scase] <- rgumbel(nscase, location[scase], scale[scase])
+ ans[scase] <- rgumbel(nscase, location = location[scase],
+ scale = scale[scase])
ans[scale <= 0] <- NaN
ans
}
@@ -51,10 +52,10 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt(.Machine$double.eps),
+ tolshape0 = sqrt( .Machine$double.eps ),
oobounds.log = -Inf, giveWarning = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
+ stop("bad input for argument 'log'")
rm(log)
if (oobounds.log > 0)
stop("bad input for argument 'oobounds.log'")
@@ -62,26 +63,26 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'tolshape0'")
- use.n = max(length(x), length(location), length(scale), length(shape))
+ use.n <- max(length(x), length(location), length(scale), length(shape))
if (length(shape) != use.n)
shape <- rep(shape, length.out = use.n)
if (length(location) != use.n)
- location <- rep(location, length.out = use.n);
+ location <- rep(location, length.out = use.n)
if (length(scale) != use.n)
scale <- rep(scale, length.out = use.n)
- x <- rep(x, length.out = use.n)
+ x <- rep(x, length.out = use.n)
logdensity <- rep(log(0), length.out = use.n)
- scase <- abs(shape) < tolshape0
+ scase <- (abs(shape) < tolshape0)
nscase <- sum(scase)
if (use.n - nscase) {
- zedd <- 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
+ zedd <- 1 + shape * (x - location) / scale # pmax(0, (1+shape*xc/scale))
xok <- (!scase) & (zedd > 0)
logdensity[xok] <- -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
- (1 + 1/shape[xok]) * log(zedd[xok])
+ (1 + 1/shape[xok]) * log(zedd[xok])
outofbounds <- (!scase) & (zedd <= 0)
if (any(outofbounds)) {
logdensity[outofbounds] <- oobounds.log
@@ -119,16 +120,18 @@ pgev <- function(q, location = 0, scale = 1, shape = 0) {
if (length(scale) != use.n)
scale <- rep(scale, length.out = use.n)
if (length(q) != use.n)
- q <- rep(q - location, length.out = use.n)
+ q <- rep(q, length.out = use.n)
- scase <- abs(shape) < sqrt(.Machine$double.eps)
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
nscase <- sum(scase)
+ zedd <- (q - location) / scale
if (use.n - nscase) {
- zedd <- pmax(0, (1 + shape * q / scale))
- ans[!scase] <- exp(-zedd[!scase]^(-1 / shape[!scase]))
+ use.zedd <- pmax(0, 1 + shape * zedd)
+ ans[!scase] <- exp(-use.zedd[!scase]^(-1 / shape[!scase]))
}
if (nscase) {
- ans[scase] <- pgumbel(q[scase], location[scase], scale[scase])
+ ans[scase] <- pgumbel(q[scase], location = location[scase],
+ scale = scale[scase])
}
ans[scale <= 0] <- NaN
ans
@@ -156,14 +159,15 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
p <- rep(p, length.out = use.n)
- scase <- abs(shape) < sqrt(.Machine$double.eps)
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
nscase <- sum(scase)
if (use.n - nscase) {
ans[!scase] <- location[!scase] + scale[!scase] *
((-log(p[!scase]))^(-shape[!scase]) - 1) / shape[!scase]
}
if (nscase)
- ans[scase] <- qgumbel(p[scase], location[scase], scale[scase])
+ ans[scase] <- qgumbel(p[scase], location = location[scase],
+ scale = scale[scase])
ans[scale <= 0] <- NaN
ans
}
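qgev() inverts pgev(), and both reduce to the Gumbel forms when the shape is numerically zero; a quick check (a sketch):

  pvec <- seq(0.05, 0.95, by = 0.05)
  qq <- qgev(pvec, location = 1, scale = 2, shape = 0.3)
  max(abs(pgev(qq, location = 1, scale = 2, shape = 0.3) - pvec))   # near 0

  max(abs(pgev(0:5, location = 1, scale = 2, shape = 0) -
          pgumbel(0:5, location = 1, scale = 2)))                   # near 0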
@@ -180,8 +184,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
iscale = NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45),
tolshape0 = 0.001, giveWarning = TRUE,
- zero = 3)
-{
+ zero = 3) {
@@ -236,12 +239,12 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
new("vglmff",
blurb = c("Generalized extreme value distribution\n",
- "Links: ",
- namesof("location", llocat, elocat), ", ",
- namesof("scale", lscale, escale), ", ",
- namesof("shape", lshape, eshape)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ "Links: ",
+ namesof("location", llocat, elocat), ", ",
+ namesof("scale", lscale, escale), ", ",
+ namesof("shape", lshape, eshape)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 3,
@@ -273,14 +276,14 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
- y = as.matrix(y)
+ y <- as.matrix(y)
if (ncol(y) > 1)
- y = -t(apply(-y, 1, sort, na.last = TRUE))
+ y <- -t(apply(-y, 1, sort, na.last = TRUE))
@@ -288,19 +291,19 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
- r.vec = rowSums(cbind(!is.na(y)))
+ r.vec <- rowSums(cbind(!is.na(y)))
if (any(r.vec == 0))
stop("A row contains all missing values")
- extra$percentiles = .percentiles
+ extra$percentiles <- .percentiles
if (!length(etastart)) {
- init.sig = if (length( .iscale ))
+ init.sig <- if (length( .iscale ))
rep( .iscale, length.out = nrow(y)) else NULL
- init.xi = if (length( .ishape ))
+ init.xi <- if (length( .ishape ))
rep( .ishape, length.out = nrow(y)) else NULL
- LIST.lshape = .lshape
+ LIST.lshape <- .lshape
if ( .lshape == "elogit" && length(init.xi) &&
(any(init.xi <= LIST.lshape$min |
@@ -308,22 +311,22 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
stop("bad input for an argument in 'lshape'")
if ( .imethod == 1) {
- nvector = 4:10 # Arbitrary; could be made an argument
- ynvector = quantile(y[, 1], probs = 1-1/nvector)
- objecFunction = -Inf # Actually the log-likelihood
- est.sigma = !length(init.sig)
- gshape = .gshape
- temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], length.out = 12)
+ nvector <- 4:10 # Arbitrary; could be made an argument
+ ynvector <- quantile(y[, 1], probs = 1-1/nvector)
+ objecFunction <- -Inf # Actually the log-likelihood
+ est.sigma <- !length(init.sig)
+ gshape <- .gshape
+ temp234 <- if (length(init.xi)) init.xi[1] else
+ seq(gshape[1], gshape[2], length.out = 12)
for(shapeTry in temp234) {
- xvec = if (abs(shapeTry) < .tolshape0) log(nvector) else
- (nvector^shapeTry - 1) / shapeTry
- fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
+ xvec <- if (abs(shapeTry) < .tolshape0) log(nvector) else
+ (nvector^shapeTry - 1) / shapeTry
+ fit0 <- lsfit(x = xvec, y = ynvector, intercept = TRUE)
sigmaTry = if (est.sigma)
rep(fit0$coef["X"], length.out = nrow(y)) else
init.sig
- LocatTry = rep(fit0$coef["Intercept"], length.out = nrow(y))
- llTry = egev(giveWarning =
+ LocatTry <- rep(fit0$coef["Intercept"], length.out = nrow(y))
+ llTry <- egev(giveWarning =
FALSE)@loglikelihood(mu = NULL, y = y[, 1], w = w,
residuals = FALSE,
eta =
@@ -332,29 +335,29 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
theta2eta(shapeTry, .lshape , .eshape )))
if (llTry >= objecFunction) {
if (est.sigma)
- init.sig = sigmaTry
- init.mu = rep(LocatTry, length.out = nrow(y))
- objecFunction = llTry
- bestxi = shapeTry
+ init.sig <- sigmaTry
+ init.mu <- rep(LocatTry, length.out = nrow(y))
+ objecFunction <- llTry
+ bestxi <- shapeTry
}
}
if (!length(init.xi))
- init.xi = rep(bestxi, length.out = nrow(y))
+ init.xi <- rep(bestxi, length.out = nrow(y))
} else {
- init.xi = rep(0.05, length.out = nrow(y))
+ init.xi <- rep(0.05, length.out = nrow(y))
if (!length(init.sig))
- init.sig = rep(sqrt(6 * var(y[, 1]))/pi,
- length.out = nrow(y))
+ init.sig <- rep(sqrt(6 * var(y[, 1]))/pi,
+ length.out = nrow(y))
EulerM <- -digamma(1)
- init.mu = rep(median(y[, 1]) - EulerM*init.sig,
- length.out = nrow(y))
+ init.mu <- rep(median(y[, 1]) - EulerM*init.sig,
+ length.out = nrow(y))
}
- bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
+ bad <- ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
if (fred <- sum(bad)) {
warning(paste(fred, "observations violating boundary",
"constraints while initializing. Taking corrective action."))
- init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
+ init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
}
etastart <-
@@ -377,25 +380,26 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
sigma <- eta2theta(eta[, 2], .lscale , .escale )
shape <- eta2theta(eta[, 3], .lshape , .eshape )
- is.zero = (abs(shape) < .tolshape0 )
- cent = extra$percentiles
- LP = length(cent)
- fv = matrix(as.numeric(NA), nrow(eta), LP)
+ is.zero <- (abs(shape) < .tolshape0 )
+ cent <- extra$percentiles
+ LP <- length(cent)
+ fv <- matrix(as.numeric(NA), nrow(eta), LP)
if (LP) {
for(ii in 1:LP) {
- yp = -log(cent[ii]/100)
- fv[!is.zero,ii] = Locat[!is.zero] - sigma[!is.zero] *
- (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
- fv[is.zero,ii] = Locat[is.zero] - sigma[is.zero] * log(yp)
+ yp <- -log(cent[ii]/100)
+ fv[!is.zero, ii] <- Locat[!is.zero] - sigma[!is.zero] *
+ (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
+ fv[ is.zero, ii] <- Locat[ is.zero] - sigma[ is.zero] *
+ log(yp)
}
- dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep = ""))
+ dimnames(fv) <- list(dimnames(eta)[[1]],
+ paste(as.character(cent), "%", sep = ""))
} else {
EulerM <- -digamma(1)
- fv = Locat + sigma * EulerM # When shape = 0, is Gumbel
- fv[!is.zero] = Locat[!is.zero] + sigma[!is.zero] *
- (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
- fv[shape >= 1] = NA # Mean exists only if shape < 1.
+ fv <- Locat + sigma * EulerM # When shape = 0, is Gumbel
+ fv[!is.zero] <- Locat[!is.zero] + sigma[!is.zero] *
+ (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
+ fv[shape >= 1] <- NA # Mean exists only if shape < 1.
}
fv
}, list(
@@ -423,12 +427,12 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
- misc$true.mu = !length( .percentiles) # @fitted is not a true mu
- misc$percentiles = .percentiles
- misc$expected = TRUE
- misc$tolshape0 = .tolshape0
+ misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles <- .percentiles
+ misc$expected <- TRUE
+ misc$tolshape0 <- .tolshape0
if (ncol(y) == 1)
- y = as.vector(y)
+ y <- as.vector(y)
if (any(shape < -0.5))
warning("some values of the shape parameter are less than -0.5")
}), list(
@@ -443,32 +447,32 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
shape <- eta2theta(eta[, 3], .lshape , .eshape )
- is.zero = (abs(shape) < .tolshape0)
- zedd = (y-Locat) / sigma
- r.vec = rowSums(cbind(!is.na(y)))
- A = 1 + shape * (y-Locat)/sigma
- ii = 1:nrow(eta)
- A1 = A[cbind(ii, r.vec)]
- mytolerance = 0 # .Machine$double.eps
+ is.zero <- (abs(shape) < .tolshape0)
+ zedd <- (y-Locat) / sigma
+ r.vec <- rowSums(cbind(!is.na(y)))
+ A <- 1 + shape * (y-Locat)/sigma
+ ii <- 1:nrow(eta)
+ A1 <- A[cbind(ii, r.vec)]
+ mytolerance <- 0 # .Machine$double.eps
if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
cat("There are", sum(bad),
"range violations in @loglikelihood\n")
flush.console()
}
- igev = !is.zero & !bad
- igum = is.zero & !bad
- pow = 1 + 1/shape[igev]
+ igev <- !is.zero & !bad
+ igum <- is.zero & !bad
+ pow <- 1 + 1/shape[igev]
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- old.answer =
- sum(bad) * (-1.0e10) +
- sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
- exp(-zedd[igum,r.vec]) -
- rowSums(cbind(zedd, na.rm = TRUE)))) +
- sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
- pow*rowSums(cbind(log(A[igev])), na.rm = TRUE) -
- A1[igev]^(-1/shape[igev])))
+ old.answer <-
+ sum(bad) * (-1.0e10) +
+ sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
+ exp(-zedd[igum,r.vec]) -
+ rowSums(cbind(zedd, na.rm = TRUE)))) +
+ sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
+ pow*rowSums(cbind(log(A[igev])), na.rm = TRUE) -
+ A1[igev]^(-1/shape[igev])))
old.answer
}
}, list(
@@ -481,7 +485,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
vfamily = c("gev", "vextremes"),
deriv = eval(substitute(expression({
Musual <- 3
- r.vec = rowSums(cbind(!is.na(y)))
+ r.vec <- rowSums(cbind(!is.na(y)))
Locat <- eta2theta(eta[, 1], .llocat , .elocat )
sigma <- eta2theta(eta[, 2], .lscale , .escale )
@@ -494,41 +498,41 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
dxi.deta <- dtheta.deta(shape, .lshape , .eshape )
- is.zero = (abs(shape) < .tolshape0)
- ii = 1:nrow(eta)
- zedd = (y-Locat) / sigma
- A = 1 + shape * zedd
- dA.dxi = zedd # matrix
- dA.dmu = -shape/sigma # vector
- dA.dsigma = -shape*zedd/sigma # matrix
- pow = 1 + 1/shape
- A1 = A[cbind(ii, r.vec)]
-
- AAr1 = dA.dmu/(shape * A1^pow) -
- pow * rowSums(cbind(dA.dmu/A), na.rm = TRUE)
- AAr2 = dA.dsigma[cbind(ii,r.vec)] / (shape * A1^pow) -
- pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
- AAr3 = 1/(shape * A1^pow) -
- pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
- dl.dmu = AAr1
- dl.dsi = AAr2 - r.vec/sigma
- dl.dxi = rowSums(cbind(log(A)), na.rm = TRUE)/shape^2 -
- pow * rowSums(cbind(dA.dxi/A), na.rm = TRUE) -
- (log(A1) / shape^2 -
- dA.dxi[cbind(ii,r.vec)] / (shape*A1)) * A1^(-1/shape)
-
- if (any(is.zero)) {
- zorro = c(zedd[cbind(1:n,r.vec)])
- zorro = zorro[is.zero]
- ezedd = exp(-zorro)
- dl.dmu[is.zero] = (1-ezedd) / sigma[is.zero]
- dl.dsi[is.zero] = (zorro * (1-ezedd) - 1) / sigma[is.zero]
- dl.dxi[is.zero] = zorro * ((1 - ezedd) * zorro / 2 - 1)
- }
+ is.zero <- (abs(shape) < .tolshape0)
+ ii <- 1:nrow(eta)
+ zedd <- (y-Locat) / sigma
+ A <- 1 + shape * zedd
+ dA.dxi <- zedd # matrix
+ dA.dmu <- -shape/sigma # vector
+ dA.dsigma <- -shape*zedd/sigma # matrix
+ pow <- 1 + 1/shape
+ A1 <- A[cbind(ii, r.vec)]
+
+ AAr1 <- dA.dmu/(shape * A1^pow) -
+ pow * rowSums(cbind(dA.dmu/A), na.rm = TRUE)
+ AAr2 <- dA.dsigma[cbind(ii,r.vec)] / (shape * A1^pow) -
+ pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
+ AAr3 <- 1/(shape * A1^pow) -
+ pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
+ dl.dmu <- AAr1
+ dl.dsi <- AAr2 - r.vec/sigma
+ dl.dxi <- rowSums(cbind(log(A)), na.rm = TRUE)/shape^2 -
+ pow * rowSums(cbind(dA.dxi/A), na.rm = TRUE) -
+ (log(A1) / shape^2 -
+ dA.dxi[cbind(ii,r.vec)] / (shape*A1)) * A1^(-1/shape)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsi * dsi.deta,
- dl.dxi * dxi.deta)
+ if (any(is.zero)) {
+ zorro <- c(zedd[cbind(1:n,r.vec)])
+ zorro <- zorro[is.zero]
+ ezedd <- exp(-zorro)
+ dl.dmu[is.zero] <- (1-ezedd) / sigma[is.zero]
+ dl.dsi[is.zero] <- (zorro * (1-ezedd) - 1) / sigma[is.zero]
+ dl.dxi[is.zero] <- zorro * ((1 - ezedd) * zorro / 2 - 1)
+ }
+
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsi * dsi.deta,
+ dl.dxi * dxi.deta)
}), list(
.llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
@@ -536,92 +540,88 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
.tolshape0 = tolshape0 ))),
weight = eval(substitute(expression({
- kay = -shape
- dd = digamma(r.vec-kay+1)
- ddd = digamma(r.vec+1) # Unnecessarily evaluated at each iteration
- temp13 = -kay * dd + (kay^2 - kay + 1) / (1-kay)
- temp33 = 1 - 2 * kay * ddd +
- kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
- temp23 = -kay * dd + (1+(1-kay)^2) / (1-kay)
- GR.gev = function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
- tmp2 = (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
- tmp1 = (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
- k0 = (1-2*kay)
- k1 = k0 * kay
- k2 = k1 * kay
- k3 = k2 * kay # kay^3 * (1-2*kay)
-
- wz = matrix(as.numeric(NA), n, 6)
- wz[, iam(1, 1, M)] = tmp2 / (sigma^2 * k0)
- wz[, iam(1, 2, M)] = (tmp2 - tmp1) / (sigma^2 * k1)
- wz[, iam(1, 3, M)] = (tmp1 * temp13 - tmp2) / (sigma * k2)
- wz[, iam(2, 2, M)] = (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
- wz[, iam(2, 3, M)] = (r.vec*k1*ddd + tmp1 *
- temp23 - tmp2 - r.vec*k0) / (sigma * k3)
- wz[, iam(3, 3, M)] = (2*tmp1*(-temp13) + tmp2 +
- r.vec*k0*temp33)/(k3*kay)
+ kay <- -shape
+ dd <- digamma(r.vec-kay+1)
+ ddd <- digamma(r.vec+1) # Unnecessarily evaluated at each iteration
+ temp13 <- -kay * dd + (kay^2 - kay + 1) / (1-kay)
+ temp33 <- 1 - 2 * kay * ddd +
+ kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
+ temp23 <- -kay * dd + (1+(1-kay)^2) / (1-kay)
+ GR.gev <- function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
+ tmp2 <- (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
+ tmp1 <- (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
+ k0 <- (1-2*kay)
+ k1 <- k0 * kay
+ k2 <- k1 * kay
+ k3 <- k2 * kay # kay^3 * (1-2*kay)
+
+ wz <- matrix(as.numeric(NA), n, 6)
+ wz[, iam(1, 1, M)] <- tmp2 / (sigma^2 * k0)
+ wz[, iam(1, 2, M)] <- (tmp2 - tmp1) / (sigma^2 * k1)
+ wz[, iam(1, 3, M)] <- (tmp1 * temp13 - tmp2) / (sigma * k2)
+ wz[, iam(2, 2, M)] <- (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
+ wz[, iam(2, 3, M)] <- (r.vec*k1*ddd + tmp1 *
+ temp23 - tmp2 - r.vec*k0) / (sigma * k3)
+ wz[, iam(3, 3, M)] <- (2*tmp1*(-temp13) + tmp2 +
+ r.vec*k0*temp33)/(k3*kay)
if (any(is.zero)) {
if (ncol(y) > 1)
stop("cannot handle shape == 0 with a multivariate response")
- EulerM <- -digamma(1)
- wz[is.zero, iam(2, 2, M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[is.zero, iam(3, 3, M)] = 2.4236
- wz[is.zero, iam(1, 2, M)] = (digamma(2) +
- 2*(EulerM-1)) / sigma^2
- wz[is.zero, iam(1, 3, M)]= -(trigamma(1)/2 + digamma(1)*
- (digamma(1)/2+1))/sigma
- wz[is.zero, iam(2, 3, M)] = (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) +
- 2*dgammadx(1, 3)/3) / sigma
-
- if (FALSE ) {
- wz[, iam(1, 2, M)] = 2 * r.vec / sigma^2
- wz[, iam(2, 2, M)] = -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
- (4 * dgammadx(r.vec+1, deriv.arg = 1) -
- 3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec) # Not checked
- }
+ EulerM <- -digamma(1)
+ wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma^2
+ wz[is.zero, iam(3, 3, M)] <- 2.4236
+ wz[is.zero, iam(1, 2, M)] <- (digamma(2) +
+ 2*(EulerM-1)) / sigma^2
+ wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1, 3)/3) / sigma
+
+ if (FALSE ) {
+ wz[, iam(1, 2, M)] <- 2 * r.vec / sigma^2
+ wz[, iam(2, 2, M)] <- -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
+ (4 * dgammadx(r.vec+1, deriv.arg = 1) -
+ 3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec) # Not checked
}
+ }
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dmu.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dsi.deta^2
- wz[, iam(3, 3, M)] = wz[, iam(3, 3, M)] * dxi.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
- wz[, iam(1, 3, M)] = wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
- wz[, iam(2, 3, M)] = wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
+ wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
+ wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
+ wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
c(w) * wz
- }), list(
- .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape
-
- ))))
-
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape ))))
}
dgammadx <- function(x, deriv.arg = 1) {
- if (deriv.arg == 0) {
- gamma(x)
- } else if (deriv.arg == 1) {
- digamma(x) * gamma(x)
- } else if (deriv.arg == 2) {
- gamma(x) * (trigamma(x) + digamma(x)^2)
- } else if (deriv.arg == 3) {
- gamma(x) * (psigamma(x, deriv = 2) +
- 2 * digamma(x) * trigamma(x)) +
- Recall(x, deriv.arg = 1) * (trigamma(x) + digamma(x)^2)
- } else if (deriv.arg == 4) {
- Recall(x, deriv.arg = 2) * (trigamma(x) + digamma(x)^2) +
- 2 * Recall(x, deriv.arg = 1) * (psigamma(x, deriv = 2) +
- 2*digamma(x) * trigamma(x)) +
- gamma(x) * (psigamma(x, deriv = 3) + 2*trigamma(x)^2 +
- 2 * digamma(x) * psigamma(x, deriv = 2))
- } else {
- stop("cannot handle 'deriv' > 4")
- }
+ if (deriv.arg == 0) {
+ gamma(x)
+ } else if (deriv.arg == 1) {
+ digamma(x) * gamma(x)
+ } else if (deriv.arg == 2) {
+ gamma(x) * (trigamma(x) + digamma(x)^2)
+ } else if (deriv.arg == 3) {
+ gamma(x) * (psigamma(x, deriv = 2) +
+ 2 * digamma(x) * trigamma(x)) +
+ Recall(x, deriv.arg = 1) * (trigamma(x) + digamma(x)^2)
+ } else if (deriv.arg == 4) {
+ Recall(x, deriv.arg = 2) * (trigamma(x) + digamma(x)^2) +
+ 2 * Recall(x, deriv.arg = 1) * (psigamma(x, deriv = 2) +
+ 2*digamma(x) * trigamma(x)) +
+ gamma(x) * (psigamma(x, deriv = 3) + 2*trigamma(x)^2 +
+ 2 * digamma(x) * psigamma(x, deriv = 2))
+ } else {
+ stop("cannot handle 'deriv' > 4")
+ }
}
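dgammadx() returns derivatives of the gamma function, so a finite-difference spot check of the first two orders is (a sketch; the differences are nonzero only up to finite-difference error):

  xx <- 3.7; hh <- 1e-4
  dgammadx(xx, deriv.arg = 1) - (gamma(xx + hh) - gamma(xx - hh)) / (2 * hh)
  dgammadx(xx, deriv.arg = 2) -
    (gamma(xx + hh) - 2 * gamma(xx) + gamma(xx - hh)) / hh^2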
@@ -634,8 +634,7 @@ dgammadx <- function(x, deriv.arg = 1) {
iscale = NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45),
tolshape0 = 0.001, giveWarning = TRUE,
- zero = 3)
-{
+ zero = 3) {
if (!is.logical(giveWarning) || length(giveWarning) != 1)
stop("bad input for argument 'giveWarning'")
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
@@ -670,7 +669,8 @@ dgammadx <- function(x, deriv.arg = 1) {
stop("argument 'imethod' must be 1 or 2")
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
+ if (!is.Numeric(tolshape0, allowable.length = 1,
+ positive = TRUE) ||
tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
if (length(zero) &&
@@ -684,235 +684,235 @@ dgammadx <- function(x, deriv.arg = 1) {
namesof("location", link = llocat, earg = elocat), ", ",
namesof("scale", link = lscale, earg = escale), ", ",
namesof("shape", link = lshape, earg = eshape)),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names <-
- c(namesof("location", .llocat , earg = .elocat , short = TRUE),
- namesof("scale", .lscale , earg = .escale , short = TRUE),
- namesof("shape", .lshape , earg = .eshape , short = TRUE))
-
-
-
-
-
- if (ncol(as.matrix(y)) != 1)
- stop("response must be a vector or one-column matrix")
-
-
-
-
-
- if (!length(etastart)) {
- init.sig = if (length( .iscale ))
- rep( .iscale, length.out = length(y)) else NULL
- init.xi = if (length( .ishape ))
- rep( .ishape, length.out = length(y)) else NULL
- eshape = .eshape
- if ( .lshape == "elogit" && length(init.xi) &&
- (any(init.xi <= eshape$min | init.xi >= eshape$max)))
- stop("bad input for argument 'eshape'")
- if ( .imethod == 1) {
- nvector = 4:10 # Arbitrary; could be made an argument
- ynvector = quantile(y, probs = 1-1/nvector)
- objecFunction = -Inf # Actually the log-likelihood
- est.sigma = !length(init.sig)
- gshape = .gshape
- temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], length.out = 12)
- for(xi.try in temp234) {
- xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
- (nvector^xi.try - 1) / xi.try
- fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
- if (est.sigma) {
- sigmaTry = rep(fit0$coef["X"], length.out = length(y))
- } else {
- sigmaTry = init.sig
- }
- muTry = rep(fit0$coef["Intercept"], length.out = length(y))
- llTry = egev(giveWarning=
- FALSE)@loglikelihood(mu = NULL, y = y, w = w,
- residuals = FALSE,
- eta = cbind(theta2eta(muTry, .llocat , earg = .elocat ),
- theta2eta(sigmaTry, .lscale , earg = .escale ),
- theta2eta(xi.try, .lshape , earg = .eshape )))
- if (llTry >= objecFunction) {
- if (est.sigma)
- init.sig = sigmaTry
- init.mu = rep(muTry, length.out = length(y))
- objecFunction = llTry
- bestxi = xi.try
- }
- }
- if (!length(init.xi))
- init.xi = rep(bestxi, length.out = length(y))
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , short = TRUE),
+ namesof("scale", .lscale , earg = .escale , short = TRUE),
+ namesof("shape", .lshape , earg = .eshape , short = TRUE))
- } else {
- init.xi = rep(if (length(init.xi)) init.xi else 0.05,
- length.out = length(y))
- if (!length(init.sig))
- init.sig = rep(sqrt(6*var(y))/pi,
- length.out = length(y))
- EulerM <- -digamma(1)
- init.mu = rep(median(y) - EulerM * init.sig,
- length.out = length(y))
- }
- bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
- if (fred <- sum(bad, na.rm = TRUE)) {
- warning(paste(fred, "observations violating boundary",
- "constraints while initializing. Taking corrective action."))
- init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
- }
- extra$percentiles = .percentiles
- etastart <-
- cbind(theta2eta(init.mu, .llocat , earg = .elocat ),
- theta2eta(init.sig, .lscale , earg = .escale ),
- theta2eta(init.xi, .lshape , earg = .eshape ))
- }
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .percentiles = percentiles, .tolshape0 = tolshape0,
- .imethod = imethod,
- .giveWarning= giveWarning,
- .iscale = iscale, .ishape = ishape, .gshape = gshape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- is.zero <- (abs(xi) < .tolshape0)
- cent = extra$percentiles
- LP <- length(cent)
- fv <- matrix(as.numeric(NA), nrow(eta), LP)
- if (LP) {
- for(ii in 1:LP) {
- yp = -log(cent[ii]/100)
- fv[!is.zero,ii] = loc[!is.zero] - sigma[!is.zero] *
- (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
- fv[is.zero,ii] = loc[is.zero] - sigma[is.zero] * log(yp)
+ if (ncol(as.matrix(y)) != 1)
+ stop("response must be a vector or one-column matrix")
+
+
+
+ if (!length(etastart)) {
+ init.sig <- if (length( .iscale ))
+ rep( .iscale , length.out = length(y)) else NULL
+ init.xi <- if (length( .ishape ))
+ rep( .ishape , length.out = length(y)) else NULL
+ eshape <- .eshape
+ if ( .lshape == "elogit" && length(init.xi) &&
+ (any(init.xi <= eshape$min | init.xi >= eshape$max)))
+ stop("bad input for argument 'eshape'")
+ if ( .imethod == 1) {
+ nvector <- 4:10 # Arbitrary; could be made an argument
+ ynvector <- quantile(y, probs = 1-1/nvector)
+ objecFunction <- -Inf # Actually the log-likelihood
+ est.sigma <- !length(init.sig)
+ gshape <- .gshape
+ temp234 <- if (length(init.xi)) init.xi[1] else
+ seq(gshape[1], gshape[2], length.out = 12)
+ for(xi.try in temp234) {
+ xvec <- if (abs(xi.try) < .tolshape0) log(nvector) else
+ (nvector^xi.try - 1) / xi.try
+ fit0 <- lsfit(x = xvec, y=ynvector, intercept = TRUE)
+ if (est.sigma) {
+ sigmaTry <- rep(fit0$coef["X"], length.out = length(y))
+ } else {
+ sigmaTry <- init.sig
}
- dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep = ""))
- } else {
- EulerM <- -digamma(1)
- fv = loc + sigma * EulerM # When xi = 0, is Gumbel
- fv[!is.zero] = loc[!is.zero] + sigma[!is.zero] *
- (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
- fv[xi >= 1] = NA # Mean exists only if xi < 1.
+ muTry <- rep(fit0$coef["Intercept"],
+ length.out = length(y))
+ llTry <- egev(giveWarning = FALSE)@loglikelihood(mu = NULL,
+ y = y, w = w,
+ residuals = FALSE,
+ eta = cbind(theta2eta(muTry, .llocat , earg = .elocat ),
+ theta2eta(sigmaTry, .lscale , earg = .escale ),
+ theta2eta(xi.try, .lshape , earg = .eshape )))
+ if (llTry >= objecFunction) {
+ if (est.sigma)
+ init.sig <- sigmaTry
+ init.mu <- rep(muTry, length.out = length(y))
+ objecFunction <- llTry
+ bestxi <- xi.try
+ }
}
- fv
- }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0 ))),
- last = eval(substitute(expression({
- misc$links <- c(location = .llocat,
- scale = .lscale ,
- shape = .lshape)
- misc$true.mu = !length( .percentiles) # @fitted is not a true mu
- misc$percentiles <- .percentiles
- misc$earg = list(location = .elocat,
- scale = .escale,
- shape = .eshape)
- misc$tolshape0 = .tolshape0
- misc$expected = TRUE
- if (any(xi < -0.5))
- warning("some values of the shape parameter are less than -0.5")
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0, .percentiles = percentiles ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mmu <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ if (!length(init.xi))
+ init.xi <- rep(bestxi, length.out = length(y))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgev(x=y, location=mmu, scale=sigma, shape=xi,
- tolshape0 = .tolshape0,
- log = TRUE, oobounds.log = -1.0e04,
- giveWarning= .giveWarning))
- }
- }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
- vfamily = c("egev", "vextremes"),
- deriv = eval(substitute(expression({
- Locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
- xi = eta2theta(eta[, 3], .lshape , earg = .eshape)
- is.zero <- (abs(xi) < .tolshape0)
- zedd = (y-Locat) / sigma
- A = 1 + xi * zedd
- dA.dxi = zedd
- dA.dmu = -xi / sigma
- dA.dsigma = -xi * zedd / sigma
- pow = 1 + 1/xi
- if (any(bad <- A <= 0, na.rm = TRUE))
- stop(sum(bad, na.rm = TRUE),
- " observations violating boundary constraints in '@deriv'")
- AA = 1/(xi*A^pow)- pow/A
- dl.dmu = dA.dmu * AA
- dl.dsi = dA.dsigma * AA - 1/sigma
- dl.dxi = log(A)/xi^2 - pow * dA.dxi / A -
- (log(A)/xi^2 - dA.dxi /(xi*A)) * A^(-1/xi)
- if (any(is.zero)) {
- ezedd = exp(-zedd[is.zero])
- dl.dmu[is.zero] = (1 - ezedd) / sigma[is.zero]
- dl.dsi[is.zero] = (zedd[is.zero] *
- (1 - ezedd) - 1) / sigma[is.zero]
- dl.dxi[is.zero] = zedd[is.zero] *
- ((1 - ezedd) * zedd[is.zero] / 2 - 1)
+ } else {
+      init.xi <- rep(if (length(init.xi)) init.xi else 0.05,
+ length.out = length(y))
+ if (!length(init.sig))
+ init.sig <- rep(sqrt(6*var(y))/pi,
+ length.out = length(y))
+ EulerM <- -digamma(1)
+ init.mu <- rep(median(y) - EulerM * init.sig,
+ length.out = length(y))
}
- dmu.deta = dtheta.deta(Locat, .llocat , earg = .elocat )
- dsi.deta = dtheta.deta(sigma, .lscale , earg = .escale )
- dxi.deta = dtheta.deta(xi, .lshape , earg = .eshape)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsi * dsi.deta,
- dl.dxi * dxi.deta)
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0 ))),
- weight = eval(substitute(expression({
- bad <- A <= 0
- if (any(bad, na.rm = TRUE))
- stop(sum(bad, na.rm = TRUE),
- " observations violating boundary constraints in '@weight'")
- kay = -xi # for the formulae
- kay[abs(kay-0.5) < .tolshape0] = 0.501
- temp100 = gamma(2-kay)
- pp = (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
- qq = temp100 * (digamma(1-kay) - (1-kay)/kay)
- wz = matrix(as.numeric(NA), n, 6)
- wz[, iam(1, 1, M)] = pp / sigma^2
- wz[, iam(2, 2, M)] = (1-2*temp100 + pp) / (sigma * kay)^2
- EulerM <- -digamma(1)
- wz[, iam(3, 3, M)] = (pi^2 / 6 + (1-EulerM-1/kay)^2 +
- (2*qq + pp/kay)/kay) / kay^2
- wz[, iam(1, 2, M)] = (pp - temp100) / (sigma^2 * kay)
- wz[, iam(1, 3, M)] = -(qq + pp/kay) / (sigma * kay)
- wz[, iam(2, 3, M)] = (1-EulerM - (1-temp100)/kay - qq -
- pp/kay) / (sigma * kay^2)
- if (any(is.zero)) {
- wz[is.zero, iam(2, 2, M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[is.zero, iam(3, 3, M)] <- 2.4236
- wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma^2
- wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
- (digamma(1)/2+1))/sigma
- wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) +
- 2*dgammadx(1, 3)/3)/sigma
+ bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
+ if (fred <- sum(bad, na.rm = TRUE)) {
+ warning(paste(fred, "observations violating boundary",
+ "constraints while initializing. Taking corrective action."))
+        init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
}
- wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
- wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
- wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
- wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
- wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
- wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
- c(w) * wz
- }), list( .eshape = eshape, .tolshape0 = tolshape0 ))))
+
+ extra$percentiles <- .percentiles
+
+ etastart <-
+ cbind(theta2eta(init.mu, .llocat , earg = .elocat ),
+ theta2eta(init.sig, .lscale , earg = .escale ),
+ theta2eta(init.xi, .lshape , earg = .eshape ))
+ }
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .percentiles = percentiles, .tolshape0 = tolshape0,
+ .imethod = imethod,
+ .giveWarning= giveWarning,
+ .iscale = iscale, .ishape = ishape, .gshape = gshape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ is.zero <- (abs(xi) < .tolshape0)
+ cent <- extra$percentiles
+ LP <- length(cent)
+ fv <- matrix(as.numeric(NA), nrow(eta), LP)
+ if (LP) {
+ for(ii in 1:LP) {
+ yp <- -log(cent[ii]/100)
+ fv[!is.zero,ii] <- loc[!is.zero] - sigma[!is.zero] *
+ (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
+ fv[is.zero,ii] <- loc[is.zero] - sigma[is.zero] * log(yp)
+ }
+ dimnames(fv) <- list(dimnames(eta)[[1]],
+ paste(as.character(cent), "%", sep = ""))
+ } else {
+ EulerM <- -digamma(1)
+ fv <- loc + sigma * EulerM # When xi = 0, is Gumbel
+ fv[!is.zero] <- loc[!is.zero] + sigma[!is.zero] *
+ (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
+ fv[xi >= 1] <- NA # Mean exists only if xi < 1.
+ }
+ fv
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+ last = eval(substitute(expression({
+ misc$links <- c(location = .llocat,
+ scale = .lscale ,
+ shape = .lshape)
+
+ misc$earg <- list(location = .elocat,
+ scale = .escale,
+ shape = .eshape)
+
+
+ misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles <- .percentiles
+ misc$tolshape0 <- .tolshape0
+ misc$expected <- TRUE
+ if (any(xi < -0.5))
+ warning("some values of the shape parameter are less than -0.5")
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mmu <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dgev(x=y, location=mmu, scale=sigma, shape=xi,
+ tolshape0 = .tolshape0,
+ log = TRUE, oobounds.log = -1.0e04,
+ giveWarning= .giveWarning))
+ }
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
+ vfamily = c("egev", "vextremes"),
+ deriv = eval(substitute(expression({
+ Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi <- eta2theta(eta[, 3], .lshape , earg = .eshape)
+ is.zero <- (abs(xi) < .tolshape0)
+ zedd <- (y-Locat) / sigma
+ A <- 1 + xi * zedd
+ dA.dxi <- zedd
+ dA.dmu <- -xi / sigma
+ dA.dsigma <- -xi * zedd / sigma
+ pow <- 1 + 1/xi
+ if (any(bad <- A <= 0, na.rm = TRUE))
+ stop(sum(bad, na.rm = TRUE),
+ " observations violating boundary constraints in '@deriv'")
+ AA <- 1/(xi*A^pow)- pow/A
+ dl.dmu <- dA.dmu * AA
+ dl.dsi <- dA.dsigma * AA - 1/sigma
+ dl.dxi <- log(A)/xi^2 - pow * dA.dxi / A -
+ (log(A)/xi^2 - dA.dxi /(xi*A)) * A^(-1/xi)
+ if (any(is.zero)) {
+ ezedd <- exp(-zedd[is.zero])
+ dl.dmu[is.zero] <- (1 - ezedd) / sigma[is.zero]
+ dl.dsi[is.zero] <- (zedd[is.zero] *
+ (1 - ezedd) - 1) / sigma[is.zero]
+ dl.dxi[is.zero] <- zedd[is.zero] *
+ ((1 - ezedd) * zedd[is.zero] / 2 - 1)
+ }
+ dmu.deta <- dtheta.deta(Locat, .llocat , earg = .elocat )
+ dsi.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
+ dxi.deta <- dtheta.deta(xi, .lshape , earg = .eshape)
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsi * dsi.deta,
+ dl.dxi * dxi.deta)
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+ weight = eval(substitute(expression({
+ bad <- A <= 0
+ if (any(bad, na.rm = TRUE))
+ stop(sum(bad, na.rm = TRUE),
+ " observations violating boundary constraints in '@weight'")
+ kay <- -xi # for the formulae
+ kay[abs(kay-0.5) < .tolshape0] <- 0.501
+ temp100 <- gamma(2-kay)
+ pp <- (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
+ qq <- temp100 * (digamma(1-kay) - (1-kay)/kay)
+ wz <- matrix(as.numeric(NA), n, 6)
+ wz[, iam(1, 1, M)] <- pp / sigma^2
+ wz[, iam(2, 2, M)] <- (1-2*temp100 + pp) / (sigma * kay)^2
+ EulerM <- -digamma(1)
+ wz[, iam(3, 3, M)] <- (pi^2 / 6 + (1-EulerM-1/kay)^2 +
+ (2*qq + pp/kay)/kay) / kay^2
+ wz[, iam(1, 2, M)] <- (pp - temp100) / (sigma^2 * kay)
+ wz[, iam(1, 3, M)] <- -(qq + pp/kay) / (sigma * kay)
+ wz[, iam(2, 3, M)] <- (1-EulerM - (1-temp100)/kay - qq -
+ pp/kay) / (sigma * kay^2)
+ if (any(is.zero)) {
+ wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma^2
+ wz[is.zero, iam(3, 3, M)] <- 2.4236
+ wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma^2
+ wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1, 3)/3)/sigma
+ }
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
+ wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
+ wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
+ wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
+ c(w) * wz
+ }), list( .eshape = eshape, .tolshape0 = tolshape0 ))))
}
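
For orientation, a minimal sketch of how the egev() family above is typically called; this assumes VGAM 0.9-1 is attached and that rgev() takes the same (n, location, scale, shape) arguments as the other simulation functions in this file.

  set.seed(1)
  gevdata <- data.frame(y = rgev(200, location = 10, scale = 2, shape = 0.1))
  fit <- vglm(y ~ 1, egev(percentiles = c(95, 99)), data = gevdata)
  coef(fit, matrix = TRUE)   # location, scale and shape on their link scales
  head(fitted(fit))          # the 95% and 99% percentiles built in @linkinv
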
@@ -920,9 +920,9 @@ dgammadx <- function(x, deriv.arg = 1) {
rgumbel <- function(n, location = 0, scale = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
answer <- location - scale * log(-log(runif(use.n)))
@@ -936,8 +936,8 @@ dgumbel <- function(x, location = 0, scale = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- zedd = (x - location) / scale
- logdensity = -zedd - exp(-zedd) - log(scale)
+ zedd <- (x - location) / scale
+ logdensity <- -zedd - exp(-zedd) - log(scale)
if (log.arg) logdensity else exp(logdensity)
}
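
The rgumbel()/dgumbel() pair above follows the usual d/p/q/r conventions; a small sanity check, assuming pgumbel() keeps the (q, location, scale) signature shown in the next hunk.

  set.seed(2)
  x <- rgumbel(5, location = 1, scale = 2)
  dgumbel(x, location = 1, scale = 2, log = TRUE)   # log-density, as used in the @loglikelihood slots
  pgumbel(0)                                        # exp(-exp(0)) = exp(-1), about 0.368
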
@@ -964,8 +964,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
lscale = "loge",
iscale = NULL,
R = NA, percentiles = c(95, 99),
- mpv = FALSE, zero = NULL)
-{
+ mpv = FALSE, zero = NULL) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -995,11 +994,11 @@ pgumbel <- function(q, location = 0, scale = 1) {
new("vglmff",
blurb = c("Gumbel distribution for extreme value regression\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale )),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ "Links: ",
+ namesof("location", llocat, earg = elocat ), ", ",
+ namesof("scale", lscale, earg = escale )),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -1008,93 +1007,95 @@ pgumbel <- function(q, location = 0, scale = 1) {
namesof("scale", .lscale , earg = .escale , short = TRUE))
- y = as.matrix(y)
+ y <- as.matrix(y)
if (ncol(y) > 1)
- y = -t(apply(-y, 1, sort, na.last = TRUE))
+ y <- -t(apply(-y, 1, sort, na.last = TRUE))
- r.vec = rowSums(cbind(!is.na(y)))
+ r.vec <- rowSums(cbind(!is.na(y)))
if (any(r.vec == 0))
stop("There is at least one row of the response containing all NAs")
-
if (ncol(y) > 1) {
- yiri = y[cbind(1:nrow(y), r.vec)]
- sc.init = if (is.Numeric( .iscale, positive = TRUE))
+ yiri <- y[cbind(1:nrow(y), r.vec)]
+ sc.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else {3 * (rowMeans(y, na.rm = TRUE) - yiri)}
- sc.init = rep(sc.init, length=nrow(y))
- sc.init[sc.init <= 0.0001] = 1 # Used to be .iscale
- loc.init = yiri + sc.init * log(r.vec)
+ sc.init <- rep(sc.init, length = nrow(y))
+ sc.init[sc.init <= 0.0001] <- 1 # Used to be .iscale
+ loc.init <- yiri + sc.init * log(r.vec)
} else {
- sc.init = if (is.Numeric( .iscale, positive = TRUE))
+ sc.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, length.out = n)
+ sc.init <- rep(sc.init, length.out = n)
EulerM <- -digamma(1)
- loc.init = (y - sc.init * EulerM)
- loc.init[loc.init <= 0] = min(y)
+ loc.init <- (y - sc.init * EulerM)
+ loc.init[loc.init <= 0] <- min(y)
}
- extra$R = .R
- extra$mpv = .mpv
- extra$percentiles = .percentiles
+ extra$R <- .R
+ extra$mpv <- .mpv
+ extra$percentiles <- .percentiles
- if (!length(etastart))
+ if (!length(etastart)) {
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
theta2eta( sc.init, .lscale , earg = .escale ))
- }), list( .llocat = llocat, .lscale = lscale, .iscale = iscale,
+ }
+
+ }), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
+ .iscale = iscale,
.R = R, .mpv = mpv, .percentiles = percentiles ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma = eta2theta(eta[, 2], .lscale , earg = .escale ) # sigma
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- Percentiles = extra$percentiles
- LP = length(Percentiles) # may be 0
+ Percentiles <- extra$percentiles
+ LP <- length(Percentiles) # may be 0
if (LP > 0) {
- mpv = extra$mpv
- mu = matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
- Rvec = extra$R
- for(ii in 1:LP) {
- ci = if (is.Numeric(Rvec))
- Rvec * (1 - Percentiles[ii] / 100) else
- -log(Percentiles[ii] / 100)
- mu[,ii] = loc - sigma * log(ci)
- }
- if (mpv)
- mu[,ncol(mu)] = loc - sigma * log(log(2))
- dmn2 = paste(as.character(Percentiles), "%", sep = "")
+ mpv <- extra$mpv
+ mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
+ Rvec <- extra$R
+ for(ii in 1:LP) {
+ ci <- if (is.Numeric(Rvec))
+ Rvec * (1 - Percentiles[ii] / 100) else
+ -log(Percentiles[ii] / 100)
+ mu[,ii] <- loc - sigma * log(ci)
+ }
if (mpv)
- dmn2 = c(dmn2, "MPV")
- dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
+ mu[,ncol(mu)] <- loc - sigma * log(log(2))
+ dmn2 <- paste(as.character(Percentiles), "%", sep = "")
+ if (mpv)
+ dmn2 <- c(dmn2, "MPV")
+ dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
} else {
EulerM <- -digamma(1)
- mu = loc + sigma * EulerM
+ mu <- loc + sigma * EulerM
}
mu
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$links = c(location = .llocat, scale = .lscale)
+ misc$links <- c(location = .llocat , scale = .lscale )
- misc$earg = list(location= .elocat, scale= .escale )
+ misc$earg <- list(location = .elocat , scale = .escale )
- misc$R = .R
- misc$mpv = .mpv
- misc$true.mu = !length( .percentiles) # @fitted is not a true mu
- misc$percentiles = .percentiles
+ misc$R <- .R
+ misc$mpv <- .mpv
+ misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles <- .percentiles
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
- .percentiles = percentiles,
+ .percentiles = percentiles,
.mpv = mpv, .R = R ))),
vfamily = c("gumbel", "vextremes"),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- loc <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
r.vec <- rowSums(cbind(!is.na(y)))
@@ -1114,41 +1115,42 @@ pgumbel <- function(q, location = 0, scale = 1) {
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
deriv = eval(substitute(expression({
- loc = eta2theta(eta[, 1], .llocat, earg = .elocat)
- sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- r.vec = rowSums(cbind(!is.na(y)))
- yiri = y[cbind(1:nrow(y),r.vec)]
- yi.bar = rowMeans(y, na.rm = TRUE)
- temp2 = (yiri - loc) / sigma
- term2 = exp(-temp2)
+ r.vec <- rowSums(cbind(!is.na(y)))
+ yiri <- y[cbind(1:nrow(y),r.vec)]
+ yi.bar <- rowMeans(y, na.rm = TRUE)
+ temp2 <- (yiri - loc) / sigma
+ term2 <- exp(-temp2)
- dloc.deta = dtheta.deta(loc, .llocat, earg = .elocat)
- dsigma.deta = dtheta.deta(sigma, .lscale , earg = .escale )
+ dloc.deta <- dtheta.deta(loc, .llocat, earg = .elocat)
+ dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
- dl.dloc = (r.vec - term2) / sigma
- dl.dsigma = (rowSums((y - loc) / sigma, na.rm = TRUE) - r.vec -
- temp2 * term2) / sigma
+ dl.dloc <- (r.vec - term2) / sigma
+ dl.dsigma <- (rowSums((y - loc) / sigma, na.rm = TRUE) -
+ r.vec - temp2 * term2) / sigma
c(w) * cbind(dl.dloc * dloc.deta,
dl.dsigma * dsigma.deta)
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
weight = eval(substitute(expression({
- temp6 = digamma(r.vec) # , integer = T
- temp5 = digamma(1:max(r.vec)) # , integer=T
- temp5 = matrix(temp5, n, max(r.vec), byrow = TRUE)
- temp5[col(temp5) > r.vec] = 0
- temp5 = temp5 %*% rep(1, ncol(temp5))
-
- wz = matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
- wz[, iam(1, 1, M)] = r.vec / sigma^2
- wz[, iam(2, 1, M)] = -(1 + r.vec * temp6) / sigma^2
- wz[, iam(2, 2, M)] = (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
- temp6^2) + 2 - r.vec - 2*temp5) / sigma^2
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dloc.deta^2
- wz[, iam(2, 1, M)] = wz[, iam(2, 1, M)] * dsigma.deta * dloc.deta
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dsigma.deta^2
+ temp6 <- digamma(r.vec) # , integer = T
+ temp5 <- digamma(1:max(r.vec)) # , integer=T
+ temp5 <- matrix(temp5, n, max(r.vec), byrow = TRUE)
+ temp5[col(temp5) > r.vec] <- 0
+ temp5 <- temp5 %*% rep(1, ncol(temp5))
+
+ wz <- matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
+ wz[, iam(1, 1, M)] <- r.vec / sigma^2
+ wz[, iam(2, 1, M)] <- -(1 + r.vec * temp6) / sigma^2
+ wz[, iam(2, 2, M)] <- (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
+ temp6^2) + 2 - r.vec - 2*temp5) / sigma^2
+
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dloc.deta^2
+ wz[, iam(2, 1, M)] <- wz[, iam(2, 1, M)] * dsigma.deta * dloc.deta
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsigma.deta^2
c(w) * wz
}), list( .lscale = lscale ))))
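
A minimal intercept-only sketch for the gumbel() family above (assuming VGAM is attached); the fitted values are the percentile and optional MPV columns constructed in @linkinv.

  set.seed(3)
  bdata <- data.frame(y = rgumbel(80, location = 20, scale = 3))
  fit.g <- vglm(y ~ 1, gumbel(mpv = TRUE), data = bdata)
  head(fitted(fit.g))   # columns "95%", "99%" and "MPV"
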
@@ -1176,7 +1178,7 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
scale <- rep(scale, length.out = use.n)
- scase <- abs(shape) < sqrt(.Machine$double.eps)
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
nscase <- sum(scase)
if (use.n - nscase)
ans[!scase] <- location[!scase] +
@@ -1191,7 +1193,7 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt(.Machine$double.eps),
+ tolshape0 = sqrt( .Machine$double.eps ),
oobounds.log = -Inf, giveWarning = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
@@ -1204,7 +1206,7 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
stop("bad input for argument 'tolshape0'")
- L = max(length(x), length(location), length(scale), length(shape))
+ L <- max(length(x), length(location), length(scale), length(shape))
if (length(shape) != L)
shape <- rep(shape, length.out = L)
if (length(location) != L)
@@ -1219,52 +1221,52 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- logdensity = rep(log(0), length.out = L)
- scase = abs(shape) < tolshape0
- nscase = sum(scase)
+ logdensity <- rep(log(0), length.out = L)
+ scase <- abs(shape) < tolshape0
+ nscase <- sum(scase)
if (L - nscase) {
- zedd = (x-location) / scale
- xok = (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
- logdensity[xok] = -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
+ zedd <- (x-location) / scale
+ xok <- (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
+ logdensity[xok] <- -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
log(scale[xok])
- outofbounds = (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
+ outofbounds <- (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
+ logdensity[outofbounds] <- oobounds.log
+ no.oob <- sum(outofbounds)
if (giveWarning)
warning(no.oob, " observation",
ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
}
if (nscase) {
- xok = scase & (x > location)
- logdensity[xok] = -(x[xok] - location[xok]) / scale[xok] -
+ xok <- scase & (x > location)
+ logdensity[xok] <- -(x[xok] - location[xok]) / scale[xok] -
log(scale[xok])
- outofbounds = scase & (x <= location)
+ outofbounds <- scase & (x <= location)
if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
+ logdensity[outofbounds] <- oobounds.log
+ no.oob <- sum(outofbounds)
if (giveWarning)
warning(no.oob, " observation",
ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
}
- logdensity[scale <= 0] = NaN
+ logdensity[scale <= 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
pgpd <- function(q, location = 0, scale = 1, shape = 0) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(location))
- stop("bad input for argument 'location'")
- if (!is.Numeric(shape))
- stop("bad input for argument 'shape'")
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(location))
+ stop("bad input for argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
- use.n = max(length(q), length(location), length(scale), length(shape))
+ use.n <- max(length(q), length(location), length(scale), length(shape))
ans <- numeric(use.n)
if (length(shape) != use.n)
@@ -1274,31 +1276,32 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
if (length(scale) != use.n)
scale <- rep(scale, length.out = use.n)
if (length(q) != use.n)
- q <- rep(q - location, length.out = use.n)
+ q <- rep(q, length.out = use.n)
+ zedd <- (q - location) / scale
+ use.zedd <- pmax(zedd, 0)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if (use.n - nscase) {
- q[q < 0] = 0
- ans <- 1 - pmax(0, (1 + shape*q/scale))^(-1/shape)
- }
- if (nscase) {
- pos = q >= 0
- ind9 = pos & scase
- ans[ind9] = -expm1(-q[ind9]/scale[ind9])
- ind9 = !pos & scase
- ans[ind9] = 0
- }
- ans[scale <= 0] = NaN
- ans
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
+ nscase <- sum(scase)
+ if (use.n - nscase) {
+ ans <- 1 - pmax(1 + shape * use.zedd, 0)^(-1/shape)
+ }
+ if (nscase) {
+ pos <- (zedd >= 0)
+ ind9 <- ( pos & scase)
+ ans[ind9] <- -expm1(-use.zedd[ind9])
+ ind9 <- (!pos & scase)
+ ans[ind9] <- 0
+ }
+ ans[scale <= 0] <- NaN
+ ans
}
qgpd <- function(p, location = 0, scale = 1, shape = 0) {
- use.n = max(length(p), length(location), length(scale), length(shape))
+ use.n <- max(length(p), length(location), length(scale), length(shape))
ans <- numeric(use.n)
if (length(shape) != use.n)
@@ -1312,25 +1315,25 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if (use.n - nscase) {
- ans[!scase] = location[!scase] + scale[!scase] *
- ((1-p[!scase])^(-shape[!scase]) - 1) / shape[!scase]
- }
- if (nscase) {
- ans[scase] = location[scase] - scale[scase] * log1p(-p[scase])
- }
+ scase <- abs(shape) < sqrt( .Machine$double.eps )
+ nscase <- sum(scase)
+ if (use.n - nscase) {
+ ans[!scase] <- location[!scase] + scale[!scase] *
+ ((1-p[!scase])^(-shape[!scase]) - 1) / shape[!scase]
+ }
+ if (nscase) {
+ ans[scase] <- location[scase] - scale[scase] * log1p(-p[scase])
+ }
- ans[p < 0] = NaN
- ans[p > 1] = NaN
- ans[(p == 0)] = location[p == 0]
- ans[(p == 1) & (shape >= 0)] = Inf
- ind5 = (p == 1) & (shape < 0)
- ans[ind5] = location[ind5] - scale[ind5] / shape[ind5]
+ ans[p < 0] <- NaN
+ ans[p > 1] <- NaN
+ ans[(p == 0)] <- location[p == 0]
+ ans[(p == 1) & (shape >= 0)] <- Inf
+ ind5 <- (p == 1) & (shape < 0)
+ ans[ind5] <- location[ind5] - scale[ind5] / shape[ind5]
- ans[scale <= 0] = NaN
- ans
+ ans[scale <= 0] <- NaN
+ ans
}
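
A round-trip check of the pgpd()/qgpd() pair rewritten above, using the (q or p, location, scale, shape) signatures shown.

  p <- c(0.1, 0.5, 0.9)
  q <- qgpd(p, location = 0, scale = 2, shape = 0.3)
  pgpd(q, location = 0, scale = 2, shape = 0.3)   # recovers p
  qgpd(0.5, shape = 0)                            # exponential case: -log1p(-0.5) = log(2)
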
@@ -1514,15 +1517,15 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
tolshape0 = 0.001) {
is.zero <- (abs(shape) < tolshape0 ) # A matrix
- LP = length(percentiles)
- fv = matrix(as.numeric(NA), length(shape), LP)
- is.zero = (abs(shape) < tolshape0)
+ LP <- length(percentiles)
+ fv <- matrix(as.numeric(NA), length(shape), LP)
+ is.zero <- (abs(shape) < tolshape0)
for(ii in 1:LP) {
- temp = 1 - percentiles[ii] / 100
- fv[!is.zero, ii] = threshold[!is.zero] +
+ temp <- 1 - percentiles[ii] / 100
+ fv[!is.zero, ii] <- threshold[!is.zero] +
(temp^(-shape[!is.zero]) - 1) *
scale[!is.zero] / shape[!is.zero]
- fv[ is.zero, ii] = threshold[is.zero] - scale[is.zero] * log(temp)
+ fv[ is.zero, ii] <- threshold[is.zero] - scale[is.zero] * log(temp)
}
post.name <- paste(as.character(percentiles), "%", sep = "")
@@ -1636,13 +1639,13 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
iexp <- is.zero & !bad
dl.dShape <- dl.dsigma <- rep(0, length.out = length(y))
- dl.dsigma[igpd] <- ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
- Shape[igpd]*ystar[igpd]) - 1) / sigma[igpd]
+ dl.dsigma[igpd] <- ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
+ Shape[igpd] * ystar[igpd])-1) / sigma[igpd]
dl.dShape[igpd] <- log(A[igpd])/Shape[igpd]^2 - (1 + 1/Shape[igpd]) *
- ystar[igpd] / (A[igpd] * sigma[igpd])
+ ystar[igpd] / (A[igpd] * sigma[igpd])
dl.dShape[iexp] <- ystar[iexp] *
- (0.5*ystar[iexp]/sigma[iexp] - 1) / sigma[iexp]
+ (0.5*ystar[iexp]/sigma[iexp] - 1) / sigma[iexp]
dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
dShape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
@@ -1662,19 +1665,16 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
ned2l.dshapescale <- 1 / ((1+2*Shape) * (1+Shape) * sigma) # > 0 !
NOS <- M / Musual
- wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind11 <- ind22 <- ind12 <- NULL
- for (ii in 1:(M / Musual)) {
- ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
- ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
- ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
- }
- wz[, ind11] <- ned2l.dscale2 * dsigma.deta^2
- wz[, ind22] <- ned2l.dshape2 * dShape.deta^2
- wz[, ind12] <- ned2l.dshapescale * dsigma.deta * dShape.deta
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ wz <- array(c(c(w) * ned2l.dscale2 * dsigma.deta^2,
+ c(w) * ned2l.dshape2 * dShape.deta^2,
+ c(w) * ned2l.dshapescale * dsigma.deta * dShape.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
+
+
+ wz
}), list( .lscale = lscale ))))
}
@@ -1694,18 +1694,18 @@ meplot.default <- function(y, main = "Mean Excess Plot",
if (!is.Numeric(y))
stop("bad input for argument 'y'")
- n = length(y)
- sy = sort(y)
- dsy = rev(sy) # decreasing sequence
- me = rev(cumsum(dsy)) / (n:1) - sy
- me2 = rev(cumsum(dsy^2))
- var = (me2 - (n:1) * (me+sy)^2) / (n:1)
- ci = qnorm((1+conf)/2) * sqrt(abs(var)) / sqrt(n:1)
+ n <- length(y)
+ sy <- sort(y)
+ dsy <- rev(sy) # decreasing sequence
+ me <- rev(cumsum(dsy)) / (n:1) - sy
+ me2 <- rev(cumsum(dsy^2))
+ var <- (me2 - (n:1) * (me+sy)^2) / (n:1)
+ ci <- qnorm((1+conf)/2) * sqrt(abs(var)) / sqrt(n:1)
- ci[length(ci)] = NA
+ ci[length(ci)] <- NA
- mymat = cbind(me - ci, me, me + ci)
- sy = sy - sqrt( .Machine$double.eps )
+ mymat <- cbind(me - ci, me, me + ci)
+ sy <- sy - sqrt( .Machine$double.eps )
matplot(sy, mymat, main = main,
xlab = xlab, ylab = ylab,
@@ -1751,9 +1751,9 @@ guplot.default <-
if (!is.Numeric(y))
stop("bad input for argument 'y'")
- n = length(y)
- sy = sort(y)
- x = -log(-log(((1:n) - 0.5) / n))
+ n <- length(y)
+ sy <- sort(y)
+ x <- -log(-log(((1:n) - 0.5) / n))
plot(x, sy, main = main, xlab = xlab, ylab = ylab,
type = type, ...)
invisible(list(x = x, y = sy))
@@ -1794,8 +1794,7 @@ setMethod("guplot", "vlm",
lscale = "loge",
iscale = NULL,
R = NA, percentiles = c(95, 99),
- mpv = FALSE, zero = NULL)
-{
+ mpv = FALSE, zero = NULL) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -1822,21 +1821,21 @@ setMethod("guplot", "vlm",
new("vglmff",
blurb = c("Gumbel distribution (univariate response)\n\n",
- "Links: ",
- namesof("location", llocat,
- earg = elocat, tag = TRUE), ", ",
- namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
- "Mean: location + scale*0.5772..\n",
- "Variance: pi^2 * scale^2 / 6"),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ "Links: ",
+ namesof("location", llocat,
+ earg = elocat, tag = TRUE), ", ",
+ namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
+ "Mean: location + scale*0.5772..\n",
+ "Variance: pi^2 * scale^2 / 6"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- y = cbind(y)
+ y <- cbind(y)
if (ncol(y) > 1)
- stop("Use gumbel() to handle multivariate responses")
+ stop("Use gumbel() to handle multivariate responses")
if (min(y) <= 0)
- stop("all response values must be positive")
+ stop("all response values must be positive")
@@ -1844,20 +1843,20 @@ setMethod("guplot", "vlm",
predictors.names <-
- c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
- extra$R = .R
- extra$mpv = .mpv
- extra$percentiles = .percentiles
+ extra$R <- .R
+ extra$mpv <- .mpv
+ extra$percentiles <- .percentiles
if (!length(etastart)) {
- sca.init = if (is.Numeric( .iscale, positive = TRUE))
+ sca.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
- sca.init = rep(sca.init, length.out = n)
+ sca.init <- rep(sca.init, length.out = n)
EulerM <- -digamma(1)
- loc.init = (y - sca.init * EulerM)
+ loc.init <- (y - sca.init * EulerM)
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
theta2eta(sca.init, .lscale , earg = .escale ))
@@ -1867,75 +1866,75 @@ setMethod("guplot", "vlm",
.iscale = iscale,
.R = R, .mpv = mpv, .percentiles = percentiles ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
- sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
EulerM <- -digamma(1)
- Percentiles = extra$percentiles
- mpv = extra$mpv
- LP = length(Percentiles) # may be 0
+ Percentiles <- extra$percentiles
+ mpv <- extra$mpv
+ LP <- length(Percentiles) # may be 0
if (!LP) return(locat + sigma * EulerM)
- mu = matrix(as.numeric(NA), nrow(eta), LP + mpv)
- Rvec = extra$R
+ mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv)
+ Rvec <- extra$R
if (1 <= LP)
for(ii in 1:LP) {
- ci = if (is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
+ ci <- if (is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
-log(Percentiles[ii] / 100)
- mu[,ii] = locat - sigma * log(ci)
+ mu[,ii] <- locat - sigma * log(ci)
}
if (mpv)
- mu[, ncol(mu)] = locat - sigma * log(log(2))
- dmn2 = if (LP >= 1) paste(as.character(Percentiles), "%",
+ mu[, ncol(mu)] <- locat - sigma * log(log(2))
+ dmn2 <- if (LP >= 1) paste(as.character(Percentiles), "%",
sep = "") else NULL
if (mpv)
- dmn2 = c(dmn2, "MPV")
- dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
+ dmn2 <- c(dmn2, "MPV")
+ dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
mu
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat, scale = .lscale)
- misc$earg = list(location = .elocat, scale = .escale)
- misc$true.mu = !length( .percentiles) # @fitted is not a true mu
- misc$R = .R
- misc$mpv = .mpv
+ misc$link <- c(location = .llocat, scale = .lscale)
+ misc$earg <- list(location = .elocat, scale = .escale)
+ misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$R <- .R
+ misc$mpv <- .mpv
misc$percentiles = .percentiles
}), list( .llocat = llocat, .lscale = lscale, .mpv = mpv,
.elocat = elocat, .escale = escale,
.R = R, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
- loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
- sca = eta2theta(eta[, 2], .lscale , earg = .escale )
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sca <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgumbel(x=y, location = loc, scale = sca, log = TRUE))
+ sum(w * dgumbel(x = y, location = loc, scale = sca, log = TRUE))
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
vfamily = "egumbel",
deriv = eval(substitute(expression({
- loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
- sca = eta2theta(eta[, 2], .lscale , earg = .escale )
- zedd = (y-loc) / sca
- temp2 = -expm1(-zedd)
- dl.dloc = temp2 / sca
- dl.dsca = -1/sca + temp2 * zedd / sca
- dloc.deta = dtheta.deta(loc, .llocat , earg = .elocat)
- dsca.deta = dtheta.deta(sca, .lscale , earg = .escale )
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sca <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ zedd <- (y-loc) / sca
+ temp2 <- -expm1(-zedd)
+ dl.dloc <- temp2 / sca
+ dl.dsca <- -1/sca + temp2 * zedd / sca
+ dloc.deta <- dtheta.deta(loc, .llocat , earg = .elocat)
+ dsca.deta <- dtheta.deta(sca, .lscale , earg = .escale )
c(w) * cbind(dl.dloc * dloc.deta,
dl.dsca * dsca.deta)
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
weight=expression({
- digamma1 = digamma(1)
- ned2l.dsca2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sca^2
- ned2l.dloc2 = 1 / sca^2
- ned2l.dscaloc = -(1 + digamma1) / sca^2
+ digamma1 <- digamma(1)
+ ned2l.dsca2 <- ((2 + digamma1) * digamma1 + trigamma(1) + 1) / sca^2
+ ned2l.dloc2 <- 1 / sca^2
+ ned2l.dscaloc <- -(1 + digamma1) / sca^2
wz = matrix(as.numeric(NA), n, dimm(M = 2))
- wz[, iam(1, 1, M)] = ned2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] = ned2l.dsca2 * dsca.deta^2
- wz[, iam(1, 2, M)] = ned2l.dscaloc * dloc.deta * dsca.deta
+ wz[, iam(1, 1, M)] <- ned2l.dloc2 * dloc.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsca2 * dsca.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dscaloc * dloc.deta * dsca.deta
c(w) * wz
}))
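
A corresponding sketch for the egumbel() family in this hunk (assuming VGAM is attached); Coef() back-transforms an intercept-only fit to the parameter scale.

  set.seed(4)
  edata <- data.frame(y = rgumbel(100, location = 10, scale = 2))
  fit.e <- vglm(y ~ 1, egumbel, data = edata)
  Coef(fit.e)   # estimated location and scale, roughly 10 and 2
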
@@ -1947,8 +1946,7 @@ setMethod("guplot", "vlm",
cgumbel <- function(llocation = "identity",
lscale = "loge",
iscale = NULL,
- mean = TRUE, percentiles = NULL, zero = 2)
-{
+ mean = TRUE, percentiles = NULL, zero = 2) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
llocat <- attr(elocat, "function.name")
@@ -1971,18 +1969,18 @@ setMethod("guplot", "vlm",
new("vglmff",
blurb = c("Censored Gumbel distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat, tag = TRUE),
- ", ",
- namesof("scale", lscale, earg = escale, tag = TRUE),
- "\n",
- "Mean: location + scale*0.5772..\n",
- "Variance: pi^2 * scale^2 / 6"),
- constraints=eval(substitute(expression({
+ "Links: ",
+ namesof("location", llocat, earg = elocat, tag = TRUE),
+ ", ",
+ namesof("scale", lscale, earg = escale, tag = TRUE),
+ "\n",
+ "Mean: location + scale*0.5772..\n",
+ "Variance: pi^2 * scale^2 / 6"),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- y = cbind(y)
+ y <- cbind(y)
if (ncol(y) > 1)
stop("Use gumbel.block() to handle multivariate responses")
    if (any(y <= 0))
@@ -1993,9 +1991,9 @@ setMethod("guplot", "vlm",
if (!length(extra$leftcensored))
- extra$leftcensored = rep(FALSE, length.out = n)
+ extra$leftcensored <- rep(FALSE, length.out = n)
if (!length(extra$rightcensored))
- extra$rightcensored = rep(FALSE, length.out = n)
+ extra$rightcensored <- rep(FALSE, length.out = n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
@@ -2004,11 +2002,11 @@ setMethod("guplot", "vlm",
namesof("scale", .lscale , earg = .escale , tag = FALSE))
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, positive = TRUE))
+ sc.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * sqrt(var(y) * 6 ) / pi
- sc.init = rep(sc.init, length.out = n)
+ sc.init <- rep(sc.init, length.out = n)
EulerM <- -digamma(1)
- loc.init = (y - sc.init * EulerM)
+ loc.init <- (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
@@ -2018,17 +2016,17 @@ setMethod("guplot", "vlm",
.llocat = llocat,
.elocat = elocat, .escale = escale ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocat)
- sc = eta2theta(eta[, 2], .lscale)
+ loc <- eta2theta(eta[, 1], .llocat)
+ sc <- eta2theta(eta[, 2], .lscale)
EulerM <- -digamma(1)
if (.mean) loc + sc * EulerM else {
- LP = length(.percentiles) # 0 if NULL
- mu = matrix(as.numeric(NA), nrow(eta), LP)
+ LP <- length(.percentiles) # 0 if NULL
+ mu <- matrix(as.numeric(NA), nrow(eta), LP)
for(ii in 1:LP) {
- ci = -log( .percentiles[ii] / 100)
- mu[, ii] = loc - sc * log(ci)
+ ci <- -log( .percentiles[ii] / 100)
+ mu[, ii] <- loc - sc * log(ci)
}
- dmn2 = paste(as.character(.percentiles), "%", sep = "")
+ dmn2 <- paste(as.character(.percentiles), "%", sep = "")
dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
mu
}
@@ -2047,17 +2045,17 @@ setMethod("guplot", "vlm",
.percentiles = percentiles ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
- loc = eta2theta(eta[, 1], .llocat, earg = .elocat )
- sc = eta2theta(eta[, 2], .lscale , earg = .escale )
- zedd = (y-loc) / sc
-
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cen0 = !cenL & !cenU # uncensored obsns
- Fy = exp(-exp(-zedd))
- ell1 = -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
- ell2 = log(Fy[cenL])
- ell3 = log1p(-Fy[cenU])
+ loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
+ sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ zedd <- (y-loc) / sc
+
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cen0 <- !cenL & !cenU # uncensored obsns
+ Fy <- exp(-exp(-zedd))
+ ell1 <- -log(sc[cen0]) - zedd[cen0] - exp(-zedd[cen0])
+ ell2 <- log(Fy[cenL])
+ ell3 <- log1p(-Fy[cenU])
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
@@ -2066,30 +2064,30 @@ setMethod("guplot", "vlm",
.elocat = elocat, .escale = escale ))),
vfamily = "cgumbel",
deriv = eval(substitute(expression({
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cen0 = !cenL & !cenU # uncensored obsns
-
- loc = eta2theta(eta[, 1], .llocat, earg = .elocat )
- sc = eta2theta(eta[, 2], .lscale , earg = .escale )
- zedd = (y-loc) / sc
- temp2 = -expm1(-zedd)
- dl.dloc = temp2 / sc
- dl.dsc = -1/sc + temp2 * zedd / sc
- dloc.deta = dtheta.deta(loc, .llocat, earg = .elocat )
- dsc.deta = dtheta.deta(sc, .lscale , earg = .escale )
-
- ezedd = exp(-zedd)
- Fy = exp(-ezedd)
- dFy.dloc = -ezedd * Fy / sc
- dFy.dsc = zedd * dFy.dloc # -zedd * exp(-zedd) * Fy / sc
+ cenL <- extra$leftcensored
+ cenU <- extra$rightcensored
+ cen0 <- !cenL & !cenU # uncensored obsns
+
+ loc <- eta2theta(eta[, 1], .llocat, earg = .elocat )
+ sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ zedd <- (y-loc) / sc
+ temp2 <- -expm1(-zedd)
+ dl.dloc <- temp2 / sc
+ dl.dsc <- -1/sc + temp2 * zedd / sc
+ dloc.deta <- dtheta.deta(loc, .llocat, earg = .elocat )
+ dsc.deta <- dtheta.deta(sc, .lscale , earg = .escale )
+
+ ezedd <- exp(-zedd)
+ Fy <- exp(-ezedd)
+ dFy.dloc <- -ezedd * Fy / sc
+ dFy.dsc <- zedd * dFy.dloc # -zedd * exp(-zedd) * Fy / sc
if (any(cenL)) {
- dl.dloc[cenL] = -ezedd[cenL] / sc[cenL]
- dl.dsc[cenL] = -zedd[cenL] * ezedd[cenL] / sc[cenL]
+ dl.dloc[cenL] <- -ezedd[cenL] / sc[cenL]
+ dl.dsc[cenL] <- -zedd[cenL] * ezedd[cenL] / sc[cenL]
}
if (any(cenU)) {
- dl.dloc[cenU] = -dFy.dloc[cenU] / (1-Fy[cenU])
- dl.dsc[cenU] = -dFy.dsc[cenU] / (1-Fy[cenU])
+ dl.dloc[cenU] <- -dFy.dloc[cenU] / (1-Fy[cenU])
+ dl.dsc[cenU] <- -dFy.dsc[cenU] / (1-Fy[cenU])
}
c(w) * cbind(dl.dloc * dloc.deta,
dl.dsc * dsc.deta)
@@ -2097,33 +2095,33 @@ setMethod("guplot", "vlm",
.llocat = llocat,
.elocat = elocat, .escale = escale ))),
weight=expression({
- A1 = ifelse(cenL, Fy, 0)
- A3 = ifelse(cenU, 1-Fy, 0)
- A2 = 1 - A1 - A3 # Middle; uncensored
- digamma1 = digamma(1)
- ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
- ed2l.dloc2 = 1 / sc^2
- ed2l.dlocsc = -(1 + digamma1) / sc^2
- wz = matrix(as.numeric(NA), n, dimm(M = 2))
- wz[, iam(1, 1, M)] = A2 * ed2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] = A2 * ed2l.dsc2 * dsc.deta^2
- wz[, iam(1, 2, M)] = A2 * ed2l.dlocsc * dloc.deta * dsc.deta
- d2l.dloc2 = -ezedd / sc^2
- d2l.dsc2 = (2 - zedd) * zedd * ezedd / sc^2
- d2l.dlocsc = (1 - zedd) * ezedd / sc^2
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)]-A1^2 * d2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)]-A1^2 * d2l.dsc2 * dsc.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)]-A1^2 * d2l.dlocsc *
+ A1 <- ifelse(cenL, Fy, 0)
+ A3 <- ifelse(cenU, 1-Fy, 0)
+ A2 <- 1 - A1 - A3 # Middle; uncensored
+ digamma1 <- digamma(1)
+ ed2l.dsc2 <- ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
+ ed2l.dloc2 <- 1 / sc^2
+ ed2l.dlocsc <- -(1 + digamma1) / sc^2
+ wz <- matrix(as.numeric(NA), n, dimm(M = 2))
+ wz[, iam(1, 1, M)] <- A2 * ed2l.dloc2 * dloc.deta^2
+ wz[, iam(2, 2, M)] <- A2 * ed2l.dsc2 * dsc.deta^2
+ wz[, iam(1, 2, M)] <- A2 * ed2l.dlocsc * dloc.deta * dsc.deta
+ d2l.dloc2 <- -ezedd / sc^2
+ d2l.dsc2 <- (2 - zedd) * zedd * ezedd / sc^2
+ d2l.dlocsc <- (1 - zedd) * ezedd / sc^2
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A1^2 * d2l.dloc2 * dloc.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A1^2 * d2l.dsc2 * dsc.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A1^2 * d2l.dlocsc *
dloc.deta * dsc.deta
- d2Fy.dloc2 = dFy.dloc * dl.dloc + Fy * d2l.dloc2
- d2Fy.dsc2 = dFy.dsc * dl.dsc + Fy * d2l.dsc2
- d2Fy.dlocsc = dFy.dsc * dl.dloc + Fy * d2l.dlocsc
- d2l.dloc2 = -((1-Fy) * d2Fy.dloc2 - dFy.dloc^2) / (1-Fy)^2
- d2l.dsc2 = -((1-Fy) * d2Fy.dsc2 - dFy.dsc^2) / (1-Fy)^2
- d2l.dlocsc = -((1-Fy) * d2Fy.dlocsc - dFy.dloc * dFy.dsc) / (1-Fy)^2
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)]-A3^2 * d2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)]-A3^2 * d2l.dsc2 * dsc.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)]-A3^2 * d2l.dlocsc *
+ d2Fy.dloc2 <- dFy.dloc * dl.dloc + Fy * d2l.dloc2
+ d2Fy.dsc2 <- dFy.dsc * dl.dsc + Fy * d2l.dsc2
+ d2Fy.dlocsc <- dFy.dsc * dl.dloc + Fy * d2l.dlocsc
+ d2l.dloc2 <- -((1-Fy) * d2Fy.dloc2 - dFy.dloc^2) / (1-Fy)^2
+ d2l.dsc2 <- -((1-Fy) * d2Fy.dsc2 - dFy.dsc^2) / (1-Fy)^2
+ d2l.dlocsc <- -((1-Fy) * d2Fy.dlocsc - dFy.dloc * dFy.dsc) / (1-Fy)^2
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)]-A3^2 * d2l.dloc2 * dloc.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)]-A3^2 * d2l.dsc2 * dsc.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)]-A3^2 * d2l.dlocsc *
dloc.deta * dsc.deta
c(w) * wz
}))
@@ -2136,19 +2134,19 @@ dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(scale), length(shape), length(location))
- x = rep(x, length.out = L);
- scale = rep(scale, length.out = L);
- shape = rep(shape, length.out = L);
- location = rep(location, length.out = L);
+ L <- max(length(x), length(scale), length(shape), length(location))
+ x <- rep(x, length.out = L)
+ scale <- rep(scale, length.out = L)
+ shape <- rep(shape, length.out = L)
+ location <- rep(location, length.out = L)
- logdensity = rep(log(0), length.out = L)
- xok = (x > location)
- rzedd = scale / (x - location)
- logdensity[xok] = log(shape[xok]) - (rzedd[xok]^shape[xok]) +
+ logdensity <- rep(log(0), length.out = L)
+ xok <- (x > location)
+ rzedd <- scale / (x - location)
+ logdensity[xok] <- log(shape[xok]) - (rzedd[xok]^shape[xok]) +
(shape[xok]+1) * log(rzedd[xok]) - log(scale[xok])
- logdensity[shape <= 0] = NaN
- logdensity[scale <= 0] = NaN
+ logdensity[shape <= 0] <- NaN
+ logdensity[scale <= 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
@@ -2159,9 +2157,9 @@ pfrechet <- function(q, location = 0, scale = 1, shape) {
stop("scale must be positive")
if (!is.Numeric(shape, positive = TRUE))
stop("shape must be positive")
- rzedd = scale / (q - location)
+ rzedd <- scale / (q - location)
ans <- exp(-(rzedd^shape))
- ans[q <= location] = 0
+ ans[q <= location] <- 0
ans
}
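
A quick consistency check of the dfrechet()/pfrechet() pair above, using the (x or q, location, scale, shape) signatures shown.

  pfrechet(2, location = 0, scale = 1, shape = 3)       # exp(-(1/2)^3), about 0.8825
  integrate(dfrechet, lower = 0, upper = 2,
            location = 0, scale = 1, shape = 3)$value   # integrates the density to the same value
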
@@ -2193,8 +2191,7 @@ rfrechet <- function(n, location = 0, scale = 1, shape) {
-frechet2.control <- function(save.weight = TRUE, ...)
-{
+frechet2.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -2205,8 +2202,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
lshape = logoff(offset = -2),
iscale = NULL, ishape = NULL,
nsimEIM = 250,
- zero = NULL)
-{
+ zero = NULL) {
if (!is.Numeric(location))
stop("bad input for argument 'location'")
@@ -2229,7 +2225,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
"Links: ",
namesof("scale", link = lscale, earg = escale ), ", ",
namesof("shape", link = lshape, earg = eshape )),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -2252,7 +2248,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
namesof("shape", .lshape , earg = .eshape, short = TRUE))
- extra$location = rep( .location, length.out = n) # stored here
+ extra$location <- rep( .location , length.out = n) # stored here
if (!length(etastart)) {
@@ -2261,37 +2257,37 @@ frechet2.control <- function(save.weight = TRUE, ...)
stop("initial values for 'location' are out of range")
- frech.aux = function(shapeval, y, x, w, extraargs) {
- myprobs = c(0.25, 0.5, 0.75)
- myobsns = quantile(y, probs = myprobs)
- myquant = (-log(myprobs))^(-1/shapeval)
- myfit = lsfit(x = myquant, y = myobsns, intercept = TRUE)
+ frech.aux <- function(shapeval, y, x, w, extraargs) {
+ myprobs <- c(0.25, 0.5, 0.75)
+ myobsns <- quantile(y, probs = myprobs)
+ myquant <- (-log(myprobs))^(-1/shapeval)
+ myfit <- lsfit(x = myquant, y = myobsns, intercept = TRUE)
sum(myfit$resid^2)
}
- shape.grid = c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
- shape.grid = c(1 / shape.grid, 1, shape.grid)
- try.this = getMaxMin(shape.grid, objfun = frech.aux,
+ shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
+ shape.grid <- c(1 / shape.grid, 1, shape.grid)
+ try.this <- getMaxMin(shape.grid, objfun = frech.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- shape.init = if (length( .ishape ))
+ shape.init <- if (length( .ishape ))
rep( .ishape , length.out = n) else {
rep(try.this , length.out = n) # variance exists if shape > 2
}
- myprobs = c(0.25, 0.5, 0.75)
- myobsns = quantile(y, probs = myprobs)
- myquant = (-log(myprobs))^(-1/shape.init[1])
- myfit = lsfit(x = myquant, y = myobsns)
+ myprobs <- c(0.25, 0.5, 0.75)
+ myobsns <- quantile(y, probs = myprobs)
+ myquant <- (-log(myprobs))^(-1/shape.init[1])
+ myfit <- lsfit(x = myquant, y = myobsns)
- Scale.init = if (length( .iscale ))
+ Scale.init <- if (length( .iscale ))
rep( .iscale , length.out = n) else {
if (all(shape.init > 1)) {
myfit$coef[2]
} else {
- rep( 1.0, length.out = n)
+ rep(1.0, length.out = n)
}
}
@@ -2304,9 +2300,9 @@ frechet2.control <- function(save.weight = TRUE, ...)
.iscale = iscale, .ishape = ishape,
.location = location ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- loc = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ loc <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
ans <- rep(as.numeric(NA), length.out = length(shape))
ok <- shape > 1
@@ -2319,15 +2315,15 @@ frechet2.control <- function(save.weight = TRUE, ...)
misc$earg <- list("scale" = .escale , "shape" = .eshape )
- misc$nsimEIM = .nsimEIM
+ misc$nsimEIM <- .nsimEIM
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- loctn = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ loctn <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * dfrechet(x = y, location = loctn, scale = Scale,
@@ -2336,15 +2332,15 @@ frechet2.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape ))),
vfamily = c("frechet2", "vextremes"),
deriv = eval(substitute(expression({
- loctn = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ loctn <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- rzedd = Scale / (y - loctn) # reciprocial of zedd
- dl.dloctn = (shape + 1) / (y - loctn) -
+    rzedd <- Scale / (y - loctn) # reciprocal of zedd
+ dl.dloctn <- (shape + 1) / (y - loctn) -
(shape / (y - loctn)) * (rzedd)^shape
- dl.dScale = shape * (1 - rzedd^shape) / Scale
- dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
+ dl.dScale <- shape * (1 - rzedd^shape) / Scale
+ dl.dshape <- 1 / shape + log(rzedd) * (1 - rzedd^shape)
dthetas.detas <- cbind(
dScale.deta <- dtheta.deta(Scale, .lscale , earg = .escale ),
@@ -2361,7 +2357,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim <- rfrechet(n, loc = loctn, scale = Scale, shape = shape)
+ ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
      rzedd <- Scale / (ysim - loctn) # reciprocal of zedd
dl.dloctn <- (shape + 1) / (ysim - loctn) -
@@ -2395,8 +2391,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
-frechet3.control <- function(save.weight = TRUE, ...)
-{
+frechet3.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -2410,8 +2405,7 @@ if (FALSE)
lshape = logoff(offset = -2),
ilocation = NULL, iscale = NULL, ishape = NULL,
nsimEIM = 250,
- zero = 1)
-{
+ zero = 1) {
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
lscale <- attr(escale, "function.name")
@@ -2436,7 +2430,7 @@ if (FALSE)
namesof("difference", link = ldiffr, earg = ediffr), ", ",
namesof("scale", link = lscale, earg = escale), ", ",
namesof("shape", link = lshape, earg = eshape)),
- constraints=eval(substitute(expression({
+ constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -2448,58 +2442,58 @@ if (FALSE)
namesof("scale", .lscale , earg = .escale, short = TRUE),
namesof("shape", .lshape , earg = .eshape, short = TRUE))
- anchorpt = if (is.Numeric( .anchor, allowable.length = 1))
+ anchorpt <- if (is.Numeric( .anchor, allowable.length = 1))
.anchor else min(y)
if (min(y) < anchorpt)
stop("anchor point is too large")
- extra$LHSanchor = anchorpt
+ extra$LHSanchor <- anchorpt
if (!length(etastart)) {
- frech.aux = function(shapeval, y, x, w, extraargs) {
- myprobs = c(0.25, 0.5, 0.75)
- myobsns = quantile(y, probs = myprobs)
- myquant = (-log(myprobs))^(-1/shapeval)
- myfit = lsfit(x = myquant, y = myobsns, intercept = TRUE)
+ frech.aux <- function(shapeval, y, x, w, extraargs) {
+ myprobs <- c(0.25, 0.5, 0.75)
+ myobsns <- quantile(y, probs = myprobs)
+ myquant <- (-log(myprobs))^(-1/shapeval)
+ myfit <- lsfit(x = myquant, y = myobsns, intercept = TRUE)
sum(myfit$resid^2)
}
- shape.grid = c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
- shape.grid = c(1 / shape.grid, 1, shape.grid)
- try.this = getMaxMin(shape.grid, objfun = frech.aux,
+ shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
+ shape.grid <- c(1 / shape.grid, 1, shape.grid)
+ try.this <- getMaxMin(shape.grid, objfun = frech.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
shape.init =
if (length( .ishape ))
- rep( .ishape, length.out = n) else {
- rep(try.this, length.out = n) # variance exists if shape > 2
+ rep( .ishape , length.out = n) else {
+ rep(try.this , length.out = n) # variance exists if shape > 2
}
- myprobs = c(0.25, 0.5, 0.75)
- myobsns = quantile(y, probs = myprobs)
- myquant = (-log(myprobs))^(-1/shape.init[1])
- myfit = lsfit(x = myquant, y = myobsns)
+ myprobs <- c(0.25, 0.5, 0.75)
+ myobsns <- quantile(y, probs = myprobs)
+ myquant <- (-log(myprobs))^(-1/shape.init[1])
+ myfit <- lsfit(x = myquant, y = myobsns)
plot(myobsns ~ myquant)
- Scale.init = if (length( .iscale )) {
+ Scale.init <- if (length( .iscale )) {
rep( .iscale , length.out = n)
} else {
if (all(shape.init > 1)) {
myfit$coef[2]
} else {
- rep( 1.0, length.out = n)
+ rep(1.0, length.out = n)
}
}
- locinit = if (length( .ilocation))
- rep( .ilocation, length.out = n) else {
+ locinit <- if (length( .ilocation ))
+ rep( .ilocation , length.out = n) else {
if (myfit$coef[1] < min(y)) {
rep(myfit$coef[1], length.out = n)
} else {
@@ -2524,13 +2518,13 @@ if (FALSE)
.iscale = iscale, .ishape = ishape,
.ilocation = ilocation, .anchor = anchor ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- loctn = extra$LHSanchor -
+ loctn <- extra$LHSanchor -
eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
ans <- rep(as.numeric(NA), length.out = length(shape))
- okay = shape > 1
- ans[okay] = loctn[okay] + Scale[okay] * gamma(1 - 1/shape[okay])
+ okay <- shape > 1
+ ans[okay] <- loctn[okay] + Scale[okay] * gamma(1 - 1/shape[okay])
ans
}, list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
@@ -2543,19 +2537,19 @@ if (FALSE)
"scale" = .escale,
"shape" = .eshape)
- misc$nsimEIM = .nsimEIM
+ misc$nsimEIM <- .nsimEIM
- extra$location = loctn # Store the location parameter estimate here
+ extra$location <- loctn # Store the location parameter estimate here
}), list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- loctn = extra$LHSanchor -
+ loctn <- extra$LHSanchor -
eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dfrechet(x = y, location = loctn, scale = Scale,
@@ -2565,18 +2559,18 @@ if (FALSE)
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
vfamily = c("frechet3", "vextremes"),
deriv = eval(substitute(expression({
- Difrc = eta2theta(eta[, 1], .ldiffr , earg = .ediffr )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ Difrc <- eta2theta(eta[, 1], .ldiffr , earg = .ediffr )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- loctn = extra$LHSanchor - Difrc
-    rzedd = Scale / (y - loctn)   # reciprocal of zedd
+ loctn <- extra$LHSanchor - Difrc
+    rzedd <- Scale / (y - loctn)  # reciprocal of zedd
- dl.dloct = (shape + 1) / (y - loctn) -
+ dl.dloct <- (shape + 1) / (y - loctn) -
(shape / (y - loctn)) * (rzedd)^shape
- dl.ddifff = -dl.dloct
- dl.dScale = shape * (1 - rzedd^shape) / Scale
- dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
+ dl.ddifff <- -dl.dloct
+ dl.dScale <- shape * (1 - rzedd^shape) / Scale
+ dl.dshape <- 1 / shape + log(rzedd) * (1 - rzedd^shape)
dthetas.detas <- cbind(
ddifff.deta <- dtheta.deta(Difrc, .ldiffr , earg = .ediffr ),
@@ -2588,45 +2582,41 @@ if (FALSE)
dl.dScale,
dl.dshape) * dthetas.detas
- print("head(ans)")
- print( head(ans) )
- print("summary(ans)")
- print( summary(ans) )
ans
}), list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rfrechet(n, loc = loctn, scale = Scale, shape = shape)
+ ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
-      rzedd = Scale / (ysim - loctn)   # reciprocal of zedd
+      rzedd <- Scale / (ysim - loctn)  # reciprocal of zedd
- dl.dloct = (shape + 1) / (ysim - loctn) -
+ dl.dloct <- (shape + 1) / (ysim - loctn) -
(shape / (ysim - loctn)) * (rzedd)^shape
- dl.ddifff = -dl.dloct
+ dl.ddifff <- -dl.dloct
- dl.dScale = shape * (1 - rzedd^shape) / Scale
- dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
+ dl.dScale <- shape * (1 - rzedd^shape) / Scale
+ dl.dshape <- 1 / shape + log(rzedd) * (1 - rzedd^shape)
rm(ysim)
- temp3 = cbind(dl.ddifff, dl.dScale, dl.dshape)
- run.varcov = run.varcov +
+ temp3 <- cbind(dl.ddifff, dl.dScale, dl.dshape)
+ run.varcov <- run.varcov +
temp3[, ind1$row.index] *
temp3[, ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = c(w) * wz * dthetas.detas[, ind1$row] *
+ wz <- c(w) * wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
} else {
stop("argument 'nsimEIM' must be numeric")
@@ -2642,16 +2632,14 @@ if (FALSE)
}
-recnormal1.control <- function(save.weight = TRUE, ...)
-{
+recnormal1.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
recnormal1 <- function(lmean = "identity", lsd = "loge",
imean = NULL, isd = NULL, imethod = 1,
- zero = NULL)
-{
+ zero = NULL) {
lmean <- as.list(substitute(lmean))
emean <- link2list(lmean)
lmean <- attr(emean, "function.name")
@@ -2672,12 +2660,12 @@ recnormal1.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Upper record values from a univariate normal distribution\n\n",
- "Links: ",
- namesof("mean", lmean, emean, tag = TRUE), "; ",
- namesof("sd", lsdev, esdev, tag = TRUE),
- "\n",
- "Variance: sd^2"),
- constraints=eval(substitute(expression({
+ "Links: ",
+ namesof("mean", lmean, emean, tag = TRUE), "; ",
+ namesof("sd", lsdev, esdev, tag = TRUE),
+ "\n",
+ "Variance: sd^2"),
+ constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -2698,10 +2686,10 @@ recnormal1.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- mean.init = if (length( .imean)) rep( .imean ,
- length.out = n) else {
+ mean.init <- if (length( .imean )) rep( .imean ,
+ length.out = n) else {
if (.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
- sd.init = if (length( .isdev)) rep( .isdev, length.out = n) else {
+ sd.init <- if (length( .isdev)) rep( .isdev , length.out = n) else {
if (.imethod == 1) 1*(sd(c(y))) else
if (.imethod == 2) 5*(sd(c(y))) else
.5*(sd(c(y)))
@@ -2727,48 +2715,48 @@ recnormal1.control <- function(save.weight = TRUE, ...)
.emean = emean, .esdev = esdev ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- sdev = eta2theta(eta[, 2], .lsdev)
+ sdev <- eta2theta(eta[, 2], .lsdev)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- zedd = (y - mu) / sdev
- NN = nrow(eta)
+ zedd <- (y - mu) / sdev
+ NN <- nrow(eta)
sum(w * (-log(sdev) - 0.5 * zedd^2)) -
sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
}
}, list( .lsdev = lsdev, .esdev = esdev ))),
vfamily = c("recnormal1"),
deriv = eval(substitute(expression({
- NN = nrow(eta)
- mymu = eta2theta(eta[, 1], .lmean)
- sdev = eta2theta(eta[, 2], .lsdev)
- zedd = (y - mymu) / sdev
- temp200 = dnorm(zedd) / (1-pnorm(zedd))
- dl.dmu = (zedd - temp200) / sdev
- dl.dmu[NN] = zedd[NN] / sdev[NN]
- dl.dsd = (-1 + zedd^2 - zedd * temp200) / sdev
- dl.dsd[NN] = (-1 + zedd[NN]^2) / sdev[NN]
-
- dmu.deta = dtheta.deta(mymu, .lmean, .emean )
- dsd.deta = dtheta.deta(sdev, .lsdev, .esdev )
+ NN <- nrow(eta)
+ mymu <- eta2theta(eta[, 1], .lmean)
+ sdev <- eta2theta(eta[, 2], .lsdev)
+ zedd <- (y - mymu) / sdev
+ temp200 <- dnorm(zedd) / (1-pnorm(zedd))
+ dl.dmu <- (zedd - temp200) / sdev
+ dl.dmu[NN] <- zedd[NN] / sdev[NN]
+ dl.dsd <- (-1 + zedd^2 - zedd * temp200) / sdev
+ dl.dsd[NN] <- (-1 + zedd[NN]^2) / sdev[NN]
+
+ dmu.deta <- dtheta.deta(mymu, .lmean, .emean )
+ dsd.deta <- dtheta.deta(sdev, .lsdev, .esdev )
if (iter == 1) {
- etanew = eta
+ etanew <- eta
} else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
+ derivold <- derivnew
+ etaold <- etanew
+ etanew <- eta
}
- derivnew = c(w) * cbind(dl.dmu * dmu.deta,
+ derivnew <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dsd * dsd.deta)
derivnew
}), list( .lmean = lmean, .lsdev = lsdev,
.emean = emean, .esdev = esdev ))),
weight = expression({
if (iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ wznew <- cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
} else {
- wzold = wznew
- wznew = qnupdate(w=w, wzold = wzold, dderiv=(derivold - derivnew),
+ wzold <- wznew
+ wznew <- qnupdate(w=w, wzold = wzold, dderiv=(derivold - derivnew),
deta=etanew-etaold, M = M,
trace=trace) # weights incorporated in args
}
@@ -2778,14 +2766,12 @@ recnormal1.control <- function(save.weight = TRUE, ...)
-recexp1.control <- function(save.weight = TRUE, ...)
-{
+recexp1.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
- recexp1 <- function(lrate = "loge", irate = NULL, imethod = 1)
-{
+ recexp1 <- function(lrate = "loge", irate = NULL, imethod = 1) {
lrate <- as.list(substitute(lrate))
erate <- link2list(lrate)
lrate <- attr(erate, "function.name")
@@ -2802,10 +2788,10 @@ recexp1.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Upper record values from a ",
"1-parameter exponential distribution\n\n",
- "Links: ",
- namesof("rate", lrate, erate, tag = TRUE),
- "\n",
- "Variance: 1/rate^2"),
+ "Links: ",
+ namesof("rate", lrate, erate, tag = TRUE),
+ "\n",
+ "Variance: 1/rate^2"),
initialize = eval(substitute(expression({
predictors.names <-
c(namesof("rate", .lrate , .erate , tag = FALSE))
@@ -2819,15 +2805,15 @@ recexp1.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- rate.init = if (length( .irate))
- rep( .irate, len = n) else {
- init.rate =
+ rate.init <- if (length( .irate ))
+ rep( .irate , len = n) else {
+ init.rate <-
if (.imethod == 1) length(y) / y[length(y), 1] else
if (.imethod == 2) 1/mean(y) else 1/median(y)
if (.lrate == "loge") pmax(1/1024, init.rate) else
init.rate}
- etastart =
+ etastart <-
cbind(theta2eta(rep(rate.init, len = n), .lrate , .erate ))
}
}), list( .lrate = lrate,
@@ -2847,26 +2833,26 @@ recexp1.control <- function(save.weight = TRUE, ...)
rate = eta2theta(eta, .lrate , .erate )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- NN = length(eta)
- y = cbind(y)
+ NN <- length(eta)
+ y <- cbind(y)
sum(w * log(rate)) - w[NN] * rate[NN] * y[NN, 1]
}
}, list( .lrate = lrate, .erate = erate ))),
vfamily = c("recexp1"),
deriv = eval(substitute(expression({
- NN = length(eta)
- rate = c(eta2theta(eta, .lrate , .erate ))
+ NN <- length(eta)
+ rate <- c(eta2theta(eta, .lrate , .erate ))
- dl.drate = 1 / rate
- dl.drate[NN] = 1/ rate[NN] - y[NN, 1]
+ dl.drate <- 1 / rate
+ dl.drate[NN] <- 1/ rate[NN] - y[NN, 1]
- drate.deta = dtheta.deta(rate, .lrate , .erate )
+ drate.deta <- dtheta.deta(rate, .lrate , .erate )
c(w) * cbind(dl.drate * drate.deta)
}), list( .lrate = lrate, .erate = erate ))),
weight = expression({
- ed2l.drate2 = 1 / rate^2
- wz = drate.deta^2 * ed2l.drate2
+ ed2l.drate2 <- 1 / rate^2
+ wz <- drate.deta^2 * ed2l.drate2
c(w) * wz
}))
}
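
For orientation, a minimal usage sketch of the recexp1() family defined above, on simulated data; the record-value construction and all numbers are invented for illustration, not taken from the package.

library(VGAM)
set.seed(123)
raw <- rexp(200, rate = 3)
y <- unique(cummax(raw))            # keep the upper record values, in order
fit <- vglm(y ~ 1, recexp1, trace = TRUE)
Coef(fit)                           # back-transformed estimate of 'rate' (true value 3)
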
@@ -2927,22 +2913,22 @@ recexp1.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- use.this = if ( .imethod == 1) median(y) + 1/8 else
+ use.this <- if ( .imethod == 1) median(y) + 1/8 else
weighted.mean(y,w)
if ( .dimension == 2) {
- myratio = exp(lgamma( .ostatistic + 0.5) -
+ myratio <- exp(lgamma( .ostatistic + 0.5) -
lgamma( .ostatistic ))
- density.init = if (is.Numeric( .idensity ))
- rep( .idensity, len = n) else
+ density.init <- if (is.Numeric( .idensity ))
+ rep( .idensity , len = n) else
rep(myratio^2 / (pi * use.this^2), len = n)
- etastart = theta2eta(density.init, .link, earg = .earg)
+ etastart <- theta2eta(density.init, .link, earg = .earg)
} else {
- myratio = exp(lgamma( .ostatistic +1/3) -
+ myratio <- exp(lgamma( .ostatistic +1/3) -
lgamma( .ostatistic ))
- density.init = if (is.Numeric( .idensity ))
- rep( .idensity, len = n) else
+ density.init <- if (is.Numeric( .idensity ))
+ rep( .idensity , len = n) else
rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
- etastart = theta2eta(density.init, .link, earg = .earg)
+ etastart <- theta2eta(density.init, .link, earg = .earg)
}
}
}), list( .link = link, .earg = earg, .ostatistic = ostatistic,
@@ -2951,20 +2937,21 @@ recexp1.control <- function(save.weight = TRUE, ...)
linkinv = eval(substitute(function(eta, extra = NULL) {
density = eta2theta(eta, .link, earg = .earg)
if ( .dimension == 2) {
- myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
+ myratio <- exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
myratio / sqrt(density * pi)
} else {
- myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
+ myratio <- exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
myratio / (4 * density * pi/3)^(1/3)
}
}, list( .link = link, .earg = earg, .ostatistic = ostatistic,
.dimension = dimension ))),
last = eval(substitute(expression({
- misc$link = c("density" = .link)
- misc$earg = list("density" = .earg)
- misc$expected = TRUE
- misc$ostatistic = .ostatistic
- misc$dimension = .dimension
+ misc$link <- c("density" = .link)
+ misc$earg <- list("density" = .earg)
+
+ misc$expected <- TRUE
+ misc$ostatistic <- .ostatistic
+ misc$dimension <- .dimension
}), list( .link = link, .earg = earg, .ostatistic = ostatistic,
.dimension = dimension ))),
loglikelihood = eval(substitute(
@@ -2983,22 +2970,22 @@ recexp1.control <- function(save.weight = TRUE, ...)
.dimension = dimension ))),
vfamily = c("poissonp"),
deriv = eval(substitute(expression({
- density = eta2theta(eta, .link, earg = .earg)
+ density <- eta2theta(eta, .link, earg = .earg)
if ( .dimension == 2) {
- dl.ddensity = .ostatistic / density - pi * y^2
+ dl.ddensity <- .ostatistic / density - pi * y^2
} else {
- dl.ddensity = .ostatistic / density - (4/3) * pi * y^3
+ dl.ddensity <- .ostatistic / density - (4/3) * pi * y^3
}
- ddensity.deta = dtheta.deta(density, .link, earg = .earg)
+ ddensity.deta <- dtheta.deta(density, .link, earg = .earg)
c(w) * dl.ddensity * ddensity.deta
}), list( .link = link, .earg = earg, .ostatistic = ostatistic,
.dimension = dimension ))),
weight = eval(substitute(expression({
- ned2l.ddensity2 = .ostatistic / density^2
- wz = ddensity.deta^2 * ned2l.ddensity2
+ ned2l.ddensity2 <- .ostatistic / density^2
+ wz <- ddensity.deta^2 * ned2l.ddensity2
c(w) * wz
}), list( .link = link, .earg = earg, .ostatistic = ostatistic,
.dimension = dimension ))))
diff --git a/R/family.fishing.R b/R/family.fishing.R
index 73cea0f..1ac7499 100644
--- a/R/family.fishing.R
+++ b/R/family.fishing.R
@@ -1,46 +1,47 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-DeLury = function(catch, effort,
- type=c("DeLury","Leslie"),
- ricker=FALSE) {
- type = match.arg(type, c("DeLury","Leslie"))[1]
- if (!is.logical(ricker)) stop("bad input for 'ricker'")
- if ((LLL <- Lcatch <- length(catch)) != (Leffort <- length(effort)))
- stop("length(catch) != length(effort)")
-
- CPUE = catch / effort
- if (type == "DeLury") {
- Et = cumsum(effort) - ifelse(ricker, 0.5, 1) * effort
- logCPUE = log(CPUE)
- lmfit = lm(logCPUE ~ Et, x = TRUE)
- myq = catchabilityCoefficient = -coef(lmfit)[2]
- N0 = exp(coef(lmfit)["(Intercept)"]) / myq
- } else {
- Kt = cumsum(catch) - ifelse(ricker, 0.5, 1) * catch
- lmfit = lm(CPUE ~ Kt, x = TRUE)
- myq = catchabilityCoefficient = -coef(lmfit)[2]
- N0 = coef(lmfit)["(Intercept)"] / myq
- }
-
- rlist =
- list(catch=catch,
- effort=effort,
- type=type,
- N0 = N0,
- CPUE = CPUE,
- lmfit=lmfit)
- if (type == "DeLury") {
- rlist$E = Et
- } else {
- rlist$K = Kt
- }
- rlist
+DeLury <- function(catch, effort,
+ type = c("DeLury", "Leslie"),
+ ricker = FALSE) {
+ type <- match.arg(type, c("DeLury", "Leslie"))[1]
+ if (!is.logical(ricker))
+ stop("bad input for argument 'ricker'")
+ if ((LLL <- Lcatch <- length(catch)) != (Leffort <- length(effort)))
+ stop("length(catch) != length(effort)")
+
+ CPUE <- catch / effort
+ if (type == "DeLury") {
+ Et <- cumsum(effort) - ifelse(ricker, 0.5, 1) * effort
+ logCPUE <- log(CPUE)
+ lmfit <- lm(logCPUE ~ Et, x = TRUE)
+ myq <- catchabilityCoefficient <- -coef(lmfit)[2]
+ N0 <- exp(coef(lmfit)["(Intercept)"]) / myq
+ } else {
+ Kt <- cumsum(catch) - ifelse(ricker, 0.5, 1) * catch
+ lmfit <- lm(CPUE ~ Kt, x = TRUE)
+ myq <- catchabilityCoefficient <- -coef(lmfit)[2]
+ N0 <- coef(lmfit)["(Intercept)"] / myq
+ }
+
+ rlist <-
+ list(catch = catch,
+ effort = effort,
+ type = type,
+ N0 = N0,
+ CPUE = CPUE,
+ lmfit = lmfit)
+ if (type == "DeLury") {
+ rlist$E <- Et
+ } else {
+ rlist$K <- Kt
+ }
+ rlist
}
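
As a quick check of the reformatted DeLury()/Leslie depletion estimator above, a sketch with made-up catch and effort series (values are purely illustrative):

library(VGAM)
catch  <- c(120, 95, 74, 63, 51, 40)   # hypothetical catches per period
effort <- c(10, 10, 12, 12, 14, 14)    # corresponding fishing effort
fit.d <- DeLury(catch, effort, type = "DeLury")  # lm() of log(CPUE) on cumulative effort
fit.l <- DeLury(catch, effort, type = "Leslie")  # lm() of CPUE on cumulative catch
fit.d$N0             # estimated initial population size
coef(fit.d$lmfit)    # intercept and slope (slope = minus the catchability coefficient)
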
@@ -48,13 +49,13 @@ DeLury = function(catch, effort,
-wffc.P1 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
- ifelse(length >= min.eligible, c1 + (ppm/100) *
- ceiling( signif(100*length, digits = 8) ), 0)
+wffc.P1 <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ ifelse(length >= min.eligible, c1 + (ppm/100) *
+ ceiling( signif(100 * length, digits = 8) ), 0)
-wffc.P1star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
- ifelse(length >= min.eligible, c1 + ppm * length, 0)
+wffc.P1star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ ifelse(length >= min.eligible, c1 + ppm * length, 0)
@@ -70,28 +71,29 @@ wffc.P1star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
-wffc.P2 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
- wffc.P1(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
- ifelse(length >= min.eligible,
+wffc.P2 <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ wffc.P1(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
+ ifelse(length >= min.eligible,
ceiling(100*(length-min.eligible))^2, 0)
-wffc.P2star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
- wffc.P1star(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
- ifelse(length >= min.eligible, 10000 * (length-min.eligible)^2, 0)
+
+wffc.P2star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
+ wffc.P1star(length, c1 = c1, min.eligible = min.eligible, ppm = ppm) +
+ ifelse(length >= min.eligible, 10000 * (length-min.eligible)^2, 0)
-wffc.P3 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
+wffc.P3 <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
- temp1 = floor((ceiling(100*length)/100) / min.eligible) # zz not sure
- temp1 = floor(length / min.eligible)
- ans = ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
- ans = ans + ifelse(temp1 >= 1, ppm * (ceiling(100*length)/100), 0)
- maxtemp1 = max(temp1, na.rm = TRUE)
+ temp1 <- floor((ceiling(100*length)/100) / min.eligible) # zz not sure
+ temp1 <- floor(length / min.eligible)
+ ans <- ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
+ ans <- ans + ifelse(temp1 >= 1, ppm * (ceiling(100*length)/100), 0)
+ maxtemp1 <- max(temp1, na.rm = TRUE)
if (maxtemp1 > 1)
for (ii in 2:maxtemp1) {
- ans = ans +
+ ans <- ans +
ifelse(ii < temp1, min.eligible * (ii-1) * ppm, 0) +
ifelse(ii == temp1, (ceiling(100*length)/100 -
ii*min.eligible) * (ii-1) * ppm, 0)
@@ -101,16 +103,16 @@ wffc.P3 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
-wffc.P3star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
- temp1 = floor(length / min.eligible)
- ans = ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
- ans = ans + ifelse(temp1 >= 1, length * ppm, 0)
- maxtemp1 = max(temp1, na.rm = TRUE)
+wffc.P3star <- function(length, c1 = 100, min.eligible = 0.18, ppm = 2000) {
+ temp1 <- floor(length / min.eligible)
+ ans <- ifelse(temp1 >= 1, c1, length * 0) # Handles NAs
+ ans <- ans + ifelse(temp1 >= 1, length * ppm, 0)
+ maxtemp1 <- max(temp1, na.rm = TRUE)
if (maxtemp1 > 1)
for (ii in 2:maxtemp1) {
- ans = ans + ifelse(ii < temp1, min.eligible * (ii-1) * ppm, 0) +
- ifelse(ii == temp1, (length - ii*min.eligible) *
- (ii-1) * ppm, 0)
+ ans <- ans + ifelse(ii < temp1, min.eligible * (ii-1) * ppm, 0) +
+ ifelse(ii == temp1, (length - ii*min.eligible) *
+ (ii-1) * ppm, 0)
}
ans
}
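
A small numerical sketch of the first pair of scoring rules above (wffc.P1 versus its smooth counterpart wffc.P1star); the lengths, in metres, are invented:

library(VGAM)
len <- c(0.15, 0.18, 0.25, 0.333)   # hypothetical fish lengths (m)
wffc.P1(len)      # 0 460 600 780  -- rounds up to whole cm, then ppm/100 points per cm
wffc.P1star(len)  # 0 460 600 766  -- smooth version: c1 + ppm * length, no rounding
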
diff --git a/R/family.functions.R b/R/family.functions.R
index 42e608f..cb3b0f5 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,15 +1,15 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-fill =
-fill1 = fill2 = fill3 =
-function(x, values = 0, ncolx = ncol(x)) {
- x = as.matrix(x)
- matrix(values, nrow = nrow(x), ncol = ncolx, byrow = TRUE)
+fill <-
+fill1 <- fill2 <- fill3 <-
+ function(x, values = 0, ncolx = ncol(x)) {
+ x <- as.matrix(x)
+ matrix(values, nrow = nrow(x), ncol = ncolx, byrow = TRUE)
}
@@ -20,22 +20,20 @@ extract.arg <- function(a) {
-remove.arg <- function(string)
-{
+remove.arg <- function(string) {
- nc <- nchar(string)
- bits <- substring(string, 1:nc, 1:nc)
- b1 <- (1:nc)[bits == "("]
- b1 <- if (length(b1)) b1[1]-1 else nc
- if (b1 == 0)
- return("")
- string <- paste(bits[1:b1], collapse = "")
- string
+ nc <- nchar(string)
+ bits <- substring(string, 1:nc, 1:nc)
+ b1 <- (1:nc)[bits == "("]
+ b1 <- if (length(b1)) b1[1]-1 else nc
+ if (b1 == 0)
+ return("")
+ string <- paste(bits[1:b1], collapse = "")
+ string
}
-add.arg <- function(string, arg.string)
-{
+add.arg <- function(string, arg.string) {
if (arg.string == "")
return(string)
@@ -45,243 +43,229 @@ add.arg <- function(string, arg.string)
if (substring(string, nc-1, nc-1) == "(") {
paste(substring(string, 1, nc-2), "(", arg.string, ")",
sep = "")
- } else
+ } else {
paste(substring(string, 1, nc-1), ", ", arg.string, ")",
sep = "")
+ }
} else {
paste(string, "(", arg.string, ")", sep = "")
}
}
-get.arg <- function(string)
-{
+get.arg <- function(string) {
nc <- nchar(string)
bits <- substring(string, 1:nc, 1:nc)
b1 <- (1:nc)[bits == "("]
b2 <- (1:nc)[bits == ")"]
- b1 <- if (length(b1)) min(b1) else return("") # stop('no "(" in string')
- b2 <- if (length(b2)) max(b2) else return("") # stop('no ")" in string')
+ b1 <- if (length(b1)) min(b1) else return("")
+ b2 <- if (length(b2)) max(b2) else return("")
if (b2-b1 == 1) "" else paste(bits[(1+b1):(b2-1)], collapse = "")
}
-ei <- function(i,n)
+
+ eifun <- function(i, n)
cbind(as.numeric((1:n) == i))
-ei = function(i, n)
- diag(n)[,i,drop = FALSE]
+ eifun <- function(i, n)
+ diag(n)[, i, drop = FALSE]
-eij = function(i, n) {
- temp = matrix(0, n, 1)
- if (length(i))
- temp[i,] = 1
- temp
+ eijfun <- function(i, n) {
+ temp <- matrix(0, n, 1)
+ if (length(i))
+ temp[i, ] <- 1
+ temp
}
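
The renamed indicator helpers above admit a one-line sanity check; note they are internal, so outside the package they would need to be sourced or reached via VGAM::: .

eifun(2, 4)          # 4 x 1 column vector: the 2nd column of diag(4)
eijfun(c(1, 3), 4)   # 4 x 1 column vector with 1s in rows 1 and 3, 0 elsewhere
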
-dneg.binomial <- function(x, k, prob)
-{
+dneg.binomial <- function(x, k, prob) {
care.exp(x * log1p(-prob) + k * log(prob) + lgamma(x+k) -
lgamma(k) - lgamma(x + 1))
}
-tapplymat1 <- function(mat,
- function.arg = c("cumsum", "diff", "cumprod"))
-{
+tapplymat1 <-
+ function(mat,
+ function.arg = c("cumsum", "diff", "cumprod")) {
- if (!missing(function.arg))
- function.arg <- as.character(substitute(function.arg))
- function.arg <- match.arg(function.arg,
- c("cumsum", "diff", "cumprod"))[1]
+ if (!missing(function.arg))
+ function.arg <- as.character(substitute(function.arg))
+ function.arg <- match.arg(function.arg,
+ c("cumsum", "diff", "cumprod"))[1]
- type <- switch(function.arg,
- cumsum = 1,
- diff = 2,
- cumprod = 3,
- stop("function.arg not matched"))
+ type <- switch(function.arg,
+ cumsum = 1,
+ diff = 2,
+ cumprod = 3,
+ stop("function.arg not matched"))
- if (!is.matrix(mat))
- mat <- as.matrix(mat)
- nr <- nrow(mat)
- nc <- ncol(mat)
- fred <- dotC(name = "tapplymat1", mat = as.double(mat),
- as.integer(nr), as.integer(nc), as.integer(type))
+ if (!is.matrix(mat))
+ mat <- as.matrix(mat)
+ nr <- nrow(mat)
+ nc <- ncol(mat)
+ fred <- dotC(name = "tapplymat1", mat = as.double(mat),
+ as.integer(nr), as.integer(nc), as.integer(type))
- dim(fred$mat) <- c(nr, nc)
- dimnames(fred$mat) <- dimnames(mat)
- switch(function.arg,
- cumsum = fred$mat,
- diff = fred$mat[,-1,drop = FALSE],
- cumprod = fred$mat)
+ dim(fred$mat) <- c(nr, nc)
+ dimnames(fred$mat) <- dimnames(mat)
+ switch(function.arg,
+ cumsum = fred$mat,
+ diff = fred$mat[, -1, drop = FALSE],
+ cumprod = fred$mat)
}
-matrix.power <- function(wz, M, power, fast = TRUE)
-{
+matrix.power <- function(wz, M, power, fast = TRUE) {
- n <- nrow(wz)
- index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- dimm.value <- if (is.matrix(wz)) ncol(wz) else 1
- if (dimm.value > M*(M+1)/2)
- stop("too many columns")
+ n <- nrow(wz)
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ dimm.value <- if (is.matrix(wz)) ncol(wz) else 1
+ if (dimm.value > M*(M+1)/2)
+ stop("too many columns")
- if (M == 1 || dimm.value == M) {
- WW <- wz^power # May contain NAs
- return(t(WW))
- }
+ if (M == 1 || dimm.value == M) {
+ WW <- wz^power # May contain NAs
+ return(t(WW))
+ }
- if (fast) {
- k <- veigen(t(wz), M = M) # matrix.arg)
- evals <- k$values # M x n
- evects <- k$vectors # M x M x n
- } else {
- stop("sorry, cannot handle matrix-band form yet")
- k <- unlist(apply(wz,3,eigen), use.names = FALSE)
- dim(k) <- c(M,M+1,n)
- evals <- k[,1,,drop = TRUE] # M x n
- evects <- k[,-1,,drop = TRUE] # M x M x n
- }
+ if (fast) {
+ k <- veigen(t(wz), M = M) # matrix.arg)
+ evals <- k$values # M x n
+ evects <- k$vectors # M x M x n
+ } else {
+ stop("sorry, cannot handle matrix-band form yet")
+ k <- unlist(apply(wz, 3, eigen), use.names = FALSE)
+ dim(k) <- c(M, M+1, n)
+ evals <- k[, 1, , drop = TRUE] # M x n
+ evects <- k[, -1, , drop = TRUE] # M x M x n
+ }
- temp <- evals^power # Some values may be NAs
+ temp <- evals^power # Some values may be NAs
- index <- as.vector( matrix(1, 1, M) %*% is.na(temp) )
+ index <- as.vector( matrix(1, 1, M) %*% is.na(temp) )
- index <- (index == 0)
- if (!all(index)) {
- warning(paste("Some weight matrices have negative",
- "eigenvalues. They\nwill be assigned NAs"))
- temp[,!index] <- 1
- }
+ index <- (index == 0)
+ if (!all(index)) {
+ warning(paste("Some weight matrices have negative",
+ "eigenvalues. They\nwill be assigned NAs"))
+ temp[,!index] <- 1
+ }
- WW <- mux55(evects, temp, M = M)
- WW[,!index] <- NA
- WW
+ WW <- mux55(evects, temp, M = M)
+ WW[,!index] <- NA
+ WW
}
-rss.vgam <- function(z, wz, M)
-{
+ResSS.vgam <- function(z, wz, M) {
- if (M == 1)
- return(sum(c(wz) * c(z^2)))
+ if (M == 1)
+ return(sum(c(wz) * c(z^2)))
- wz.z <- mux22(t(wz), z, M = M, as.matrix = TRUE)
- ans <- sum(wz.z * z)
- ans
+ wz.z <- mux22(t(wz), z, M = M, as.matrix = TRUE)
+ sum(wz.z * z)
}
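
For the scalar case the renamed ResSS.vgam() above reduces to an ordinary weighted residual sum of squares (an internal helper; illustrative values only):

z  <- c(0.5, -1.2, 0.3)
wz <- c(2, 1, 4)
ResSS.vgam(z, wz, M = 1)   # equals sum(wz * z^2) = 2.3
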
-wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE)
-{
- if (!matrix.arg)
- stop("currently, matrix.arg must be TRUE")
- y <- as.matrix(y)
- M <- ncol(y)
- n <- nrow(y)
- if (M == 1) {
- if (missing(w)) mean(y) else sum(w * y)/sum(w)
- } else {
- if (missing(w)) y %*% rep(1, n) else {
- numer <- mux22(t(w), y, M, as.matrix = TRUE)
- numer <- t(numer) %*% rep(1, n)
- denom <- t(w) %*% rep(1, n)
- denom <- matrix(denom, 1, length(denom))
- if (matrix.arg)
- denom <- m2adefault(denom, M = M)[,,1]
- c(solve(denom, numer))
- }
+wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE) {
+ if (!matrix.arg)
+ stop("currently, matrix.arg must be TRUE")
+ y <- as.matrix(y)
+ M <- ncol(y)
+ n <- nrow(y)
+ if (M == 1) {
+ if (missing(w)) mean(y) else sum(w * y)/sum(w)
+ } else {
+ if (missing(w)) y %*% rep(1, n) else {
+ numer <- mux22(t(w), y, M, as.matrix = TRUE)
+ numer <- t(numer) %*% rep(1, n)
+ denom <- t(w) %*% rep(1, n)
+ denom <- matrix(denom, 1, length(denom))
+ if (matrix.arg)
+ denom <- m2adefault(denom, M = M)[,,1]
+ c(solve(denom, numer))
}
+ }
}
-veigen <- function(x, M)
-{
-
-
- n <- ncol(x)
- index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- dimm.value <- nrow(x) # usually M or M(M+1)/2
-
- z <- dotFortran(name = "veigen",
- as.integer(M),
- as.integer(n),
- as.double(x),
- values = double(M * n),
- as.integer(1),
- vectors = double(M*M*n),
- double(M),
- double(M),
- wk = double(M*M),
- as.integer(index$row), as.integer(index$col),
- as.integer(dimm.value),
- error.code = integer(1))
-
- if (z$error.code)
- stop("eigen algorithm (rs) returned error code ",
- z$error.code)
- ord <- M:1
- dim(z$values) <- c(M, n)
- z$values <- z$values[ord,,drop = FALSE]
- dim(z$vectors) <- c(M, M, n)
- z$vectors <- z$vectors[, ord, , drop = FALSE]
- return(list(values = z$values,
- vectors = z$vectors))
-}
+veigen <- function(x, M) {
+ n <- ncol(x)
+ index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dimm.value <- nrow(x) # usually M or M(M+1)/2
+ z <- dotFortran(name = "veigen",
+ as.integer(M),
+ as.integer(n),
+ as.double(x),
+ values = double(M * n),
+ as.integer(1),
+ vectors = double(M*M*n),
+ double(M),
+ double(M),
+ wk = double(M*M),
+ as.integer(index$row), as.integer(index$col),
+ as.integer(dimm.value),
+ error.code = integer(1))
-
-ima <- function(j, k, M)
-{
- if (length(M) > 1 || M <= 0 || j <= 0 || k <= 0 ||
- j > M || k > M)
- stop("input wrong")
- m <- diag(M)
- m[col(m) <= row(m)] <- 1:(M*(M+1)/2)
- if (j >= k) m[j,k] else m[k,j]
+ if (z$error.code)
+ stop("eigen algorithm (rs) returned error code ", z$error.code)
+ ord <- M:1
+ dim(z$values) <- c(M, n)
+ z$values <- z$values[ord, , drop = FALSE]
+ dim(z$vectors) <- c(M, M, n)
+ z$vectors <- z$vectors[, ord, , drop = FALSE]
+ return(list(values = z$values,
+ vectors = z$vectors))
}
-checkwz <- function(wz, M, trace = FALSE,
- wzepsilon = .Machine$double.eps^0.75) {
- if (wzepsilon > 0.5)
- warning("'wzepsilon' is probably too large")
- if (!is.matrix(wz))
- wz = as.matrix(wz)
- if ((temp <- sum(wz[, 1:M, drop = FALSE] < wzepsilon)))
- warning(paste(temp, "elements replaced by",
- signif(wzepsilon, 5)))
- wz[, 1:M] = pmax(wzepsilon, wz[, 1:M])
- wz
-}
-
-
+ima <- function(j, k, M) {
+ if (length(M) > 1 || M <= 0 || j <= 0 || k <= 0 ||
+ j > M || k > M)
+ stop("input wrong in ima()")
+ m <- diag(M)
+ m[col(m) <= row(m)] <- 1:(M*(M+1)/2)
+ if (j >= k) m[j, k] else m[k, j]
+}
+checkwz <- function(wz, M, trace = FALSE,
+ wzepsilon = .Machine$double.eps^0.75) {
+ if (wzepsilon > 0.5)
+ warning("'wzepsilon' is probably too large")
+ if (!is.matrix(wz))
+ wz <- as.matrix(wz)
+ if ((temp <- sum(wz[, 1:M, drop = FALSE] < wzepsilon)))
+ warning(paste(temp, "elements replaced by",
+ signif(wzepsilon, 5)))
+ wz[, 1:M] <- pmax(wzepsilon, wz[, 1:M])
+ wz
+}
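
checkwz() above simply clamps working weights that have fallen below a small threshold; a quick illustrative call (again an internal helper, with hypothetical values):

wz <- cbind(c(1, 2, 0), c(3, 0, 4))   # two entries below the default wzepsilon
checkwz(wz, M = 2)   # warns "2 elements replaced by ..." and bumps both zeros
                     # up to .Machine$double.eps^0.75
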
diff --git a/R/family.genetic.R b/R/family.genetic.R
index c08c125..1068685 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,8 +14,7 @@
G1G2G3 <- function(link = "logit",
- ip1 = NULL, ip2 = NULL, iF = NULL)
-{
+ ip1 = NULL, ip2 = NULL, iF = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -25,21 +24,21 @@
new("vglmff",
blurb = c("G1-G2-G3 phenotype\n\n",
- "Links: ",
- namesof("p1", link, earg = earg), ", ",
- namesof("p2", link, earg = earg), ", ",
- namesof("f", link, earg = earg, tag = FALSE)),
+ "Links: ",
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg), ", ",
+ namesof("f", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
+ ok.col.ny <- c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -48,43 +47,44 @@
"c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
}
- predictors.names <-
- c(namesof("p1", .link , earg = .earg , tag = FALSE),
- namesof("p2", .link , earg = .earg , tag = FALSE),
- namesof("f", .link , earg = .earg , tag = FALSE))
+ predictors.names <-
+ c(namesof("p1", .link , earg = .earg , tag = FALSE),
+ namesof("p2", .link , earg = .earg , tag = FALSE),
+ namesof("f", .link , earg = .earg , tag = FALSE))
- if (is.null(etastart)) {
+ if (is.null(etastart)) {
- mydeterminant = mustart[, 2] * mustart[, 3] +
- mustart[, 2] * mustart[, 5] +
- mustart[, 3] * mustart[, 5]
- p1 = if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
- mustart[, 2] * mustart[, 3] / mydeterminant
- p2 = if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
- mustart[, 2] * mustart[, 5] / mydeterminant
- ff = if (is.numeric( .iF )) rep( .iF , len = n) else
- abs(1 - mustart[, 2] / (2 * p1 * p2))
+ mydeterminant <- mustart[, 2] * mustart[, 3] +
+ mustart[, 2] * mustart[, 5] +
+ mustart[, 3] * mustart[, 5]
+ p1 <- if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
+ mustart[, 2] * mustart[, 3] / mydeterminant
+ p2 <- if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
+ mustart[, 2] * mustart[, 5] / mydeterminant
+ ff <- if (is.numeric( .iF )) rep( .iF , len = n) else
+ abs(1 - mustart[, 2] / (2 * p1 * p2))
- if (any(p1 <= 0) || any(p1 >= 1))
- stop("bad initial value for 'p1'")
- if (any(p2 <= 0) || any(p2 >= 1))
- stop("bad initial value for 'p2'")
+ if (any(p1 <= 0) || any(p1 >= 1))
+ stop("bad initial value for 'p1'")
+ if (any(p2 <= 0) || any(p2 >= 1))
+ stop("bad initial value for 'p2'")
- etastart = cbind(theta2eta(p1, .link , earg = .earg ),
- theta2eta(p2, .link , earg = .earg ),
- theta2eta(ff, .link , earg = .earg ))
- mustart <- NULL # Since etastart has been computed.
+ etastart <-
+ cbind(theta2eta(p1, .link , earg = .earg ),
+ theta2eta(p2, .link , earg = .earg ),
+ theta2eta(ff, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
- }
+ }
}), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
.earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- f = eta2theta(eta[, 3], link = .link , earg = .earg )
- p3 = abs(1 - p1 - p2)
+ p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ f <- eta2theta(eta[, 3], link = .link , earg = .earg )
+ p3 <- abs(1 - p1 - p2)
cbind("G1G1" = f*p1+(1-f)*p1^2,
"G1G2" = 2*p1*p2*(1-f),
"G1G3" = 2*p1*p3*(1-f),
@@ -94,9 +94,11 @@
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(p1 = .link , p2 = .link , f = .link )
- misc$earg = list(p1 = .earg , p2 = .earg , f = .earg )
- misc$expected = TRUE
+ misc$link <- c(p1 = .link , p2 = .link , f = .link )
+
+ misc$earg <- list(p1 = .earg , p2 = .earg , f = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
@@ -107,34 +109,35 @@
},
vfamily = c("G1G2G3", "vgenetic"),
deriv = eval(substitute(expression({
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- p3 = 1-p1-p2
- f = eta2theta(eta[, 3], link = .link , earg = .earg )
- dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
+ p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ p3 <- 1-p1-p2
+ f <- eta2theta(eta[, 3], link = .link , earg = .earg )
+ dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
0, -2*(1-f)*p2, -f - 2*p3*(1-f))
- dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
+ dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
- dP3 = cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
+ dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
p3*(1-p3))
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
- dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link , earg = .earg )
+ dl1 <- rowSums(y * dP1 / mu)
+ dl2 <- rowSums(y * dP2 / mu)
+ dl3 <- rowSums(y * dP3 / mu)
+ dPP.deta <- dtheta.deta(cbind(p1, p2, f),
+ link = .link , earg = .earg )
c(w) * cbind(dPP.deta[, 1] * dl1,
- dPP.deta[, 2] * dl2,
- dPP.deta[, 3] * dl3)
+ dPP.deta[, 2] * dl2,
+ dPP.deta[, 3] * dl3)
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
+ dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[, , i1, drop = TRUE] *
- dPP[, , i2, drop = TRUE] / mu) *
- dPP.deta[, i1] * dPP.deta[, i2]
+ index <- iam(i1,i2, M)
+ wz[,index] <- rowSums(dPP[, , i1, drop = TRUE] *
+ dPP[, , i2, drop = TRUE] / mu) *
+ dPP.deta[, i1] * dPP.deta[, i2]
}
c(w) * wz
}), list( .link = link, .earg = earg))))
@@ -142,8 +145,7 @@
- AAaa.nohw <- function(link = "logit", ipA = NULL, iF = NULL)
-{
+ AAaa.nohw <- function(link = "logit", ipA = NULL, iF = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -152,56 +154,58 @@
new("vglmff",
blurb = c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
- "Links: ",
- namesof("pA", link, earg = earg), ", ",
- namesof("f", "identity", tag = FALSE)),
+ "Links: ",
+ namesof("pA", link, earg = earg), ", ",
+ namesof("f", "identity", tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
- if (length(mustart.orig))
- mustart = mustart.orig
+ if (length(mustart.orig))
+ mustart <- mustart.orig
- ok.col.ny = c("AA","Aa","aa")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as c('AA','Aa','aa')")
- }
+ ok.col.ny <- c("AA","Aa","aa")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
+ }
predictors.names <-
c(namesof("pA", .link , earg = .earg , tag = FALSE),
namesof("f", "identity", earg = list(), tag = FALSE))
if (is.null(etastart)) {
- pA = if (is.numeric( .ipA )) rep( .ipA , len = n) else
- c(sqrt(mustart[, 1] - mustart[, 2] / 2))
- f = if (is.numeric( .iF )) rep( .iF , len = n) else
- rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
+ pA <- if (is.numeric( .ipA )) rep( .ipA , len = n) else
+ c(sqrt(mustart[, 1] - mustart[, 2] / 2))
+ f <- if (is.numeric( .iF )) rep( .iF , len = n) else
+ rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
if (any(pA <= 0) || any(pA >= 1))
- stop("bad initial value for 'pA'")
- etastart = cbind(theta2eta(pA, .link , earg = .earg ),
- theta2eta(f, "identity"))
+ stop("bad initial value for 'pA'")
+ etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
+ theta2eta(f, "identity"))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .ipA = ipA, .iF = iF, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pA = eta2theta(eta[, 1], link = .link , earg = .earg )
- f = eta2theta(eta[, 2], link = "identity", earg = list())
+ pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ f <- eta2theta(eta[, 2], link = "identity", earg = list())
cbind(AA = pA^2+pA*(1-pA)*f,
Aa = 2*pA*(1-pA)*(1-f),
aa = (1-pA)^2 + pA*(1-pA)*f)
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(pA = .link , f = "identity")
- misc$earg = list(pA = .earg , f = list() )
- misc$expected = TRUE
+ misc$link <- c(pA = .link , f = "identity")
+
+ misc$earg <- list(pA = .earg , f = list() )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
@@ -213,33 +217,33 @@
},
vfamily = c("AAaa.nohw", "vgenetic"),
deriv = eval(substitute(expression({
- pA = eta2theta(eta[, 1], link = .link , earg = .earg )
- f = eta2theta(eta[, 2], link = "identity")
- dP1 = cbind(f + 2*pA*(1-f),
- 2*(1-f)*(1-2*pA),
- -2*(1-pA) +f*(1-2*pA))
- dP2 = cbind(pA*(1-pA),
- -2*pA*(1-pA),
- pA*(1-pA))
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
-
- dPP.deta = dtheta.deta(pA, link = .link , earg = .earg )
+ pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ f <- eta2theta(eta[, 2], link = "identity")
+ dP1 <- cbind(f + 2*pA*(1-f),
+ 2*(1-f)*(1-2*pA),
+ -2*(1-pA) +f*(1-2*pA))
+ dP2 <- cbind(pA*(1-pA),
+ -2*pA*(1-pA),
+ pA*(1-pA))
+ dl1 <- rowSums(y * dP1 / mu)
+ dl2 <- rowSums(y * dP2 / mu)
+
+ dPP.deta <- dtheta.deta(pA, link = .link , earg = .earg )
c(w) * cbind(dPP.deta * dl1,
- dl2)
+ dl2)
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- dPP = array(c(dP1, dP2), c(n, 3, 2))
- dPP.deta = cbind(dtheta.deta(pA, link = .link , earg = .earg ),
- dtheta.deta(f, link = "identity"))
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ dPP <- array(c(dP1, dP2), c(n, 3, 2))
+ dPP.deta <- cbind(dtheta.deta(pA, link = .link , earg = .earg ),
+ dtheta.deta(f, link = "identity"))
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
for(i1 in 1:M)
- for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
- dPP[,,i2,drop = TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
+ for(i2 in i1:M) {
+ index <- iam(i1, i2, M)
+ wz[,index] <- rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
+ dPP.deta[, i1] * dPP.deta[, i2]
}
c(w) * wz
}), list( .link = link, .earg = earg))))
@@ -248,8 +252,7 @@
- AB.Ab.aB.ab2 <- function(link = "logit", init.p = NULL)
-{
+ AB.Ab.aB.ab2 <- function(link = "logit", init.p = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -258,20 +261,20 @@
new("vglmff",
blurb = c("AB-Ab-aB-ab2 phenotype\n\n",
- "Links: ",
- namesof("p", link, earg = earg)),
+ "Links: ",
+ namesof("p", link, earg = earg)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("AB","Ab","aB","ab")
+ ok.col.ny <- c("AB","Ab","aB","ab")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -280,14 +283,14 @@
}
if (is.null(etastart)) {
- p.init = if (is.numeric(.init.p)) rep(.init.p, n) else
+ p.init <- if (is.numeric(.init.p)) rep(.init.p, n) else
c(1 - 2 * sqrt(mustart[, 4]))
- etastart = theta2eta(p.init, .link , earg = .earg )
+ etastart <- theta2eta(p.init, .link , earg = .earg )
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .init.p=init.p, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- p = eta2theta(eta, link = .link , earg = .earg )
+ p <- eta2theta(eta, link = .link , earg = .earg )
cbind("AB" = (2+(1-p)^2),
"Ab" = (1-(1-p)^2),
"aB" = (1-(1-p)^2),
@@ -295,39 +298,40 @@
}, list( .link = link, .earg = earg) )),
last = eval(substitute(expression({
- misc$link = c(p = .link )
- misc$earg = list(p = .earg )
- misc$expected = TRUE
+ misc$link <- c(p = .link )
+
+ misc$earg <- list(p = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg) )),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
vfamily = c("AB.Ab.aB.ab2", "vgenetic"),
deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link , earg = .earg )
- dP1 = cbind(-0.5*(1-pp),
- 0.5*(1-pp),
- 0.5*(1-pp),
- -0.5*(1-pp))
- dl1 = rowSums(y * dP1 / mu)
- dPP.deta = dtheta.deta(pp, link = .link , earg = .earg )
+ pp <- eta2theta(eta, link = .link , earg = .earg )
+ dP1 <- cbind(-0.5*(1-pp),
+ 0.5*(1-pp),
+ 0.5*(1-pp),
+ -0.5*(1-pp))
+ dl1 <- rowSums(y * dP1 / mu)
+ dPP.deta <- dtheta.deta(pp, link = .link , earg = .earg )
c(w) * dPP.deta * dl1
- }), list( .link = link, .earg = earg) )),
+ }), list( .link = link, .earg = earg) )),
weight = eval(substitute(expression({
- wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
+ wz <- rowSums(dP1 * dP1 / mu) * dPP.deta^2
c(w) * wz
}), list( .link = link, .earg = earg) )))
}
- A1A2A3 <- function(link = "logit", ip1 = NULL, ip2 = NULL)
-{
+ A1A2A3 <- function(link = "logit", ip1 = NULL, ip2 = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -336,20 +340,20 @@
new("vglmff",
blurb = c("A1A2A3 Allele System ",
"(A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
- "Links: ",
- namesof("p1", link, earg = earg), ", ",
- namesof("p2", link, earg = earg, tag = FALSE)),
+ "Links: ",
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("A1A1","A1A2","A2A2","A1A3","A2A3","A3A3")
+ ok.col.ny <- c("A1A1","A1A2","A2A2","A1A3","A2A3","A3A3")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -363,19 +367,19 @@
namesof("pB", .link , earg = .earg , tag = FALSE))
if (is.null(etastart)) {
- p1 = if (is.numeric(.ip1)) rep(.ip1, n) else
+ p1 <- if (is.numeric(.ip1)) rep(.ip1, n) else
c(sqrt(mustart[, 1]))
- p2 = if (is.numeric(.ip2)) rep(.ip2, n) else
+ p2 <- if (is.numeric(.ip2)) rep(.ip2, n) else
c(sqrt(mustart[, 3]))
- etastart = cbind(theta2eta(p1, .link , earg = .earg ),
- theta2eta(p2, .link , earg = .earg ))
+ etastart <- cbind(theta2eta(p1, .link , earg = .earg ),
+ theta2eta(p2, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- qq = abs(1 - p1 - p2)
+ p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ qq <- abs(1 - p1 - p2)
cbind(A1A1 = p1*p1,
A1A2 = 2*p1*p2,
A2A2 = p2*p2,
@@ -385,9 +389,11 @@
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(p1 = .link , p2 = .link )
- misc$earg = list(p1 = .earg , p2 = .earg )
- misc$expected = TRUE
+ misc$link <- c(p1 = .link , p2 = .link )
+
+ misc$earg <- list(p1 = .earg , p2 = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
@@ -399,27 +405,27 @@
},
vfamily = c("A1A2A3", "vgenetic"),
deriv = eval(substitute(expression({
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
- dl.dp1 = (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
- dl.dp2 = (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+ dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+ dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
- dp1.deta = dtheta.deta(p1, link = .link , earg = .earg )
- dp2.deta = dtheta.deta(p2, link = .link , earg = .earg )
+ dp1.deta <- dtheta.deta(p1, link = .link , earg = .earg )
+ dp2.deta <- dtheta.deta(p2, link = .link , earg = .earg )
c(w) * cbind(dl.dp1 * dp1.deta,
dl.dp2 * dp2.deta)
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- qq = 1-p1-p2
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp12 = 2 * (1/p1 + 1/qq)
- ed2l.dp22 = 2 * (1/p2 + 1/qq)
- ed2l.dp1dp2 = 2 / qq
- wz[, iam(1, 1, M)] = dp1.deta^2 * ed2l.dp12
- wz[, iam(2, 2, M)] = dp2.deta^2 * ed2l.dp22
- wz[, iam(1, 2, M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
+ qq <- 1-p1-p2
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ned2l.dp12 <- 2 * (1/p1 + 1/qq)
+ ned2l.dp22 <- 2 * (1/p2 + 1/qq)
+ ned2l.dp1dp2 <- 2 / qq
+ wz[, iam(1, 1, M)] <- ned2l.dp12 * dp1.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dp22 * dp2.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dp1dp2 * dp1.deta * dp2.deta
c(w) * wz
}), list( .link = link, .earg = earg))))
}
@@ -428,8 +434,7 @@
MNSs <- function(link = "logit",
- imS = NULL, ims = NULL, inS = NULL)
-{
+ imS = NULL, ims = NULL, inS = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -438,21 +443,21 @@
new("vglmff",
blurb = c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
- "Links: ",
- namesof("mS", link, earg = earg), ", ",
- namesof("ms", link, earg = earg), ", ",
- namesof("nS", link, earg = earg, tag = FALSE)),
+ "Links: ",
+ namesof("mS", link, earg = earg), ", ",
+ namesof("ms", link, earg = earg), ", ",
+ namesof("nS", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("MS","Ms","MNS","MNs","NS","Ns")
+ ok.col.ny <- c("MS","Ms","MNS","MNs","NS","Ns")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -467,24 +472,24 @@
namesof("nS", .link , earg = .earg , tag = FALSE))
if (is.null(etastart)) {
- ms = if (is.numeric(.ims)) rep(.ims, n) else
+ ms <- if (is.numeric(.ims)) rep(.ims, n) else
c(sqrt(mustart[, 2]))
- ns = c(sqrt(mustart[,6]))
- nS = if (is.numeric(.inS)) rep(.inS, n) else
+ ns <- c(sqrt(mustart[,6]))
+ nS <- if (is.numeric(.inS)) rep(.inS, n) else
c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
- mS = if (is.numeric(.imS)) rep(.imS, n) else
+ mS <- if (is.numeric(.imS)) rep(.imS, n) else
1-ns-ms-nS
- etastart = cbind(theta2eta(mS, .link , earg = .earg ),
+ etastart <- cbind(theta2eta(mS, .link , earg = .earg ),
theta2eta(ms, .link , earg = .earg ),
theta2eta(nS, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mS = eta2theta(eta[, 1], link = .link , earg = .earg )
- ms = eta2theta(eta[, 2], link = .link , earg = .earg )
- nS = eta2theta(eta[, 3], link = .link , earg = .earg )
- ns = abs(1 - mS - ms - nS)
+ mS <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS <- eta2theta(eta[, 3], link = .link , earg = .earg )
+ ns <- abs(1 - mS - ms - nS)
cbind(MS = mS^2 + 2*mS*ms,
Ms = ms^2,
MNS = 2*(mS*nS + ms*nS + mS*ns),
@@ -494,43 +499,44 @@
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(mS = .link , ms = .link , nS = .link )
- misc$earg = list(mS = .earg , ms = .earg , nS = .earg )
+ misc$link <- c(mS = .link , ms = .link , nS = .link )
- misc$expected = TRUE
+ misc$earg <- list(mS = .earg , ms = .earg , nS = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
vfamily = c("MNSs", "vgenetic"),
deriv = eval(substitute(expression({
- mS = eta2theta(eta[, 1], link = .link , earg = .earg )
- ms = eta2theta(eta[, 2], link = .link , earg = .earg )
- nS = eta2theta(eta[, 3], link = .link , earg = .earg )
- ns = 1-mS-ms-nS
- dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
- dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
- dP3 = cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
- dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link , earg = .earg )
+ mS <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS <- eta2theta(eta[, 3], link = .link , earg = .earg )
+ ns <- 1-mS-ms-nS
+ dP1 <- cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
+ dP2 <- cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
+ dP3 <- cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
+ dl1 <- rowSums(y * dP1 / mu)
+ dl2 <- rowSums(y * dP2 / mu)
+ dl3 <- rowSums(y * dP3 / mu)
+ dPP.deta <- dtheta.deta(cbind(mS, ms, nS), link = .link , earg = .earg )
c(w) * dPP.deta * cbind(dl1, dl2, dl3)
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ dPP <- array(c(dP1,dP2,dP3), c(n,6, 3))
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
- dPP[,,i2,drop = TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
+ index <- iam(i1,i2, M)
+ wz[,index] <- rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
}
c(w) * wz
}), list( .link = link, .earg = earg))))
@@ -541,8 +547,7 @@
- ABO <- function(link = "logit", ipA = NULL, ipO = NULL)
-{
+ ABO <- function(link = "logit", ipA = NULL, ipO = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -556,15 +561,15 @@
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("A","B","AB","O")
+ ok.col.ny <- c("A","B","AB","O")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -578,12 +583,12 @@
namesof("pB", .link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
- pO = if (is.Numeric( .ipO )) rep( .ipO , len = n) else
+ pO <- if (is.Numeric( .ipO )) rep( .ipO , len = n) else
c(sqrt(mustart[, 4]))
- pA = if (is.Numeric( .ipA )) rep( .ipA , len = n) else
+ pA <- if (is.Numeric( .ipA )) rep( .ipA , len = n) else
c(1 - sqrt(mustart[, 2] + mustart[, 4]))
- pB = abs(1 - pA - pO)
- etastart = cbind(theta2eta(pA, .link , earg = .earg ),
+ pB <- abs(1 - pA - pO)
+ etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
theta2eta(pB, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
@@ -591,9 +596,9 @@
linkinv = eval(substitute(function(eta, extra = NULL) {
- pA = eta2theta(eta[, 1], link = .link , earg = .earg )
- pB = eta2theta(eta[, 2], link = .link , earg = .earg )
- pO = abs(1 - pA - pB)
+ pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ pB <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ pO <- abs(1 - pA - pB)
cbind(A = pA*(pA+2*pO),
B = pB*(pB+2*pO),
AB = 2*pA*pB,
@@ -601,9 +606,11 @@
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(pA = .link , pB = .link )
- misc$earg = list(pA = .earg , pB = .earg )
- misc$expected = TRUE
+ misc$link <- c(pA = .link , pB = .link )
+
+ misc$earg <- list(pA = .earg , pB = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
@@ -618,37 +625,37 @@
vfamily = c("ABO", "vgenetic"),
deriv = eval(substitute(expression({
- ppp = eta2theta(eta[, 1], link = .link , earg = .earg )
- qqq = eta2theta(eta[, 2], link = .link , earg = .earg )
- rrr = abs(1 - ppp - qqq)
+ ppp <- eta2theta(eta[, 1], link = .link , earg = .earg )
+ qqq <- eta2theta(eta[, 2], link = .link , earg = .earg )
+ rrr <- abs(1 - ppp - qqq)
- pbar = 2*rrr + ppp
- qbar = 2*rrr + qqq
- naa = y[, 1]
- nbb = y[, 2]
- nab = y[, 3]
- noo = y[, 4]
+ pbar <- 2*rrr + ppp
+ qbar <- 2*rrr + qqq
+ naa <- y[, 1]
+ nbb <- y[, 2]
+ nab <- y[, 3]
+ noo <- y[, 4]
- dl.dp = (naa+nab)/ppp - naa/pbar - 2*nbb/qbar - 2*noo/rrr
- dl.dq = (nbb+nab)/qqq - 2*naa/pbar - nbb/qbar - 2*noo/rrr
- dp.deta = dtheta.deta(ppp, link = .link , earg = .earg )
- dq.deta = dtheta.deta(qqq, link = .link , earg = .earg )
+ dl.dp <- (naa+nab)/ppp - naa/pbar - 2*nbb/qbar - 2*noo/rrr
+ dl.dq <- (nbb+nab)/qqq - 2*naa/pbar - nbb/qbar - 2*noo/rrr
+ dp.deta <- dtheta.deta(ppp, link = .link , earg = .earg )
+ dq.deta <- dtheta.deta(qqq, link = .link , earg = .earg )
c(w) * cbind(dl.dp * dp.deta,
dl.dq * dq.deta)
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp2 = (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
- ed2l.dq2 = (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
- ed2l.dpdq = 2 * (1 + qqq/qbar + ppp/pbar)
+ ned2l.dp2 <- (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
+ ned2l.dq2 <- (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
+ ned2l.dpdq <- 2 * (1 + qqq/qbar + ppp/pbar)
- wz[, iam(1, 1, M)] = ed2l.dp2 * dp.deta^2
- wz[, iam(2, 2, M)] = ed2l.dq2 * dq.deta^2
- wz[, iam(1, 2, M)] = ed2l.dpdq * dp.deta * dq.deta
+ wz[, iam(1, 1, M)] <- ned2l.dp2 * dp.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dq2 * dq.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dpdq * dp.deta * dq.deta
c(w) * wz
}), list( .link = link, .earg = earg))))
}
@@ -656,8 +663,7 @@
- AB.Ab.aB.ab <- function(link = "logit", init.p = NULL)
-{
+ AB.Ab.aB.ab <- function(link = "logit", init.p = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -665,18 +671,18 @@
new("vglmff",
blurb = c("AB-Ab-aB-ab phenotype\n\n",
- "Links: ", namesof("p", link, earg = earg, tag = TRUE)),
+ "Links: ", namesof("p", link, earg = earg, tag = TRUE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("AB","Ab","aB","ab")
+ ok.col.ny <- c("AB","Ab","aB","ab")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -688,15 +694,15 @@
predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
if (is.null(etastart)) {
- p = if (is.numeric( .init.p )) rep(.init.p, len = n) else
+ p <- if (is.numeric( .init.p )) rep(.init.p, len = n) else
c(sqrt(4 * mustart[, 4]))
- etastart = cbind(theta2eta(p, .link , earg = .earg ))
+ etastart <- cbind(theta2eta(p, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .init.p=init.p, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- p = eta2theta(eta, link = .link , earg = .earg )
- pp4 = p * p / 4
+ p <- eta2theta(eta, link = .link , earg = .earg )
+ pp4 <- p * p / 4
cbind(AB = 0.5 + pp4,
Ab = 0.25 - pp4,
aB = 0.25 - pp4,
@@ -704,9 +710,11 @@
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c(p = .link )
- misc$earg = list(p = .earg )
- misc$expected = TRUE
+ misc$link <- c(p = .link )
+
+ misc$earg <- list(p = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
@@ -718,31 +726,30 @@
},
vfamily = c("AB.Ab.aB.ab", "vgenetic"),
deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link , earg = .earg )
+ pp <- eta2theta(eta, link = .link , earg = .earg )
- p2 = pp*pp
- nAB = w * y[, 1]
- nAb = w * y[, 2]
- naB = w * y[, 3]
- nab = w * y[, 4]
+ p2 <- pp*pp
+ nAB <- w * y[, 1]
+ nAb <- w * y[, 2]
+ naB <- w * y[, 3]
+ nab <- w * y[, 4]
- dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
+ dl.dp <- 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
- dp.deta = dtheta.deta(pp, link = .link , earg = .earg )
+ dp.deta <- dtheta.deta(pp, link = .link , earg = .earg )
dl.dp * dp.deta
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- ed2l.dp2 = 4 * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
- wz = cbind((dp.deta^2) * ed2l.dp2)
+ ned2l.dp2 <- 4 * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
+ wz <- cbind((dp.deta^2) * ned2l.dp2)
c(w) * wz
}), list( .link = link, .earg = earg))))
}
- AA.Aa.aa <- function(link = "logit", init.pA = NULL)
-{
+ AA.Aa.aa <- function(link = "logit", init.pA = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -750,18 +757,18 @@
new("vglmff",
blurb = c("AA-Aa-aa phenotype\n\n",
- "Links: ", namesof("pA", link, earg = earg)),
+ "Links: ", namesof("pA", link, earg = earg)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
- delete.zero.colns = FALSE
+ delete.zero.colns <- FALSE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
- ok.col.ny = c("AA","Aa","aa")
+ ok.col.ny <- c("AA","Aa","aa")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
@@ -773,23 +780,25 @@
predictors.names <- namesof("pA", .link , earg = .earg , tag = FALSE)
if (is.null(etastart)) {
- pA = if (is.numeric(.init.pA)) rep(.init.pA, n) else
+ pA <- if (is.numeric(.init.pA)) rep(.init.pA, n) else
c(sqrt(mustart[, 1]))
- etastart = cbind(theta2eta(pA, .link , earg = .earg ))
+ etastart <- cbind(theta2eta(pA, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .init.pA=init.pA, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- pA = eta2theta(eta, link = .link , earg = .earg )
- pp = pA*pA
+ pA <- eta2theta(eta, link = .link , earg = .earg )
+ pp <- pA*pA
cbind(AA = pp,
Aa = 2*pA*(1-pA),
aa = (1-pA)^2)
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
- misc$link = c("pA" = .link )
- misc$earg = list("pA" = .earg )
+ misc$link <- c("pA" = .link )
+
+ misc$earg <- list("pA" = .earg )
+
misc$expected = TRUE
}), list( .link = link, .earg = earg))),
@@ -802,17 +811,17 @@
},
vfamily = c("AA.Aa.aa", "vgenetic"),
deriv = eval(substitute(expression({
- pA = eta2theta(eta, link = .link , earg = .earg )
- nAA = w * y[, 1]
- nAa = w * y[, 2]
- naa = w * y[, 3]
- dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
- dpA.deta = dtheta.deta(pA, link = .link , earg = .earg )
+ pA <- eta2theta(eta, link = .link , earg = .earg )
+ nAA <- w * y[, 1]
+ nAa <- w * y[, 2]
+ naa <- w * y[, 3]
+ dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+ dpA.deta <- dtheta.deta(pA, link = .link , earg = .earg )
dl.dpA * dpA.deta
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
- wz = cbind((dpA.deta^2) * d2l.dp2)
+ ned2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
+ wz <- cbind((dpA.deta^2) * ned2l.dp2)
wz
}), list( .link = link, .earg = earg))))
}
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index a3168e0..ea46f8b 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -15,15 +15,15 @@
binomialff <- function(link = "logit",
dispersion = 1, mv = FALSE, onedpar = !mv,
- parallel = FALSE, zero = NULL,
+ parallel = FALSE, apply.parint = FALSE,
+ zero = NULL,
bred = FALSE,
- earg.link = FALSE)
+ earg.link = FALSE) {
-{
+ if (!is.logical(bred) || length(bred) > 1)
+ stop("argument 'bred' must be a single logical")
- if (bred)
- stop("currently 'bred = TRUE' is not working")
estimated.dispersion <- dispersion == 0
@@ -43,20 +43,25 @@
ans <-
new("vglmff",
- blurb = if (mv) c("Multivariate binomial model\n\n",
+ blurb = if (mv) c("Multiple binomial model\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg), "\n",
"Variance: mu[,j]*(1-mu[,j])") else
c("Binomial model\n\n",
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu * (1 - mu)"),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ apply.int = .apply.parint )
+
constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .parallel = parallel, .zero = zero ))),
+ }), list( .zero = zero,
+ .parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(Musual = 1,
+ bred = .bred ,
zero = .zero )
- }, list( .zero = zero ))),
+ }, list( .zero = zero,
+ .bred = bred ))),
initialize = eval(substitute(expression({
assign("CQO.FastAlgorithm",
@@ -81,14 +86,14 @@
w <- temp5$w
y <- temp5$y
- M = ncol(y)
+ M <- ncol(y)
if (!all(y == 0 | y == 1))
stop("response must contain 0's and 1's only")
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
paste("mu", 1:M, sep = "")
@@ -98,33 +103,33 @@
"mu", .link , earg = .earg , short = TRUE)
if (!length(mustart) && !length(etastart))
- mustart = matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
+ mustart <- matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
byrow = TRUE)
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
- extra$mv = TRUE
+ extra$mv <- TRUE
} else {
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
- NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
+ NCOL <- function (x) if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = (y != levels(y)[1])
- nvec = rep(1, n)
+ if (is.factor(y)) y <- (y != levels(y)[1])
+ nvec <- rep(1, n)
y[w == 0] <- 0
- if (!all(y == 0 || y == 1))
+ if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1 + w)
+ mustart <- (0.5 + w * y) / (1 + w)
- no.successes = y
+ no.successes <- y
if (min(y) < 0)
stop("Negative data not allowed!")
if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
@@ -134,12 +139,12 @@
stop("Negative data not allowed!")
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else {
stop("for the binomialff family, response 'y' must be a ",
"vector of 0 and 1's\n",
@@ -150,10 +155,25 @@
predictors.names <-
namesof("mu", .link , earg = .earg , short = TRUE)
}
- }), list( .link = link, .mv = mv, .earg = earg))),
+
+
+ if ( .bred ) {
+ if ( !control$save.weight ) {
+ save.weight <- control$save.weight <- TRUE
+ }
+ }
+
+
+
+ }), list( .link = link, .mv = mv,
+ .earg = earg, .bred = bred ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
mu <- eta2theta(eta, link = .link , earg = .earg )
+
+
+ colnames(mu) <- NULL
+
mu
}, list( .link = link, .earg = earg ))),
@@ -165,33 +185,36 @@
dpar <- .dispersion
if (!dpar) {
- temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link = .link ,
+ temp87 <- (y-mu)^2 * wz / (dtheta.deta(mu, link = .link ,
earg = .earg )^2) # w cancel
if (.mv && ! .onedpar) {
- dpar = rep(as.numeric(NA), len = M)
- temp87 = cbind(temp87)
- nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
- for(ii in 1:M)
- dpar[ii] = sum(temp87[, ii]) / (nrow.mu - ncol(x))
+ dpar <- rep(as.numeric(NA), len = M)
+ temp87 <- cbind(temp87)
+ nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
+ for(ii in 1:M)
+ dpar[ii] <- sum(temp87[, ii]) / (nrow.mu - ncol(x))
if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
- names(dpar) = dimnames(y)[[2]]
- } else
- dpar = sum(temp87) / (length(mu) - ncol(x))
+ names(dpar) <- dimnames(y)[[2]]
+ } else {
+ dpar <- sum(temp87) / (length(mu) - ncol(x))
+ }
}
- misc$mv = .mv
+
+ misc$mv <- .mv
misc$dispersion <- dpar
misc$default.dispersion <- 1
misc$estimated.dispersion <- .estimated.dispersion
misc$bred <- .bred
+ misc$expected <- TRUE
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) dn2 else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$expected = TRUE
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion,
.onedpar = onedpar, .mv = mv,
@@ -208,17 +231,17 @@
w * (y / mu - (1-y) / (1-mu))
} else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- smallno = 1.0e6 * .Machine$double.eps
- smallno = sqrt(.Machine$double.eps)
+ smallno <- 1.0e6 * .Machine$double.eps
+ smallno <- sqrt(.Machine$double.eps)
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
if ( .mv ) {
sum((ycounts * log(mu) +
@@ -233,55 +256,55 @@
vfamily = c("binomialff", "vcategorical"),
deriv = eval(substitute(expression({
- ybred <- if ( .bred ) {
- adjustment <- Hvector <-
- hatvaluesbasic(X_vlm = X_vlm_save,
- diagWm = (c(w) * c(mu * (1 - mu)))^1)
-
-
- y + Hvector * (0.5 - pi)
- y + (c(w) * (mu * (1 - mu))) * Hvector * (0.5 - pi)
- y + (1 / c(w)) * Hvector * (0.5 - pi) # close
- y + (c(mu * (1 - mu)) / c(w)) * Hvector * (0.5 - pi) # closer
- y + (c(mu * (1 - mu)) / c(w)) * Hvector * (0.5 - pi) / 2 # closest
- y + (1 / c(w)) * Hvector * (0.5 - pi) / 2 #
+ yBRED <- if ( .bred ) {
+ Hvector <- hatvaluesbasic(X_vlm = X_vlm_save,
+ diagWm = c(t(w * mu))) # Handles M>1
+
+ varY <- mu * (1 - mu) / w # Is a matrix if M>1. Seems the most correct.
+ d1.ADJ <- dtheta.deta(mu, .link , earg = .earg )
+ d2.ADJ <- d2theta.deta2(mu, .link , earg = .earg )
+
+
+ yBRED <- y + matrix(Hvector, n, M, byrow = TRUE) *
+ varY * d2.ADJ / (2 * d1.ADJ^2)
+ yBRED
} else {
y
}
- answer <-
- if ( .link == "logit") {
- c(w) * (ybred - mu)
+
+ answer <- if ( .link == "logit") {
+ c(w) * (yBRED - mu)
} else if ( .link == "cloglog") {
- mu.use = mu
- smallno = 100 * .Machine$double.eps
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1.0 - smallno] = 1.0 - smallno
- -c(w) * (ybred - mu) * log1p(-mu.use) / mu.use
+ mu.use <- mu
+ smallno <- 100 * .Machine$double.eps
+ mu.use[mu.use < smallno] <- smallno
+ mu.use[mu.use > 1.0 - smallno] <- 1.0 - smallno
+ -c(w) * (yBRED - mu) * log1p(-mu.use) / mu.use
} else {
c(w) * dtheta.deta(mu, link = .link , earg = .earg ) *
- (ybred / mu - 1.0) / (1.0 - mu)
+ (yBRED / mu - 1.0) / (1.0 - mu)
}
answer
}), list( .link = link, .earg = earg, .bred = bred))),
weight = eval(substitute(expression({
- tmp100 = mu * (1.0 - mu)
+ tmp100 <- mu * (1.0 - mu)
- tmp200 = if ( .link == "logit") {
- cbind(c(w) * tmp100)
- } else if ( .link == "cloglog") {
- cbind(c(w) * (1.0 - mu.use) * (log1p(-mu.use))^2 / mu.use)
- } else {
- cbind(c(w) * dtheta.deta(mu, link = .link ,
- earg = .earg )^2 / tmp100)
- }
+ tmp200 <- if ( .link == "logit") {
+ cbind(c(w) * tmp100)
+ } else if ( .link == "cloglog") {
+ cbind(c(w) * (1.0 - mu.use) * (log1p(-mu.use))^2 / mu.use)
+ } else {
+ cbind(c(w) * dtheta.deta(mu, link = .link ,
+ earg = .earg )^2 / tmp100)
+ }
for(ii in 1:M) {
- index500 = !is.finite(tmp200[, ii]) |
- (abs(tmp200[, ii]) < .Machine$double.eps)
+ index500 <- !is.finite(tmp200[, ii]) |
+ (abs(tmp200[, ii]) < .Machine$double.eps)
if (any(index500)) { # Diagonal 0's are bad
- tmp200[index500, ii] = .Machine$double.eps
+ tmp200[index500, ii] <- .Machine$double.eps
}
}
tmp200
@@ -290,9 +313,10 @@
if (!mv)
- ans@deviance =
+ ans@deviance <-
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y = cbind(y, 1-y),
+ Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu),
+ y = cbind(y, 1-y),
w = w, residuals = residuals,
eta = eta, extra = extra)
}
@@ -302,8 +326,7 @@
- gammaff <- function(link = "nreciprocal", dispersion = 0)
-{
+ gammaff <- function(link = "nreciprocal", dispersion = 0) {
estimated.dispersion <- dispersion == 0
@@ -314,8 +337,8 @@
new("vglmff",
blurb = c("Gamma distribution\n\n",
- "Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu^2 / k"),
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu^2 / k"),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
if (residuals) {
@@ -340,17 +363,17 @@
mustart <- y + 0.167 * (y == 0)
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ M <- if (is.matrix(y)) ncol(y) else 1
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
paste("mu", 1:M, sep = "")
}
predictors.names <-
- namesof(if (M > 1) dn2 else "mu", .link ,
- earg = .earg , short = TRUE)
+ namesof(if (M > 1) dn2 else "mu", .link ,
+ earg = .earg , short = TRUE)
if (!length(etastart))
etastart <- theta2eta(mustart, link = .link , earg = .earg )
@@ -361,30 +384,31 @@
last = eval(substitute(expression({
dpar <- .dispersion
if (!dpar) {
- if (M == 1) {
- temp = w * dmu.deta^2
- dpar = sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
- } else {
- dpar = rep(0, len = M)
- for(spp in 1:M) {
- temp = w * dmu.deta[,spp]^2
- dpar[spp] = sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) /
- (length(mu[,spp]) - ncol(x))
- }
+ if (M == 1) {
+ temp <- w * dmu.deta^2
+ dpar <- sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
+ } else {
+ dpar <- rep(0, len = M)
+ for(spp in 1:M) {
+ temp <- w * dmu.deta[,spp]^2
+ dpar[spp] <- sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) / (
+ length(mu[,spp]) - ncol(x))
}
+ }
}
misc$dispersion <- dpar
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .dispersion = dispersion, .earg = earg,
.estimated.dispersion = estimated.dispersion,
@@ -397,12 +421,12 @@
Musual <- 1
ncoly <- ncol(as.matrix(y))
- dl.dmu = (y-mu) / mu^2
- dmu.deta = dtheta.deta(theta = mu, link = .link , earg = .earg )
+ dl.dmu <- (y-mu) / mu^2
+ dmu.deta <- dtheta.deta(theta = mu, link = .link , earg = .earg )
c(w) * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- d2l.dmu2 = 1 / mu^2
+ d2l.dmu2 <- 1 / mu^2
wz <- dmu.deta^2 * d2l.dmu2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .link = link, .earg = earg))))
@@ -412,8 +436,7 @@
inverse.gaussianff <- function(link = "natural.ig",
- dispersion = 0)
-{
+ dispersion = 0) {
estimated.dispersion <- dispersion == 0
warning("@deviance() not finished")
warning("needs checking, but I'm sure it works")
@@ -425,8 +448,8 @@
new("vglmff",
blurb = c("Inverse Gaussian distribution\n\n",
- "Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu^3 / k"),
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu^3 / k"),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
pow <- 3 # Use Quasi()$deviance with pow==3
devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
@@ -455,9 +478,9 @@
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ M <- if (is.matrix(y)) ncol(y) else 1
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
paste("mu", 1:M, sep = "")
@@ -483,15 +506,15 @@
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:M)
- misc$earg[[ii]] = .earg
+ misc$earg[[ii]] <- .earg
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion,
@@ -523,18 +546,18 @@ dinv.gaussian <- function(x, mu, lambda, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(mu), length(lambda))
+ LLL <- max(length(x), length(mu), length(lambda))
x <- rep(x, len = LLL);
mu <- rep(mu, len = LLL);
lambda <- rep(lambda, len = LLL)
logdensity <- rep(log(0), len = LLL)
- xok = (x > 0)
+ xok <- (x > 0)
logdensity[xok] = 0.5 * log(lambda[xok] / (2 * pi * x[xok]^3)) -
lambda[xok] *
(x[xok]-mu[xok])^2 / (2*mu[xok]^2 * x[xok])
- logdensity[mu <= 0] = NaN
- logdensity[lambda <= 0] = NaN
+ logdensity[mu <= 0] <- NaN
+ logdensity[lambda <= 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
@@ -545,36 +568,37 @@ pinv.gaussian <- function(q, mu, lambda) {
if (any(lambda <= 0))
stop("lambda must be positive")
- LLL = max(length(q), length(mu), length(lambda))
- q = rep(q, len = LLL)
- mu = rep(mu, len = LLL)
- lambda = rep(lambda, len = LLL)
+ LLL <- max(length(q), length(mu), length(lambda))
+ q <- rep(q, len = LLL)
+ mu <- rep(mu, len = LLL)
+ lambda <- rep(lambda, len = LLL)
ans <- q
- ans[q <= 0] = 0
- bb = q > 0
- ans[bb] = pnorm( sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] - 1)) +
- exp(2*lambda[bb]/mu[bb]) *
- pnorm(-sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] + 1))
+ ans[q <= 0] <- 0
+ bb <- q > 0
+ ans[bb] <- pnorm( sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] - 1)) +
+ exp(2*lambda[bb]/mu[bb]) *
+ pnorm(-sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] + 1))
ans
}
rinv.gaussian <- function(n, mu, lambda) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- mu = rep(mu, len = use.n); lambda = rep(lambda, len = use.n)
+ mu <- rep(mu, len = use.n);
+ lambda <- rep(lambda, len = use.n)
- u = runif(use.n)
- Z = rnorm(use.n)^2 # rchisq(use.n, df = 1)
- phi = lambda / mu
- y1 = 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
+ u <- runif(use.n)
+ Z <- rnorm(use.n)^2 # rchisq(use.n, df = 1)
+ phi <- lambda / mu
+ y1 <- 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
ans <- mu * ifelse((1+y1)*u > 1, 1/y1, y1)
- ans[mu <= 0] = NaN
- ans[lambda <= 0] = NaN
+ ans[mu <= 0] <- NaN
+ ans[lambda <= 0] <- NaN
ans
}
@@ -590,10 +614,9 @@ rinv.gaussian <- function(n, mu, lambda) {
inv.gaussianff <- function(lmu = "loge", llambda = "loge",
imethod = 1, ilambda = NULL,
- parallel = FALSE, intercept.apply = FALSE,
+ parallel = FALSE, apply.parint = FALSE,
shrinkage.init = 0.99,
- zero = NULL)
-{
+ zero = NULL) {
@@ -628,19 +651,19 @@ rinv.gaussian <- function(n, mu, lambda) {
new("vglmff",
blurb = c("Inverse Gaussian distribution\n\n",
- "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
- "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu & lambda > 0",
- "Link: ", namesof("mu", lmu, earg = emu), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: ", "mu\n",
- "Variance: mu^3 / lambda"),
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
+ "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu & lambda > 0",
+ "Link: ", namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: ", "mu\n",
+ "Variance: mu^3 / lambda"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
- intercept.apply = .intercept.apply )
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ apply.int = .apply.parint )
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero,
- .parallel = parallel, .intercept.apply = intercept.apply ))),
+ .parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
zero = .zero )
@@ -730,10 +753,10 @@ rinv.gaussian <- function(n, mu, lambda) {
misc$expected <- TRUE
misc$multipleResponses <- FALSE
misc$parallel <- .parallel
- misc$intercept.apply <- .intercept.apply
+ misc$apply.parint <- .apply.parint
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
- .parallel = parallel, .intercept.apply = intercept.apply,
+ .parallel = parallel, .apply.parint = apply.parint,
.sinit = shrinkage.init,
.imethod = imethod ))),
@@ -794,14 +817,14 @@ rinv.gaussian <- function(n, mu, lambda) {
imu = NULL, imethod = 1,
parallel = FALSE, zero = NULL,
bred = FALSE,
- earg.link = FALSE)
-{
+ earg.link = FALSE) {
- if (bred)
- stop("currently 'bred = TRUE' is not working")
- estimated.dispersion <- dispersion==0
+ if (!is.logical(bred) || length(bred) > 1)
+ stop("argument 'bred' must be a single logical")
+
+ estimated.dispersion <- (dispersion == 0)
if (earg.link) {
@@ -833,16 +856,18 @@ rinv.gaussian <- function(n, mu, lambda) {
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- nz = y > 0
- devi = -(y - mu)
- devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ nz <- (y > 0)
+ devi <- -(y - mu)
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * c(w)) else
2 * sum(c(w) * devi)
},
infos = eval(substitute(function(...) {
list(Musual = 1,
+ bred = .bred ,
zero = .zero )
- }, list( .zero = zero ))),
+ }, list( .zero = zero,
+ .bred = bred ))),
initialize = eval(substitute(expression({
@@ -858,11 +883,12 @@ rinv.gaussian <- function(n, mu, lambda) {
y <- temp5$y
- M = ncoly = ncol(y)
+ M <- ncoly <- ncol(y)
assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
paste("mu", 1:M, sep = "")
@@ -871,63 +897,76 @@ rinv.gaussian <- function(n, mu, lambda) {
namesof(if (M > 1) dn2 else "mu", .link ,
earg = .earg , short = TRUE)
+
+ if ( .bred ) {
+ if ( !control$save.weight ) {
+ save.weight <- control$save.weight <- TRUE
+ }
+ }
+
+
+
if (!length(etastart)) {
- mu.init = pmax(y, 1/8)
+ mu.init <- pmax(y, 1/8)
for(iii in 1:ncol(y)) {
if ( .imethod == 2) {
- mu.init[, iii] = weighted.mean(y[, iii], w[, iii]) + 1/8
+ mu.init[, iii] <- weighted.mean(y[, iii], w[, iii]) + 1/8
} else if ( .imethod == 3) {
- mu.init[, iii] = median(y[, iii]) + 1/8
+ mu.init[, iii] <- median(y[, iii]) + 1/8
}
}
if (length( .imu ))
- mu.init = matrix( .imu , n, ncoly, byrow = TRUE)
+ mu.init <- matrix( .imu , n, ncoly, byrow = TRUE)
etastart <- theta2eta(mu.init, link = .link , earg = .earg )
}
}), list( .link = link, .estimated.dispersion = estimated.dispersion,
+ .bred = bred,
.imethod = imethod, .imu = imu, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link , earg = .earg )
+ mu <- eta2theta(eta, link = .link , earg = .earg )
mu
}, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
dpar <- .dispersion
if (!dpar) {
- temp87 = (y-mu)^2 *
+ temp87 <- (y-mu)^2 *
wz / (dtheta.deta(mu, link = .link , earg = .earg )^2) # w cancel
- if (M > 1 && ! .onedpar) {
- dpar = rep(as.numeric(NA), len = M)
- temp87 = cbind(temp87)
- nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
+ if (M > 1 && ! .onedpar ) {
+ dpar <- rep(as.numeric(NA), length = M)
+ temp87 <- cbind(temp87)
+ nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
for(ii in 1:M)
- dpar[ii] = sum(temp87[, ii]) / (nrow.mu - ncol(x))
- if (is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
- names(dpar) = dimnames(y)[[2]]
+ dpar[ii] <- sum(temp87[, ii]) / (nrow.mu - ncol(x))
+ if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
+ names(dpar) <- dimnames(y)[[2]]
} else {
- dpar = sum(temp87) / (length(mu) - ncol(x))
+ dpar <- sum(temp87) / (length(mu) - ncol(x))
}
}
misc$dispersion <- dpar
misc$default.dispersion <- 1
misc$estimated.dispersion <- .estimated.dispersion
- misc$expected = TRUE
- misc$imethod = .imethod
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
misc$multipleResponses <- TRUE
+ misc$bred <- .bred
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) dn2 else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:M)
- misc$earg[[ii]] = .earg
- }), list( .dispersion = dispersion, .imethod=imethod,
+ misc$earg[[ii]] <- .earg
+
+ }), list( .dispersion = dispersion, .imethod = imethod,
.estimated.dispersion = estimated.dispersion,
+ .bred = bred,
.onedpar = onedpar, .link = link, .earg = earg))),
linkfun = eval(substitute( function(mu, extra = NULL) {
@@ -942,38 +981,44 @@ rinv.gaussian <- function(n, mu, lambda) {
},
vfamily = "poissonff",
deriv = eval(substitute(expression({
- answer <-
- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- c(w) * (y - mu)
+ yBRED <- if ( .bred ) {
+ Hvector <- hatvaluesbasic(X_vlm = X_vlm_save,
+ diagWm = c(t(w * mu))) # Handles M>1
+
+
+ varY <- mu # Is a matrix if M>1.
+ d1.BRED <- dtheta.deta(mu, .link , earg = .earg )
+ d2.BRED <- d2theta.deta2(mu, .link , earg = .earg )
+ y + matrix(Hvector, n, M, byrow = TRUE) *
+ varY * d2.BRED / (2 * d1.BRED^2)
+ } else {
+ y
+ }
+
+
+ answer <- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ c(w) * (yBRED - mu)
} else {
lambda <- mu
- dl.dlambda <- (y - lambda) / lambda
+ dl.dlambda <- (yBRED - lambda) / lambda
dlambda.deta <- dtheta.deta(theta = lambda,
link = .link , earg = .earg )
c(w) * dl.dlambda * dlambda.deta
}
-
- if ( .bred ) {
- adjustment <- Hvector <-
- hatvaluesbasic(X_vlm = X_vlm_save,
- diagWm = c(w) * mu)
- answer + (c(w) * mu) * Hvector / 2
- } else {
- answer
- }
+ answer
}), list( .link = link, .earg = earg, .bred = bred))),
weight = eval(substitute(expression({
if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- tmp600 = mu
- tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
+ tmp600 <- mu
+ tmp600[tmp600 < .Machine$double.eps] <- .Machine$double.eps
c(w) * tmp600
} else {
- d2l.dlambda2 = 1 / lambda
- d2lambda.deta2 = d2theta.deta2(theta = lambda,
- link = .link , earg = .earg )
- c(w) * dlambda.deta^2 * d2l.dlambda2
+ ned2l.dlambda2 <- 1 / lambda
+ ned2lambda.deta2 <- d2theta.deta2(theta = lambda,
+ link = .link , earg = .earg )
+ c(w) * dlambda.deta^2 * ned2l.dlambda2
}
}), list( .link = link, .earg = earg))))
}
@@ -1025,8 +1070,7 @@ rinv.gaussian <- function(n, mu, lambda) {
dexppoisson <- function(lmean = "loge",
ldispersion = "logit",
idispersion = 0.8,
- zero = NULL)
-{
+ zero = NULL) {
if (!is.Numeric(idispersion, positive = TRUE))
stop("bad input for 'idispersion'")
@@ -1045,11 +1089,11 @@ rinv.gaussian <- function(n, mu, lambda) {
new("vglmff",
blurb = c("Double exponential Poisson distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean), ", ",
- namesof("dispersion", ldisp, earg = edisp), "\n",
- "Mean: ", "mean\n",
- "Variance: mean / dispersion"),
+ "Link: ",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", ldisp, earg = edisp), "\n",
+ "Mean: ", "mean\n",
+ "Variance: mean / dispersion"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -1069,9 +1113,9 @@ rinv.gaussian <- function(n, mu, lambda) {
ncol.y.max = 1)
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ M <- if (is.matrix(y)) ncol(y) else 1
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
"mu"
@@ -1080,7 +1124,7 @@ rinv.gaussian <- function(n, mu, lambda) {
c(namesof(dn2, link = .lmean, earg = .emean, short = TRUE),
namesof("dispersion", link = .ldisp, earg = .edisp, short = TRUE))
- init.mu = pmax(y, 1/8)
+ init.mu <- pmax(y, 1/8)
tmp2 <- rep( .idisp , length.out = n)
if (!length(etastart))
@@ -1095,17 +1139,19 @@ rinv.gaussian <- function(n, mu, lambda) {
}, list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
last = eval(substitute(expression({
- misc$expected <- TRUE
misc$link <- c(mean = .lmean , dispersion = .ldisp )
+
misc$earg <- list(mean = .emean , dispersion = .edisp )
+
+ misc$expected <- TRUE
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], link = .lmean,
- earg = .emean )
- Disper = eta2theta(eta[, 2], link = .ldisp,
- earg = .edisp )
+ lambda <- eta2theta(eta[, 1], link = .lmean,
+ earg = .emean )
+ Disper <- eta2theta(eta[, 2], link = .ldisp,
+ earg = .edisp )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(w * (0.5*log(Disper) +
@@ -1115,27 +1161,27 @@ rinv.gaussian <- function(n, mu, lambda) {
.ldisp = ldisp, .edisp = edisp ))),
vfamily = "dexppoisson",
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldisp,
- earg = .edisp)
+ lambda <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper <- eta2theta(eta[, 2], link = .ldisp,
+ earg = .edisp)
- dl.dlambda = Disper * (y / lambda - 1)
- dl.dDisper = y * log(lambda) + y - lambda + 0.5 / Disper
+ dl.dlambda <- Disper * (y / lambda - 1)
+ dl.dDisper <- y * log(lambda) + y - lambda + 0.5 / Disper
- dlambda.deta = dtheta.deta(theta = lambda, link = .lmean,
- earg = .emean)
- dDisper.deta = dtheta.deta(theta = Disper, link = .ldisp,
- earg = .edisp)
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .lmean,
+ earg = .emean)
+ dDisper.deta <- dtheta.deta(theta = Disper, link = .ldisp,
+ earg = .edisp)
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dDisper * dDisper.deta)
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
- usethis.lambda = pmax(lambda, .Machine$double.eps / 10000)
- wz[, iam(1, 1, M)] = (Disper / usethis.lambda) * dlambda.deta^2
- wz[, iam(2, 2, M)] = (0.5 / Disper^2) * dDisper.deta^2
+ wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ usethis.lambda <- pmax(lambda, .Machine$double.eps / 10000)
+ wz[, iam(1, 1, M)] <- (Disper / usethis.lambda) * dlambda.deta^2
+ wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
c(w) * wz
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp,
@@ -1163,38 +1209,38 @@ rinv.gaussian <- function(n, mu, lambda) {
new("vglmff",
blurb = c("Double Exponential Binomial distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean), ", ",
- namesof("dispersion", ldisp, earg = edisp), "\n",
- "Mean: ", "mean\n"),
+ "Link: ",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", ldisp, earg = edisp), "\n",
+ "Mean: ", "mean\n"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero , M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
if (ncol(cbind(w)) != 1)
stop("'weights' must be a vector or a one-column matrix")
- NCOL = function (x)
+ NCOL <- function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = (y != levels(y)[1])
- nvec = rep(1, n)
+ if (is.factor(y)) y <- (y != levels(y)[1])
+ nvec <- rep(1, n)
y[w == 0] <- 0
- if (!all(y == 0 || y == 1))
+ if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
init.mu =
- mustart = (0.5 + w * y) / (1 + w)
+ mustart <- (0.5 + w * y) / (1 + w)
- no.successes = y
+ no.successes <- y
if (min(y) < 0)
stop("Negative data not allowed!")
if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
@@ -1204,12 +1250,12 @@ rinv.gaussian <- function(n, mu, lambda) {
stop("Negative data not allowed!")
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
- init.mu =
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ init.mu <-
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else
stop("for the dexpbinomial family, response 'y' must be a ",
"vector of 0 and 1's\n",
@@ -1218,8 +1264,8 @@ rinv.gaussian <- function(n, mu, lambda) {
"or a 2-column matrix where col 1 is the no. of ",
"successes and col 2 is the no. of failures")
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
"mu"
@@ -1232,8 +1278,8 @@ rinv.gaussian <- function(n, mu, lambda) {
tmp2 <- rep( .idisp , len = n)
if (!length(etastart))
- etastart = cbind(theta2eta(init.mu, .lmean, earg = .emean),
- theta2eta(tmp2, .ldisp, earg = .edisp))
+ etastart <- cbind(theta2eta(init.mu, .lmean, earg = .emean),
+ theta2eta(tmp2, .ldisp, earg = .edisp))
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp,
.idisp = idisp ))),
@@ -1242,55 +1288,57 @@ rinv.gaussian <- function(n, mu, lambda) {
}, list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
last = eval(substitute(expression({
- misc$expected <- TRUE
misc$link <- c("mean" = .lmean, "dispersion" = .ldisp)
+
misc$earg <- list( mean = .emean, dispersion = .edisp)
+
+ misc$expected <- TRUE
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper <- eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
- temp1 = y * log(ifelse(y > 0, y, 1)) # y*log(y)
- temp2 = (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
- sum(0.5 * log(Disper) + w * (y * Disper * log(prob) +
- (1-y) * Disper * log1p(-prob) +
- temp1 * (1-Disper) + temp2 * (1 - Disper)))
- }
- }, list( .lmean = lmean, .emean = emean,
- .ldisp = ldisp, .edisp = edisp ))),
- vfamily = "dexpbinomial",
- deriv = eval(substitute(expression({
- prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
- temp1 = y * log(ifelse(y > 0, y, 1)) # y*log(y)
- temp2 = (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
- temp3 = prob * (1.0-prob)
- temp3 = pmax(temp3, .Machine$double.eps * 10000)
-
- dl.dprob = w * Disper * (y - prob) / temp3
- dl.dDisper = 0.5 / Disper + w * (y * log(prob) +
- (1-y)*log1p(-prob) - temp1 - temp2)
-
- dprob.deta = dtheta.deta(theta = prob, .lmean, earg = .emean)
- dDisper.deta = dtheta.deta(theta = Disper, .ldisp, earg = .edisp)
-
- cbind(dl.dprob * dprob.deta,
- dl.dDisper * dDisper.deta)
- }), list( .lmean = lmean, .emean = emean,
- .ldisp = ldisp, .edisp = edisp ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
- wz[, iam(1, 1, M)] = w * (Disper / temp3) * dprob.deta^2
- wz[, iam(2, 2, M)] = (0.5 / Disper^2) * dDisper.deta^2
- wz
- }), list( .lmean = lmean, .emean = emean,
- .ldisp = ldisp, .edisp = edisp ))))
+ temp1 <- y * log(ifelse(y > 0, y, 1)) # y*log(y)
+ temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
+ sum(0.5 * log(Disper) + w * (y * Disper * log(prob) +
+ (1-y) * Disper * log1p(-prob) +
+ temp1 * (1-Disper) + temp2 * (1 - Disper)))
+ }
+ }, list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
+ vfamily = "dexpbinomial",
+ deriv = eval(substitute(expression({
+ prob <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper <- eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
+ temp1 <- y * log(ifelse(y > 0, y, 1)) # y*log(y)
+ temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
+ temp3 <- prob * (1.0-prob)
+ temp3 <- pmax(temp3, .Machine$double.eps * 10000)
+
+ dl.dprob <- w * Disper * (y - prob) / temp3
+ dl.dDisper <- 0.5 / Disper + w * (y * log(prob) +
+ (1-y)*log1p(-prob) - temp1 - temp2)
+
+ dprob.deta <- dtheta.deta(theta = prob, .lmean, earg = .emean)
+ dDisper.deta <- dtheta.deta(theta = Disper, .ldisp, earg = .edisp)
+
+ cbind(dl.dprob * dprob.deta,
+ dl.dDisper * dDisper.deta)
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz[, iam(1, 1, M)] <- w * (Disper / temp3) * dprob.deta^2
+ wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
+ wz
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))))
}
@@ -1298,8 +1346,7 @@ rinv.gaussian <- function(n, mu, lambda) {
mbinomial <- function(mvar = NULL, link = "logit",
parallel = TRUE,
- smallno = .Machine$double.eps^(3/4))
-{
+ smallno = .Machine$double.eps^(3/4)) {
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
@@ -1312,8 +1359,8 @@ rinv.gaussian <- function(n, mu, lambda) {
if (is.logical(parallel) && !parallel)
stop("'parallel' must be TRUE")
- temp = terms(mvar)
- mvar = attr(temp,"term.labels")
+ temp <- terms(mvar)
+ mvar <- attr(temp,"term.labels")
if (length(mvar) != 1) stop("cannot obtain the matching variable")
if (!is.character(mvar) || length(mvar) != 1) {
stop("bad input for 'mvar'")
@@ -1321,15 +1368,15 @@ rinv.gaussian <- function(n, mu, lambda) {
new("vglmff",
blurb = c("Matched binomial model (intercepts fitted)\n\n",
- "Link: ", namesof("mu[,j]", link, earg = earg)),
+ "Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
- intercept.apply = TRUE)
+ apply.int = TRUE)
constraints[[extra$mvar]] <- diag(M)
specialCM <- list(a = vector("list", M-1))
for(ii in 1:(M-1)) {
- specialCM[[1]][[ii]] =
+ specialCM[[1]][[ii]] <-
(constraints[[extra$mvar]])[, 1+ii,drop = FALSE]
}
names(specialCM) = extra$mvar
@@ -1338,131 +1385,135 @@ rinv.gaussian <- function(n, mu, lambda) {
if (!all(w == 1))
extra$orig.w = w
- mvar = .mvar
+ mvar <- .mvar
- NCOL = function (x)
+ NCOL <- function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
- nvec = rep(1, n)
+ if (is.factor(y)) y <- y != levels(y)[1]
+ nvec <- rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
+ mustart <- (0.5 + w * y) / (1 + w)
+ no.successes <- w * y
if (any(abs(no.successes - round(no.successes)) > 0.001))
stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
if (any(abs(y - round(y)) > 0.001))
stop("Count data must be integer-valued")
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else
stop("Response not of the right form")
- temp1 = attr(x, "assign")
- if (colnames(x)[1] != "(Intercept)") stop("x must have an intercept")
- M = CCC = length(temp1[[mvar]]) + (colnames(x)[1] == "(Intercept)")
- temp9 = x[,temp1[[mvar]],drop = FALSE]
- temp9 = temp9 * matrix(2:CCC, n, CCC-1, byrow = TRUE)
- temp9 = apply(temp9, 1, max)
- temp9[temp9 == 0] = 1
- extra$NoMatchedSets = CCC
- extra$n = n
- extra$M = M
- extra$mvar = mvar
- extra$index9 = temp9
+ temp1 <- attr(x, "assign")
+ if (colnames(x)[1] != "(Intercept)")
+ stop("x must have an intercept")
+ M <- CCC <- length(temp1[[mvar]]) +
+ (colnames(x)[1] == "(Intercept)")
+ temp9 <- x[,temp1[[mvar]],drop = FALSE]
+ temp9 <- temp9 * matrix(2:CCC, n, CCC-1, byrow = TRUE)
+ temp9 <- apply(temp9, 1, max)
+ temp9[temp9 == 0] <- 1
+ extra$NoMatchedSets <- CCC
+ extra$n <- n
+ extra$M <- M
+ extra$mvar <- mvar
+ extra$index9 <- temp9
- predictors.names <-
- namesof("mu", .link , earg = .earg , short = TRUE)
- predictors.names <- rep(predictors.names, len = M)
- }), list( .link = link, .earg = earg, .mvar = mvar ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link , earg = .earg )
- mu[cbind(1:extra$n, extra$index9)]
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) paste("mu(matched set ",
- 1:M, ")", sep = "") else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
-
- misc$expected = TRUE
- }), list( .link = link, .earg = earg))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- temp = theta2eta(mu, .link , earg = .earg )
- matrix(temp, extra$n, extra$M)
- }, list( .link = link, .earg = earg))),
- loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
+ predictors.names <-
+ namesof("mu", .link , earg = .earg , short = TRUE)
+ predictors.names <- rep(predictors.names, len = M)
+ }), list( .link = link, .earg = earg, .mvar = mvar ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu <- eta2theta(eta, link = .link , earg = .earg )
+ mu[cbind(1:extra$n, extra$index9)]
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) paste("mu(matched set ",
+ 1:M, ")", sep = "") else "mu"
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- smallno = 1.0e6 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ misc$expected <- TRUE
+ }), list( .link = link, .earg = earg))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ temp <- theta2eta(mu, .link , earg = .earg )
+ matrix(temp, extra$n, extra$M)
+ }, list( .link = link, .earg = earg))),
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbinom(x = ycounts, size = nvec, prob = mu,
- log = TRUE))
- }
- },
- vfamily = c("mbinomial", "vcategorical"),
- deriv = eval(substitute(expression({
- answer =
- if ( .link == "logit") {
- w * (y - mu)
- } else if ( .link == "cloglog") {
- mu.use = mu
- smallno = 100 * .Machine$double.eps
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1 - smallno] = 1 - smallno
- -w * (y - mu) * log1p(-mu.use) / mu.use
- } else
- w * dtheta.deta(mu, link = .link , earg = .earg ) *
- (y/mu - 1)/(1-mu)
- result = matrix(0, n, M)
- result[cbind(1:n, extra$index9)] = answer
- result
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- tmp100 = mu*(1-mu)
- answer = if ( .link == "logit") {
- cbind(w * tmp100)
- } else if ( .link == "cloglog") {
- cbind(w * (1-mu.use) * (log1p(-mu.use))^2 / mu.use )
- } else {
- cbind(w * dtheta.deta(mu, link = .link ,
- earg = .earg )^2 / tmp100)
- }
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- result = matrix( .smallno, n, M)
- result[cbind(1:n, extra$index9)] = answer
- result
- }), list( .link = link, .earg = earg, .smallno = smallno ))))
+ smallno <- 1.0e6 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts <- round(ycounts)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbinom(x = ycounts, size = nvec, prob = mu,
+ log = TRUE))
+ }
+ },
+ vfamily = c("mbinomial", "vcategorical"),
+ deriv = eval(substitute(expression({
+ answer <- if ( .link == "logit") {
+ w * (y - mu)
+ } else if ( .link == "cloglog") {
+ mu.use <- mu
+ smallno <- 100 * .Machine$double.eps
+ mu.use[mu.use < smallno] <- smallno
+ mu.use[mu.use > 1 - smallno] <- 1 - smallno
+ -w * (y - mu) * log1p(-mu.use) / mu.use
+ } else {
+ w * dtheta.deta(mu, link = .link , earg = .earg ) *
+ (y/mu - 1)/(1-mu)
+ }
+ result <- matrix(0, n, M)
+ result[cbind(1:n, extra$index9)] = answer
+ result
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ tmp100 <- mu*(1-mu)
+ answer <- if ( .link == "logit") {
+ cbind(w * tmp100)
+ } else if ( .link == "cloglog") {
+ cbind(w * (1-mu.use) * (log1p(-mu.use))^2 / mu.use )
+ } else {
+ cbind(w * dtheta.deta(mu, link = .link ,
+ earg = .earg )^2 / tmp100)
+ }
+
+ result <- matrix( .smallno, n, M)
+ result[cbind(1:n, extra$index9)] <- answer
+ result
+ }), list( .link = link, .earg = earg, .smallno = smallno ))))
}
mypool <- function(x, index) {
- answer = x
- uindex = unique(index)
- for(ii in uindex) {
- ind0 = index == ii
- answer[ind0] = sum(x[ind0])
- }
- answer
+ answer <- x
+ uindex <- unique(index)
+ for(ii in uindex) {
+ ind0 <- (index == ii)
+ answer[ind0] <- sum(x[ind0])
+ }
+ answer
}
@@ -1474,8 +1525,7 @@ mypool <- function(x, index) {
if (FALSE)
- mbino <- function()
-{
+ mbino <- function() {
link <- "logit"
earg <- list()
parallel <- TRUE
@@ -1494,7 +1544,7 @@ mypool <- function(x, index) {
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
- intercept.apply = FALSE)
+ apply.int = FALSE)
}), list( .parallel = parallel ))),
initialize = eval(substitute(expression({
if (colnames(x)[1] == "(Intercept)")
@@ -1546,13 +1596,15 @@ mypool <- function(x, index) {
namesof("mu", .link , earg = .earg , short = TRUE)
}), list( .link = link, .earg = earg, .mvar = mvar ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- denominator = exp(eta)
- numerator = mypool(denominator, extra$mvar)
+ denominator <- exp(eta)
+ numerator <- mypool(denominator, extra$mvar)
numerator / denominator
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- c(mu = .link )
- misc$earg <- list( mu = .earg )
+ misc$link <- c(mu = .link )
+
+ misc$earg <- list(mu = .earg )
+
misc$expected <- TRUE
}), list( .link = link, .earg = earg))),
loglikelihood =
@@ -1592,9 +1644,7 @@ mypool <- function(x, index) {
augbinomial <- function(link = "logit", mv = FALSE,
- parallel = TRUE)
-
-{
+ parallel = TRUE) {
if (!is.logical(parallel) ||
length(parallel) != 1 ||
@@ -1672,7 +1722,7 @@ mypool <- function(x, index) {
if (is.factor(y)) y = (y != levels(y)[1])
nvec = rep(1, n)
y[w == 0] <- 0
- if (!all(y == 0 || y == 1))
+ if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
if (!length(mustart) && !length(etastart))
mustart = (0.5 + w * y) / (1 + w)
@@ -1715,16 +1765,17 @@ mypool <- function(x, index) {
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$mv = .mv
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) dn2 else "mu"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$parallel = .parallel
- misc$expected = TRUE
+ misc$parallel <- .parallel
+ misc$expected <- TRUE
+ misc$mv <- .mv
}), list( .link = link, .mv = mv, .earg = earg,
.parallel = parallel ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
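[Side note on the reworked 'bred' branches in binomialff() and poissonff() above: the new code adjusts the response by yBRED = y + h * Var(Y) * (d^2 theta/d eta^2) / (2 * (d theta/d eta)^2), with h the hat values of the weighted fit. For an ordinary logistic regression with unit weights this works out to Firth's adjusted response y + h*(1/2 - mu). A minimal sketch of that special case in base R, using invented data and glm()'s hatvalues() as a stand-in for hatvaluesbasic():

    ## Hypothetical illustration only; data, model and variable names are invented.
    set.seed(1)
    x <- rnorm(30)
    y <- rbinom(30, size = 1, prob = plogis(0.5 * x))
    fit <- glm(y ~ x, family = binomial)
    mu  <- fitted(fit)
    h   <- hatvalues(fit)          # leverages from the final IWLS fit
    y.adj <- y + h * (0.5 - mu)    # Firth-type adjusted response
    ## Re-running IWLS with y.adj in place of y pulls the coefficients
    ## towards the bias-reduced (Firth) estimates.
]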
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 028114e..eda531c 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,23 +1,23 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
- loglinb2 <- function(exchangeable = FALSE, zero = NULL)
-{
+
+ loglinb2 <- function(exchangeable = FALSE, zero = NULL) {
new("vglmff",
blurb = c("Log-linear model for binary data\n\n",
- "Links: ",
- "Identity: u1, u2, u12",
- "\n"),
+ "Links: ",
+ "Identity: u1, u2, u12",
+ "\n"),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ .exchangeable , constraints,
+ apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .exchangeable = exchangeable, .zero = zero ))),
initialize = expression({
@@ -59,10 +59,12 @@
"11" = exp(u1+u2+u12) / denom)
},
last = expression({
- misc$link = c("u1" = "identity", "u2" = "identity", "u12" = "identity")
- misc$earg = list(u1 = list(), u2 = list(), u12 = list())
+ misc$link <- c("u1" = "identity", "u2" = "identity",
+ "u12" = "identity")
+ misc$earg <- list("u1" = list(), "u2" = list(),
+ "u12" = list())
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}),
linkfun = function(mu, extra = NULL) {
@@ -115,19 +117,20 @@
}
- loglinb3 <- function(exchangeable = FALSE, zero = NULL)
-{
+
+
+ loglinb3 <- function(exchangeable = FALSE, zero = NULL) {
new("vglmff",
blurb = c("Log-linear model for trivariate binary data\n\n",
- "Links: ",
- "Identity: u1, u2, u3, u12, u13, u23",
- "\n"),
+ "Links: ",
+ "Identity: u1, u2, u3, u12, u13, u23",
+ "\n"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
+ .exchangeable, constraints,
+ apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .exchangeable = exchangeable, .zero = zero ))),
initialize = expression({
predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
@@ -150,32 +153,21 @@
if (ncol(y) != 3)
stop("ncol(y) must be = 3")
+
+ if (FALSE)
extra$my.expression <- expression({
- u1 <- eta[,1]
- u2 <- eta[,2]
- u3 <- eta[,3]
- u12 <- eta[,4]
- u13 <- eta[,5]
- u23 <- eta[,6]
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
exp(u1 + u2 + u3 + u12 + u13 + u23)
})
- extra$deriv.expression <- expression({
- allterms <- exp(u1+u2+u3+u12+u13+u23)
- A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
- allterms
- A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
- allterms
- A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
- allterms
- A12 <- exp(u1 + u2 + u12) + allterms
- A13 <- exp(u1 + u3 + u13) + allterms
- A23 <- exp(u2 + u3 + u23) + allterms
- })
-
if (length(mustart) + length(etastart) == 0) {
mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
@@ -192,7 +184,17 @@
}
}),
linkinv = function(eta, extra = NULL) {
- eval(extra$my.expression)
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+
+
cbind("000" = 1,
"001" = exp(u3),
"010" = exp(u2),
@@ -203,13 +205,13 @@
"111" = exp(u1+u2+u3+u12+u13+u23)) / denom
},
last = expression({
- misc$link = rep("identity", length = M)
- names(misc$link) = predictors.names
+ misc$link <- rep("identity", length = M)
+ names(misc$link) <- predictors.names
- misc$earg = list(u1 = list(), u2 = list(), u3 = list(),
- u12 = list(), u13 = list(), u23 = list())
+ misc$earg <- list(u1 = list(), u2 = list(), u3 = list(),
+ u12 = list(), u13 = list(), u23 = list())
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}),
@@ -224,7 +226,16 @@
cbind(u1, u2, u3, u12, u13, u23)
},
loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- eval(extra$my.expression)
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+
u0 <- -log(denom)
if (residuals)
stop("loglikelihood residuals not implemented yet") else
@@ -233,8 +244,30 @@
},
vfamily = c("loglinb3"),
deriv = expression({
- eval(extra$my.expression)
- eval(extra$deriv.expression)
+ u1 <- eta[, 1]
+ u2 <- eta[, 2]
+ u3 <- eta[, 3]
+ u12 <- eta[, 4]
+ u13 <- eta[, 5]
+ u23 <- eta[, 6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+
+
+
+ allterms <- exp(u1+u2+u3+u12+u13+u23)
+ A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
+ allterms
+ A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
+ allterms
+ A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
+ allterms
+ A12 <- exp(u1 + u2 + u12) + allterms
+ A13 <- exp(u1 + u3 + u13) + allterms
+ A23 <- exp(u2 + u3 + u23) + allterms
+
+
c(w) * cbind(-A1/denom + y[,1],
-A2/denom + y[,2],
-A3/denom + y[,3],
diff --git a/R/family.math.R b/R/family.math.R
index cef2156..241435d 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,36 +18,36 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
if (any(Im(x) != 0.0))
stop("argument 'x' must be real, not complex!")
- ans = x
- ans[!is.na(x) & x < -exp(-1)] = NA
- ans[!is.na(x) & x >= -exp(-1)] = log1p(x[!is.na(x) & x >= -exp(-1)])
- ans[!is.na(x) & x >= 0 ] = sqrt(x[!is.na(x) & x >= 0 ]) / 2
+ ans <- x
+ ans[!is.na(x) & x < -exp(-1)] <- NA
+ ans[!is.na(x) & x >= -exp(-1)] <- log1p(x[!is.na(x) & x >= -exp(-1)])
+ ans[!is.na(x) & x >= 0 ] <- sqrt(x[!is.na(x) & x >= 0 ]) / 2
- cutpt = 3.0
+ cutpt <- 3.0
if (any(myTF <- !is.na(x) & x > cutpt)) {
- L1 = log(x[!is.na(x) & x > cutpt]) # log(as.complex(x))
- L2 = log(L1) # log(as.complex(L1))
- wzinit = L1 - L2 +
+ L1 <- log(x[!is.na(x) & x > cutpt]) # log(as.complex(x))
+ L2 <- log(L1) # log(as.complex(L1))
+ wzinit <- L1 - L2 +
(L2 +
(L2*( -2 + L2)/(2) +
(L2*( 6 + L2*(-9 + L2* 2)) / (6) +
L2*(-12 + L2*(36 + L2*(-22 + L2*3))) / (12*L1)) / L1) / L1) / L1
- ans[myTF] = wzinit
+ ans[myTF] <- wzinit
}
for (ii in 1:maxit) {
- exp1 = exp(ans)
- exp2 = ans * exp1
- delta = (exp2 - x) / (exp2 + exp1 -
+ exp1 <- exp(ans)
+ exp2 <- ans * exp1
+ delta <- (exp2 - x) / (exp2 + exp1 -
((ans + 2) * (exp2 - x) / (2 * (ans + 1.0))))
- ans = ans - delta
+ ans <- ans - delta
if (all(is.na(delta) ||
max(abs(delta), na.rm = TRUE) < tolerance)) break
if (ii == maxit)
warning("did not converge")
}
- ans[x == Inf] = Inf
+ ans[x == Inf] <- Inf
ans
}
@@ -56,9 +56,54 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
+ pgamma.deriv <- function(q, shape, tmax = 100) {
+ nnn <- max(length(q), length(shape))
+ if (length(q) != nnn)
+ q <- rep(q, length = nnn)
+ if (length(shape) != nnn)
+ shape <- rep(shape, length = nnn)
+ if (!is.Numeric(q, positive = TRUE))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(tmax, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'tmax'")
+ if (tmax < 10)
+ warning("probably argument 'tmax' is too small")
+
+
+ gplog <- lgamma(shape)
+ gp1log <- gplog + log(shape)
+ psip <- digamma(shape)
+ psip1 <- psip + 1 / shape
+ psidp <- trigamma(shape)
+ psidp1 <- psidp - 1 / shape^2
+
+ fred <-
+ dotC(name = "VGAM_C_vdigami",
+ d = as.double(matrix(0, 6, nnn)),
+ x = as.double(q), p = as.double(shape),
+ as.double(gplog), as.double(gp1log), as.double(psip),
+ as.double(psip1), as.double(psidp), as.double(psidp1),
+ ifault = integer(nnn),
+ tmax = as.double(tmax),
+ as.integer(nnn))
+ answer <- matrix(fred$d, nnn, 6, byrow = TRUE)
+ dimnames(answer) <- list(names(q),
+ c("q", "q^2", "shape", "shape^2",
+ "q.shape", "pgamma(q, shape)"))
+
+ if (any(fred$ifault != 0)) {
+ indices <- which(fred$ifault != 0)
+ warning("convergence problems with elements ",
+ indices)
+ }
+
+ answer
+}
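
pgamma.deriv() is new in this release and wraps the compiled routine
VGAM_C_vdigami, so a small sanity check against the column layout shown in
its dimnames above may be useful; lambertW() can be checked directly against
its defining equation w * exp(w) = x. Sketch only, assuming the package (with
its compiled code) is loaded.

  ## Sketch only: numerical checks for lambertW() and pgamma.deriv().
  library(VGAM)
  w <- lambertW(1:5)
  max(abs(w * exp(w) - (1:5)))       # ~ 0, up to the 1e-10 tolerance

  pgd <- pgamma.deriv(q = c(0.5, 2, 7), shape = 3)
  colnames(pgd)                      # "q", ..., "pgamma(q, shape)"
  max(abs(pgd[, 6] - pgamma(c(0.5, 2, 7), shape = 3)))   # ~ 0
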
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 8ce3179..dd4bc3a 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,8 +28,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
qmu = c(0.2, 0.8),
equalsd = TRUE,
nsimEIM = 100,
- zero = 1)
-{
+ zero = 1) {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
lphi <- attr(ephi, "function.name")
@@ -78,19 +77,19 @@ mix2normal1.control <- function(trace = TRUE, ...) {
new("vglmff",
blurb = c("Mixture of two univariate normals\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
- namesof("mu1", lmu, earg = emu1, tag = FALSE), ", ",
- namesof("sd1", lsd, earg = esd1, tag = FALSE), ", ",
- namesof("mu2", lmu, earg = emu2, tag = FALSE), ", ",
- namesof("sd2", lsd, earg = esd2, tag = FALSE), "\n",
- "Mean: phi*mu1 + (1 - phi)*mu2\n",
- "Variance: phi*sd1^2 + (1 - phi)*sd2^2 + ",
- "phi*(1 - phi)*(mu1-mu2)^2"),
+ "Links: ",
+ namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
+ namesof("mu1", lmu, earg = emu1, tag = FALSE), ", ",
+ namesof("sd1", lsd, earg = esd1, tag = FALSE), ", ",
+ namesof("mu2", lmu, earg = emu2, tag = FALSE), ", ",
+ namesof("sd2", lsd, earg = esd2, tag = FALSE), "\n",
+ "Mean: phi*mu1 + (1 - phi)*mu2\n",
+ "Variance: phi*sd1^2 + (1 - phi)*sd2^2 + ",
+ "phi*(1 - phi)*(mu1-mu2)^2"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(rbind(diag(4), c(0,0, 1,0)), x, .equalsd,
- constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(rbind(diag(4), c(0, 0, 1,0)), x, .equalsd ,
+ constraints, apply.int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero, .equalsd = equalsd ))),
initialize = eval(substitute(expression({
@@ -105,7 +104,7 @@ mix2normal1.control <- function(trace = TRUE, ...) {
- predictors.names = c(
+ predictors.names <- c(
namesof("phi", .lphi, tag = FALSE),
namesof("mu1", .lmu, earg = .emu1, tag = FALSE),
namesof("sd1", .lsd, earg = .esd1, tag = FALSE),
@@ -115,31 +114,32 @@ mix2normal1.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
- qy = quantile(y, prob = .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.mu1 = rep(if(length(.imu1)) .imu1 else qy[1], length = n)
- init.mu2 = rep(if(length(.imu2)) .imu2 else qy[2], length = n)
- ind.1 = if (init.mu1[1] < init.mu2[1])
+ qy <- quantile(y, prob = .qmu)
+ init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.mu1 <- rep(if(length(.imu1)) .imu1 else qy[1], length = n)
+ init.mu2 <- rep(if(length(.imu2)) .imu2 else qy[2], length = n)
+ ind.1 <- if (init.mu1[1] < init.mu2[1])
1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
- ind.2 = if (init.mu1[1] < init.mu2[1])
+ ind.2 <- if (init.mu1[1] < init.mu2[1])
round(n* init.phi[1]):n else
1:round(n* init.phi[1])
- sorty = sort(y)
- init.sd1 = rep(if(length( .isd1 )) .isd1 else sd(sorty[ind.1]),
- len = n)
- init.sd2 = rep(if(length( .isd2 )) .isd2 else sd(sorty[ind.2]),
- len = n)
+ sorty <- sort(y)
+ init.sd1 <- rep(if(length( .isd1 )) .isd1 else sd(sorty[ind.1]),
+ len = n)
+ init.sd2 <- rep(if(length( .isd2 )) .isd2 else sd(sorty[ind.2]),
+ len = n)
if ( .equalsd ) {
- init.sd1 = init.sd2 = (init.sd1 + init.sd2)/2
+      init.sd1 <- init.sd2 <- (init.sd1 + init.sd2)/2
if (!all.equal( .esd1, .esd2 ))
stop("'esd1' and 'esd2' must be equal if 'equalsd = TRUE'")
}
- etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.mu1, .lmu, earg = .emu1),
- theta2eta(init.sd1, .lsd, earg = .esd1),
- theta2eta(init.mu2, .lmu, earg = .emu2),
- theta2eta(init.sd2, .lsd, earg = .esd2))
+ etastart <- cbind(
+ theta2eta(init.phi, .lphi, earg = .ephi),
+ theta2eta(init.mu1, .lmu, earg = .emu1),
+ theta2eta(init.sd1, .lsd, earg = .esd1),
+ theta2eta(init.mu2, .lmu, earg = .emu2),
+ theta2eta(init.sd2, .lsd, earg = .esd2))
}
}), list(.lphi = lphi, .lmu = lmu,
.iphi = iphi, .imu1 = imu1, .imu2 = imu2,
@@ -147,23 +147,23 @@ mix2normal1.control <- function(trace = TRUE, ...) {
.esd1 = esd1, .esd2 = esd2, .equalsd = equalsd,
.lsd = lsd, .isd1 = isd1, .isd2 = isd2, .qmu = qmu))),
linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
phi * mu1 + (1 - phi) * mu2
}, list( .lphi = lphi, .lmu = lmu,
.ephi = ephi, .emu1 = emu1, .emu2 = emu2,
.esd1 = esd1, .esd2 = esd2 ))),
last = eval(substitute(expression({
- misc$link = c("phi" = .lphi, "mu1" = .lmu,
- "sd1" = .lsd, "mu2" = .lmu, "sd2" = .lsd)
+ misc$link <- c("phi" = .lphi, "mu1" = .lmu,
+ "sd1" = .lsd , "mu2" = .lmu, "sd2" = .lsd)
- misc$earg = list("phi" = .ephi, "mu1" = .emu1,
- "sd1" = .esd1, "mu2" = .emu2, "sd2" = .esd2)
+ misc$earg <- list("phi" = .ephi, "mu1" = .emu1,
+ "sd1" = .esd1, "mu2" = .emu2, "sd2" = .esd2)
- misc$expected = TRUE
- misc$equalsd = .equalsd
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$equalsd <- .equalsd
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list(.lphi = lphi, .lmu = lmu, .lsd = lsd, .equalsd = equalsd,
.ephi = ephi, .emu1 = emu1, .emu2 = emu2,
@@ -171,13 +171,13 @@ mix2normal1.control <- function(trace = TRUE, ...) {
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- sd1 = eta2theta(eta[, 3], link = .lsd, earg = .esd1)
- mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
- sd2 = eta2theta(eta[, 5], link = .lsd, earg = .esd2)
- f1 = dnorm(y, mean=mu1, sd=sd1)
- f2 = dnorm(y, mean=mu2, sd=sd2)
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ sd1 <- eta2theta(eta[, 3], link = .lsd, earg = .esd1)
+ mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ sd2 <- eta2theta(eta[, 5], link = .lsd, earg = .esd2)
+ f1 <- dnorm(y, mean=mu1, sd=sd1)
+ f2 <- dnorm(y, mean=mu2, sd=sd2)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * log(phi*f1 + (1 - phi)*f2))
@@ -187,30 +187,30 @@ mix2normal1.control <- function(trace = TRUE, ...) {
.lsd = lsd ))),
vfamily = c("mix2normal1"),
deriv = eval(substitute(expression({
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- sd1 = eta2theta(eta[, 3], link = .lsd, earg = .esd1)
- mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
- sd2 = eta2theta(eta[, 5], link = .lsd, earg = .esd2)
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dmu1.deta = dtheta.deta(mu1, link = .lmu, earg = .emu1)
- dmu2.deta = dtheta.deta(mu2, link = .lmu, earg = .emu2)
- dsd1.deta = dtheta.deta(sd1, link = .lsd, earg = .esd1)
- dsd2.deta = dtheta.deta(sd2, link = .lsd, earg = .esd2)
- f1 = dnorm(y, mean=mu1, sd=sd1)
- f2 = dnorm(y, mean=mu2, sd=sd2)
- pdf = phi*f1 + (1 - phi)*f2
- z1 = (y-mu1) / sd1
- z2 = (y-mu2) / sd2
- df1.dmu1 = z1 * f1 / sd1
- df2.dmu2 = z2 * f2 / sd2
- df1.dsd1 = (z1^2 - 1) * f1 / sd1
- df2.dsd2 = (z2^2 - 1) * f2 / sd2
- dl.dphi = (f1-f2) / pdf
- dl.dmu1 = phi * df1.dmu1 / pdf
- dl.dmu2 = (1 - phi) * df2.dmu2 / pdf
- dl.dsd1 = phi * df1.dsd1 / pdf
- dl.dsd2 = (1 - phi) * df2.dsd2 / pdf
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ sd1 <- eta2theta(eta[, 3], link = .lsd, earg = .esd1)
+ mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ sd2 <- eta2theta(eta[, 5], link = .lsd, earg = .esd2)
+ dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dmu1.deta <- dtheta.deta(mu1, link = .lmu, earg = .emu1)
+ dmu2.deta <- dtheta.deta(mu2, link = .lmu, earg = .emu2)
+ dsd1.deta <- dtheta.deta(sd1, link = .lsd, earg = .esd1)
+ dsd2.deta <- dtheta.deta(sd2, link = .lsd, earg = .esd2)
+ f1 <- dnorm(y, mean=mu1, sd=sd1)
+ f2 <- dnorm(y, mean=mu2, sd=sd2)
+ pdf <- phi*f1 + (1 - phi)*f2
+ z1 <- (y-mu1) / sd1
+ z2 <- (y-mu2) / sd2
+ df1.dmu1 <- z1 * f1 / sd1
+ df2.dmu2 <- z2 * f2 / sd2
+ df1.dsd1 <- (z1^2 - 1) * f1 / sd1
+ df2.dsd2 <- (z2^2 - 1) * f2 / sd2
+ dl.dphi <- (f1-f2) / pdf
+ dl.dmu1 <- phi * df1.dmu1 / pdf
+ dl.dmu2 <- (1 - phi) * df2.dmu2 / pdf
+ dl.dsd1 <- phi * df1.dsd1 / pdf
+ dl.dsd2 <- (1 - phi) * df2.dsd2 / pdf
c(w) * cbind(dl.dphi * dphi.deta,
dl.dmu1 * dmu1.deta,
dl.dsd1 * dsd1.deta,
@@ -222,38 +222,38 @@ mix2normal1.control <- function(trace = TRUE, ...) {
.nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
- d3 = deriv3(~ log(
+ d3 <- deriv3(~ log(
phi * dnorm((ysim-mu1)/sd1) / sd1 +
(1 - phi) * dnorm((ysim-mu2)/sd2) / sd2),
c("phi","mu1","sd1","mu2","sd2"), hessian= TRUE)
- run.mean = 0
+ run.mean <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rnorm(n, mu1, sd1),
- rnorm(n, mu2, sd2))
+ ysim <- ifelse(runif(n) < phi, rnorm(n, mu1, sd1),
+ rnorm(n, mu2, sd2))
- eval.d3 = eval(d3)
- d2l.dthetas2 = attr(eval.d3, "hessian")
+ eval.d3 <- eval(d3)
+ d2l.dthetas2 <- attr(eval.d3, "hessian")
rm(ysim)
- temp3 = matrix(0, n, dimm(M))
+ temp3 <- matrix(0, n, dimm(M))
for(ss in 1:M)
for(tt in ss:M)
- temp3[,iam(ss,tt, M)] = -d2l.dthetas2[,ss,tt]
+ temp3[,iam(ss,tt, M)] <- -d2l.dthetas2[,ss,tt]
- run.mean = ((ii-1) * run.mean + temp3) / ii
+ run.mean <- ((ii-1) * run.mean + temp3) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
run.mean
- dtheta.detas = cbind(dphi.deta,
- dmu1.deta,
- dsd1.deta,
- dmu2.deta,
- dsd2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[, index0$row] *
- dtheta.detas[, index0$col]
+ dtheta.detas <- cbind(dphi.deta,
+ dmu1.deta,
+ dsd1.deta,
+ dmu2.deta,
+ dsd2.deta)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
c(w) * wz
}), list(.lphi = lphi, .lmu = lmu, .nsimEIM = nsimEIM ))))
}
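
Since the mix2normal1() changes above are largely stylistic (assignment
operators, apply.int in cm.vgam()), a minimal fitting sketch is given for
anyone checking that behaviour is preserved. Simulated data, VGAM attached;
illustrative only.

  ## Sketch only: two-component normal mixture with a common sd.
  library(VGAM)
  set.seed(2)
  n   <- 1000
  phi <- 0.35
  y   <- ifelse(runif(n) < phi, rnorm(n, mean = 0, sd = 1),
                                rnorm(n, mean = 4, sd = 1))
  mdat <- data.frame(y = y)
  fit <- vglm(y ~ 1, mix2normal1(equalsd = TRUE), data = mdat, trace = TRUE)
  coef(fit, matrix = TRUE)   # phi, mu1, sd1, mu2, sd2 on their link scales
  Coef(fit)                  # back-transformed estimates
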
@@ -268,8 +268,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
mix2poisson <- function(lphi = "logit", llambda = "loge",
iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
-{
+ qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1) {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
@@ -304,13 +303,13 @@ mix2poisson.control <- function(trace = TRUE, ...) {
new("vglmff",
blurb = c("Mixture of two Poisson distributions\n\n",
- "Links: ",
- namesof("phi",lphi, earg = ephi), ", ",
- namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
- namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
- "Mean: phi*lambda1 + (1 - phi)*lambda2"),
+ "Links: ",
+ namesof("phi",lphi, earg = ephi), ", ",
+ namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
+ namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
+ "Mean: phi*lambda1 + (1 - phi)*lambda2"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -330,19 +329,19 @@ mix2poisson.control <- function(trace = TRUE, ...) {
- predictors.names =
+ predictors.names <-
c(namesof("phi", .lphi , earg = .ephi , tag = FALSE),
namesof("lambda1", .llambda , earg = .el1 , tag = FALSE),
namesof("lambda2", .llambda , earg = .el2 , tag = FALSE))
if (!length(etastart)) {
- qy = quantile(y, prob = .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 = rep(if(length(.il1)) .il1 else qy[1], length = n)
- init.lambda2 = rep(if(length(.il2)) .il2 else qy[2], length = n)
+ qy <- quantile(y, prob = .qmu)
+ init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 <- rep(if(length(.il1)) .il1 else qy[1], length = n)
+ init.lambda2 <- rep(if(length(.il2)) .il2 else qy[2], length = n)
if (!length(etastart))
- etastart = cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
+ etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
theta2eta(init.lambda1, .llambda , earg = .el1 ),
theta2eta(init.lambda2, .llambda , earg = .el2 ))
}
@@ -351,32 +350,32 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.iphi = iphi, .il1 = il1, .il2 = il2,
.qmu = qmu))),
linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[, 1], link = .lphi , earg = .ephi )
- lambda1 = eta2theta(eta[, 2], link = .llambda , earg = .el1 )
- lambda2 = eta2theta(eta[, 3], link = .llambda , earg = .el2 )
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
phi * lambda1 + (1 - phi) * lambda2
}, list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
last = eval(substitute(expression({
- misc$link =
+ misc$link <-
c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda )
- misc$earg =
+ misc$earg <-
list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2 )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
- f1 = dpois(y, lam = lambda1)
- f2 = dpois(y, lam = lambda2)
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ f1 <- dpois(y, lam = lambda1)
+ f2 <- dpois(y, lam = lambda2)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * log(phi*f1 + (1 - phi)*f2))
@@ -384,22 +383,22 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2poisson"),
deriv = eval(substitute(expression({
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
-
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
-
- f1 = dpois(x = y, lam = lambda1)
- f2 = dpois(x = y, lam = lambda2)
- pdf = phi*f1 + (1 - phi)*f2
- df1.dlambda1 = dpois(y-1, lam = lambda1) - f1
- df2.dlambda2 = dpois(y-1, lam = lambda2) - f2
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+
+ dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dlambda1.deta <- dtheta.deta(lambda1, link = .llambda, earg = .el1)
+ dlambda2.deta <- dtheta.deta(lambda2, link = .llambda, earg = .el2)
+
+ f1 <- dpois(x = y, lam = lambda1)
+ f2 <- dpois(x = y, lam = lambda2)
+ pdf <- phi*f1 + (1 - phi)*f2
+ df1.dlambda1 <- dpois(y-1, lam = lambda1) - f1
+ df2.dlambda2 <- dpois(y-1, lam = lambda2) - f2
+ dl.dphi <- (f1-f2) / pdf
+ dl.dlambda1 <- phi * df1.dlambda1 / pdf
+ dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
c(w) * cbind(dl.dphi * dphi.deta,
dl.dlambda1 * dlambda1.deta,
@@ -408,56 +407,56 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2,
.nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
- run.mean = 0
+ run.mean <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rpois(n, lambda1),
+ ysim <- ifelse(runif(n) < phi, rpois(n, lambda1),
rpois(n, lambda2))
- f1 = dpois(x = ysim, lam = lambda1)
- f2 = dpois(x = ysim, lam = lambda2)
- pdf = phi*f1 + (1 - phi)*f2
+ f1 <- dpois(x = ysim, lam = lambda1)
+ f2 <- dpois(x = ysim, lam = lambda2)
+ pdf <- phi*f1 + (1 - phi)*f2
- df1.dlambda1 = dpois(ysim-1, lam = lambda1) - f1
- df2.dlambda2 = dpois(ysim-1, lam = lambda2) - f2
+ df1.dlambda1 <- dpois(ysim-1, lam = lambda1) - f1
+ df2.dlambda2 <- dpois(ysim-1, lam = lambda2) - f2
- dl.dphi = (f1 - f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+ dl.dphi <- (f1 - f2) / pdf
+ dl.dlambda1 <- phi * df1.dlambda1 / pdf
+ dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
- d2f1.dlambda12 = dpois(ysim-2, lambda1) -
+ d2f1.dlambda12 <- dpois(ysim-2, lambda1) -
2*dpois(ysim-1, lambda1) +
dpois(ysim, lambda1)
- d2f2.dlambda22 = dpois(ysim-2, lambda2) -
+ d2f2.dlambda22 <- dpois(ysim-2, lambda2) -
2*dpois(ysim-1, lambda2) +
dpois(ysim, lambda2)
- d2l.dphi2 = dl.dphi^2
- d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
+ d2l.dphi2 <- dl.dphi^2
+ d2l.dlambda12 <- phi * (phi * df1.dlambda1^2 / pdf -
d2f1.dlambda12) / pdf
- d2l.dlambda22 = (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
+ d2l.dlambda22 <- (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
d2f2.dlambda22) / pdf
- d2l.dlambda1lambda2 = phi * (1 - phi) *
+ d2l.dlambda1lambda2 <- phi * (1 - phi) *
df1.dlambda1 * df2.dlambda2 / pdf^2
- d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
- d2l.dphilambda2 = df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda1 <- df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda2 <- df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
rm(ysim)
- temp3 = matrix(0, n, dimm(M))
- temp3[,iam(1, 1, M = 3)] = d2l.dphi2
- temp3[,iam(2, 2, M = 3)] = d2l.dlambda12
- temp3[,iam(3, 3, M = 3)] = d2l.dlambda22
- temp3[,iam(1, 2, M = 3)] = d2l.dphilambda1
- temp3[,iam(1, 3, M = 3)] = d2l.dphilambda2
- temp3[,iam(2, 3, M = 3)] = d2l.dlambda1lambda2
- run.mean = ((ii-1) * run.mean + temp3) / ii
+ temp3 <- matrix(0, n, dimm(M))
+ temp3[,iam(1, 1, M = 3)] <- d2l.dphi2
+ temp3[,iam(2, 2, M = 3)] <- d2l.dlambda12
+ temp3[,iam(3, 3, M = 3)] <- d2l.dlambda22
+ temp3[,iam(1, 2, M = 3)] <- d2l.dphilambda1
+ temp3[,iam(1, 3, M = 3)] <- d2l.dphilambda2
+ temp3[,iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
+ run.mean <- ((ii-1) * run.mean + temp3) / ii
}
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
- run.mean
+ wz <- if (intercept.only)
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
- dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[, index0$row] *
- dtheta.detas[, index0$col]
+ dtheta.detas <- cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
c(w) * wz
}), list(.lphi = lphi, .llambda = llambda,
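
An analogous sketch for mix2poisson(), again illustrative only and assuming
VGAM 0.9-1 is attached.

  ## Sketch only: two-component Poisson mixture.
  library(VGAM)
  set.seed(3)
  n   <- 1000
  phi <- 0.4
  y   <- ifelse(runif(n) < phi, rpois(n, lambda = 2), rpois(n, lambda = 10))
  pdat <- data.frame(y = y)
  fit <- vglm(y ~ 1, mix2poisson(nsimEIM = 100), data = pdat, trace = TRUE)
  Coef(fit)                  # estimates of phi, lambda1, lambda2
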
@@ -475,8 +474,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
mix2exp <- function(lphi = "logit", llambda = "loge",
iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
-{
+ qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1) {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
lphi <- attr(ephi, "function.name")
@@ -510,14 +508,14 @@ mix2exp.control <- function(trace = TRUE, ...) {
new("vglmff",
blurb = c("Mixture of two univariate exponentials\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
- namesof("lambda1", llambda, earg = el1 , tag = FALSE), ", ",
- namesof("lambda2", llambda, earg = el2 , tag = FALSE), "\n",
- "Mean: phi / lambda1 + (1 - phi) / lambda2\n"),
+ "Links: ",
+ namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
+ namesof("lambda1", llambda, earg = el1 , tag = FALSE), ", ",
+ namesof("lambda2", llambda, earg = el2 , tag = FALSE), "\n",
+ "Mean: phi / lambda1 + (1 - phi) / lambda2\n"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -536,52 +534,52 @@ mix2exp.control <- function(trace = TRUE, ...) {
- predictors.names =
+ predictors.names <-
c(namesof("phi", .lphi , earg = .ephi , tag = FALSE),
namesof("lambda1", .llambda , earg = .el1 , tag = FALSE),
namesof("lambda2", .llambda , earg = .el2 , tag = FALSE))
if (!length(etastart)) {
- qy = quantile(y, prob = .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 = rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
- init.lambda2 = rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
+ qy <- quantile(y, prob = .qmu)
+ init.phi <- rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 <- rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
+ init.lambda2 <- rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
if (!length(etastart))
- etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.lambda1, .llambda, earg = .el1),
- theta2eta(init.lambda2, .llambda, earg = .el2))
+ etastart <- cbind(theta2eta(init.phi, .lphi, earg = .ephi),
+ theta2eta(init.lambda1, .llambda, earg = .el1),
+ theta2eta(init.lambda2, .llambda, earg = .el2))
}
}), list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2,
.iphi = iphi, .il1 = il1, .il2 = il2,
.qmu = qmu))),
linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
phi / lambda1 + (1 - phi) / lambda2
}, list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
last = eval(substitute(expression({
- misc$link =
+ misc$link <-
c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda)
- misc$earg =
+ misc$earg <-
list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list(.lphi = lphi, .llambda = llambda, .nsimEIM = nsimEIM,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
- f1 = dexp(y, rate=lambda1)
- f2 = dexp(y, rate=lambda2)
+ f1 <- dexp(y, rate=lambda1)
+ f2 <- dexp(y, rate=lambda2)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * log(phi*f1 + (1 - phi)*f2))
@@ -589,22 +587,22 @@ mix2exp.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2exp"),
deriv = eval(substitute(expression({
- phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
-
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
-
- f1 = dexp(x = y, rate=lambda1)
- f2 = dexp(x = y, rate=lambda2)
- pdf = phi*f1 + (1 - phi)*f2
- df1.dlambda1 = exp(-lambda1*y) - y * dexp(y, rate=lambda1)
- df2.dlambda2 = exp(-lambda2*y) - y * dexp(y, rate=lambda2)
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+ phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+
+ dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dlambda1.deta <- dtheta.deta(lambda1, link = .llambda, earg = .el1)
+ dlambda2.deta <- dtheta.deta(lambda2, link = .llambda, earg = .el2)
+
+ f1 <- dexp(x = y, rate=lambda1)
+ f2 <- dexp(x = y, rate=lambda2)
+ pdf <- phi*f1 + (1 - phi)*f2
+ df1.dlambda1 <- exp(-lambda1*y) - y * dexp(y, rate=lambda1)
+ df2.dlambda2 <- exp(-lambda2*y) - y * dexp(y, rate=lambda2)
+ dl.dphi <- (f1-f2) / pdf
+ dl.dlambda1 <- phi * df1.dlambda1 / pdf
+ dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
c(w) * cbind(dl.dphi * dphi.deta,
dl.dlambda1 * dlambda1.deta,
@@ -612,49 +610,49 @@ mix2exp.control <- function(trace = TRUE, ...) {
}), list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
weight = eval(substitute(expression({
- run.mean = 0
+ run.mean <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rexp(n, lambda1),
- rexp(n, lambda2))
- f1 = dexp(x = ysim, rate=lambda1)
- f2 = dexp(x = ysim, rate=lambda2)
- pdf = phi*f1 + (1 - phi)*f2
-
- df1.dlambda1 = exp(-lambda1*ysim) - ysim * dexp(ysim, rate=lambda1)
- df2.dlambda2 = exp(-lambda2*ysim) - ysim * dexp(ysim, rate=lambda2)
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
- d2f1.dlambda12 = ysim*(ysim*lambda1-2)*exp(-lambda1*ysim)
- d2f2.dlambda22 = ysim*(ysim*lambda2-2)*exp(-lambda2*ysim)
- d2l.dphi2 = dl.dphi^2
- d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
- d2f1.dlambda12) / pdf
- d2l.dlambda22 = (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
- d2f2.dlambda22) / pdf
- d2l.dlambda1lambda2 = phi * (1 - phi) *
+ ysim <- ifelse(runif(n) < phi, rexp(n, lambda1),
+ rexp(n, lambda2))
+ f1 <- dexp(x = ysim, rate=lambda1)
+ f2 <- dexp(x = ysim, rate=lambda2)
+ pdf <- phi*f1 + (1 - phi)*f2
+
+ df1.dlambda1 <- exp(-lambda1*ysim) - ysim * dexp(ysim, rate=lambda1)
+ df2.dlambda2 <- exp(-lambda2*ysim) - ysim * dexp(ysim, rate=lambda2)
+ dl.dphi <- (f1-f2) / pdf
+ dl.dlambda1 <- phi * df1.dlambda1 / pdf
+ dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
+ d2f1.dlambda12 <- ysim*(ysim*lambda1-2)*exp(-lambda1*ysim)
+ d2f2.dlambda22 <- ysim*(ysim*lambda2-2)*exp(-lambda2*ysim)
+ d2l.dphi2 <- dl.dphi^2
+ d2l.dlambda12 <- phi * (phi * df1.dlambda1^2 / pdf -
+ d2f1.dlambda12) / pdf
+ d2l.dlambda22 <- (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
+ d2f2.dlambda22) / pdf
+ d2l.dlambda1lambda2 <- phi * (1 - phi) *
df1.dlambda1 * df2.dlambda2 / pdf^2
- d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
- d2l.dphilambda2 = df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda1 <- df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda2 <- df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
rm(ysim)
- temp3 = matrix(0, n, dimm(M))
- temp3[,iam(1, 1, M = 3)] = d2l.dphi2
- temp3[,iam(2, 2, M = 3)] = d2l.dlambda12
- temp3[,iam(3, 3, M = 3)] = d2l.dlambda22
- temp3[,iam(1, 2, M = 3)] = d2l.dphilambda1
- temp3[,iam(1, 3, M = 3)] = d2l.dphilambda2
- temp3[,iam(2, 3, M = 3)] = d2l.dlambda1lambda2
- run.mean = ((ii-1) * run.mean + temp3) / ii
+ temp3 <- matrix(0, n, dimm(M))
+ temp3[,iam(1, 1, M = 3)] <- d2l.dphi2
+ temp3[,iam(2, 2, M = 3)] <- d2l.dlambda12
+ temp3[,iam(3, 3, M = 3)] <- d2l.dlambda22
+ temp3[,iam(1, 2, M = 3)] <- d2l.dphilambda1
+ temp3[,iam(1, 3, M = 3)] <- d2l.dphilambda2
+ temp3[,iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
+ run.mean <- ((ii-1) * run.mean + temp3) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
run.mean
- dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[, index0$row] *
- dtheta.detas[, index0$col]
+ dtheta.detas <- cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
c(w) * wz
}), list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2,
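
And the same kind of sketch for mix2exp(); per its blurb above the mean is
phi / lambda1 + (1 - phi) / lambda2, i.e. a rate parameterisation.
Illustrative only.

  ## Sketch only: two-component exponential mixture (rates 5 and 0.5).
  library(VGAM)
  set.seed(4)
  n   <- 1000
  phi <- 0.3
  y   <- ifelse(runif(n) < phi, rexp(n, rate = 5), rexp(n, rate = 0.5))
  edat <- data.frame(y = y)
  fit <- vglm(y ~ 1, mix2exp(), data = edat, trace = TRUE)
  Coef(fit)                  # phi, lambda1, lambda2
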
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 7cec868..0a81284 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,8 +12,7 @@
-vnonlinear.control <- function(save.weight = TRUE, ...)
-{
+vnonlinear.control <- function(save.weight = TRUE, ...) {
@@ -72,8 +71,7 @@ subset_lohi <- function(xvec, yvec,
-micmen.control <- function(save.weight = TRUE, ...)
-{
+micmen.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -89,8 +87,7 @@ micmen.control <- function(save.weight = TRUE, ...)
firstDeriv = c("nsimEIM", "rpar"),
probs.x = c(0.15, 0.85),
nsimEIM = 500,
- dispersion = 0, zero = NULL)
-{
+ dispersion = 0, zero = NULL) {
@@ -126,12 +123,12 @@ micmen.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Michaelis-Menton regression model\n",
- "Y_i = theta1 * u_i / (theta2 + u_i) + e_i\n\n",
- "Links: ",
- namesof("theta1", link1, earg = earg1), ", ",
- namesof("theta2", link2, earg = earg2),
- "\n",
- "Variance: constant"),
+ "Y_i = theta1 * u_i / (theta2 + u_i) + e_i\n\n",
+ "Links: ",
+ namesof("theta1", link1, earg = earg1), ", ",
+ namesof("theta2", link2, earg = earg2),
+ "\n",
+ "Variance: constant"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
@@ -142,7 +139,7 @@ micmen.control <- function(save.weight = TRUE, ...)
if (residuals) {
if (M > 1) NULL else (y - mu) * sqrt(w)
} else {
- rss.vgam(y - mu, w, M = M)
+ ResSS.vgam(y - mu, w, M = M)
}
},
@@ -216,9 +213,9 @@ micmen.control <- function(save.weight = TRUE, ...)
.link2 = link2, .earg2 = earg2))),
last = eval(substitute(expression({
- misc$link <- c(theta1 = .link1 , theta2 = .link2)
+ misc$link <- c(theta1 = .link1 , theta2 = .link2 )
- misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
+ misc$earg <- list(theta1 = .earg1 , theta2 = .earg2 )
misc$rpar <- rpar
fit$df.residual <- n - rank # Not nrow_X_vlm - rank
@@ -376,8 +373,7 @@ skira.control <- function(save.weight = TRUE, ...) {
smallno = 1.0e-3,
nsimEIM = 500,
firstDeriv = c("nsimEIM", "rpar"),
- dispersion = 0, zero = NULL)
-{
+ dispersion = 0, zero = NULL) {
firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
@@ -420,7 +416,7 @@ skira.control <- function(save.weight = TRUE, ...) {
ncol(y) else 1
if (residuals) {
if (M > 1) NULL else (y - mu) * sqrt(w)
- } else rss.vgam(y - mu, w, M = M)
+ } else ResSS.vgam(y - mu, w, M = M)
},
initialize = eval(substitute(expression({
@@ -477,7 +473,9 @@ skira.control <- function(save.weight = TRUE, ...) {
wt.temp.max <- median(wt.temp) * 100
wt.temp[wt.temp > wt.temp.max] <- wt.temp.max
- mylm.wfit <- lm.wfit(x = cbind(1, xx), y = 1 / yy, w = wt.temp)
+ mylm.wfit <- lm.wfit(x = cbind(1, xx),
+ y = c(1 / yy),
+ w = c(wt.temp))
init1 <- mylm.wfit$coef[1]
init2 <- mylm.wfit$coef[2]
} else if (( .imethod == 4) || ( .imethod == 5)) {
@@ -535,9 +533,9 @@ skira.control <- function(save.weight = TRUE, ...) {
}, list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2 ))),
last = eval(substitute(expression({
- misc$link <- c(theta1 = .link1 , theta2 = .link2)
+ misc$link <- c(theta1 = .link1 , theta2 = .link2 )
- misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
+ misc$earg <- list(theta1 = .earg1 , theta2 = .earg2 )
misc$rpar <- rpar
misc$orig.rpar <- .rpar
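
The substantive changes in family.nonlinear.R are the rename rss.vgam() to
ResSS.vgam() and wrapping the lm.wfit() response and weights in c(). The
model that micmen() targets is the Michaelis-Menten curve quoted in its
blurb, Y_i = theta1 * u_i / (theta2 + u_i) + e_i. The base-R sketch below
fits the same mean function with nls() on simulated data, purely as a
reference point; it does not show micmen()'s own calling convention.

  ## Sketch only (base R, not micmen()): Michaelis-Menten curve.
  set.seed(5)
  u <- runif(60, 0, 10)
  y <- 8 * u / (1.5 + u) + rnorm(60, sd = 0.2)
  fit <- nls(y ~ theta1 * u / (theta2 + u),
             start = list(theta1 = 5, theta2 = 1))
  coef(fit)    # should be near theta1 = 8, theta2 = 1.5
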
diff --git a/R/family.normal.R b/R/family.normal.R
index 9c8a9a8..c55f5dc 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,17 +12,17 @@
VGAM.weights.function <- function(w, M, n) {
- ncolw = ncol(as.matrix(w))
+ ncolw <- ncol(as.matrix(w))
if (ncolw == 1) {
- wz = matrix(w, nrow = n, ncol = M) # w_i * diag(M)
+ wz <- matrix(w, nrow = n, ncol = M) # w_i * diag(M)
} else if (ncolw == M) {
- wz = as.matrix(w)
+ wz <- as.matrix(w)
} else if (ncolw < M && M > 1) {
stop("ambiguous input for 'weights'")
} else if (ncolw > M*(M+1)/2) {
stop("too many columns")
} else {
- wz = as.matrix(w)
+ wz <- as.matrix(w)
}
wz
}
@@ -38,8 +38,7 @@ VGAM.weights.function <- function(w, M, n) {
- gaussianff <- function(dispersion = 0, parallel = FALSE, zero = NULL)
-{
+ gaussianff <- function(dispersion = 0, parallel = FALSE, zero = NULL) {
if (!is.Numeric(dispersion, allowable.length = 1) ||
dispersion < 0)
@@ -66,7 +65,7 @@ VGAM.weights.function <- function(w, M, n) {
temp
} else (y-mu) * sqrt(wz)
} else {
- rss.vgam(y-mu, wz = wz, M = M)
+ ResSS.vgam(y-mu, wz = wz, M = M)
}
},
@@ -95,37 +94,38 @@ VGAM.weights.function <- function(w, M, n) {
y <- temp5$y
- M = if (is.matrix(y)) ncol(y) else 1
- dy = dimnames(y)
+ M <- if (is.matrix(y)) ncol(y) else 1
+ dy <- dimnames(y)
predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
paste("Y", 1:M, sep = "")
if (!length(etastart))
- etastart = 0 * y
+ etastart <- 0 * y
}), list( .parallel = parallel, .zero = zero ))),
linkinv = function(eta, extra = NULL) eta,
last = eval(substitute(expression({
- dy = dimnames(y)
+ dy <- dimnames(y)
if (!is.null(dy[[2]]))
- dimnames(fit$fitted.values) = dy
- dpar = .dispersion
+ dimnames(fit$fitted.values) <- dy
+ dpar <- .dispersion
if (!dpar) {
- wz = VGAM.weights.function(w = w, M = M, n = n)
- temp5 = rss.vgam(y-mu, wz = wz, M = M)
- dpar = temp5 / (length(y) -
+ wz <- VGAM.weights.function(w = w, M = M, n = n)
+ temp5 <- ResSS.vgam(y-mu, wz = wz, M = M)
+ dpar <- temp5 / (length(y) -
(if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
}
- misc$dispersion = dpar
- misc$default.dispersion = 0
- misc$estimated.dispersion = .estimated.dispersion
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+
+ misc$link <- rep("identity", length = M)
+ names(misc$link) <- predictors.names
- misc$link = rep("identity", length = M)
- names(misc$link) = predictors.names
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
for (ilocal in 1:M)
misc$earg[[ilocal]] <- list()
- names(misc$link) = predictors.names
+ names(misc$link) <- predictors.names
if (is.R()) {
@@ -136,40 +136,45 @@ VGAM.weights.function <- function(w, M, n) {
remove("CQO.FastAlgorithm")
}
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion ))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M = if (is.matrix(y)) ncol(y) else 1
- n = if (is.matrix(y)) nrow(y) else length(y)
- wz = VGAM.weights.function(w = w, M = M, n = n)
- temp1 = rss.vgam(y-mu, wz = wz, M = M)
+ M <- if (is.matrix(y)) ncol(y) else 1
+ n <- if (is.matrix(y)) nrow(y) else length(y)
+ wz <- VGAM.weights.function(w = w, M = M, n = n)
+ print("head(wz) -----------------------------")
+ print( head(wz) )
+ temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
if (M == 1 || ncol(wz) == M) {
+ print("hi3 ooooo")
-0.5 * temp1 + 0.5 * sum(log(wz)) - n * (M / 2) * log(2*pi)
} else {
if (all(wz[1, ] == apply(wz, 2, min)) &&
all(wz[1, ] == apply(wz, 2, max))) {
- onewz = m2adefault(wz[1, , drop = FALSE], M = M)
- onewz = onewz[, ,1] # M x M
+ onewz <- m2adefault(wz[1, , drop = FALSE], M = M)
+ onewz <- onewz[,, 1] # M x M
+
- logdet <- sum(log(eigen(onewz, symmetric = TRUE,
- only.values = TRUE)$values))
+ logdet <- determinant(onewz)$modulus
logretval <- -0.5 * temp1 + 0.5 * n * logdet -
n * (M / 2) * log(2*pi)
+
+ distval <- stop("variable 'distval' not computed yet")
+ logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
logretval
} else {
- logretval = -0.5 * temp1 - n * (M / 2) * log(2*pi)
+ logretval <- -0.5 * temp1 - n * (M / 2) * log(2*pi)
for (ii in 1:n) {
- onewz = m2adefault(wz[ii, , drop = FALSE], M = M)
- onewz = onewz[, ,1] # M x M
- logdet <- sum(log(eigen(onewz, symmetric = TRUE,
- only.values = TRUE)$values))
- logretval = logretval + 0.5 * logdet
+ onewz <- m2adefault(wz[ii, , drop = FALSE], M = M)
+ onewz <- onewz[,, 1] # M x M
+ logdet <- determinant(onewz)$modulus
+ logretval <- logretval + 0.5 * logdet
}
logretval
}
@@ -178,7 +183,7 @@ VGAM.weights.function <- function(w, M, n) {
linkfun = function(mu, extra = NULL) mu,
vfamily = "gaussianff",
deriv = expression({
- wz = VGAM.weights.function(w = w, M = M, n = n)
+ wz <- VGAM.weights.function(w = w, M = M, n = n)
mux22(cc = t(wz), xmat = y-mu, M = M, as.matrix = TRUE)
}),
weight = expression({
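
gaussianff() now calls ResSS.vgam() and uses determinant() rather than
eigen() for the log-determinant, and the loglikelihood slot gains some
print() diagnostics. A small usage sketch for anyone who wants to see the
fit (and any extra output) first-hand; illustrative only, simulated data.

  ## Sketch only: identity-link Gaussian fit with two responses.
  library(VGAM)
  set.seed(6)
  n  <- 200
  x2 <- rnorm(n)
  y1 <-  1 + 2.0 * x2 + rnorm(n)
  y2 <- -1 + 0.5 * x2 + rnorm(n)
  gdat <- data.frame(x2 = x2, y1 = y1, y2 = y2)
  fit <- vglm(cbind(y1, y2) ~ x2, gaussianff, data = gdat)
  coef(fit, matrix = TRUE)
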
@@ -201,10 +206,10 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
rm(log)
- L = max(length(x), length(mean), length(sd))
- x = rep(x, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
+ L <- max(length(x), length(mean), length(sd))
+ x <- rep(x, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
if (log.arg) {
ifelse(x < 0, log(0), dnorm(x, mean = mean, sd = sd, log = TRUE) -
@@ -216,10 +221,10 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
pposnorm <- function(q, mean = 0, sd = 1) {
- L = max(length(q), length(mean), length(sd))
- q = rep(q, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
+ L <- max(length(q), length(mean), length(sd))
+ q <- rep(q, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
ifelse(q < 0, 0, (pnorm(q, mean = mean, sd = sd) -
pnorm(0, mean = mean, sd = sd)) / pnorm(q = mean/sd))
}
@@ -236,8 +241,8 @@ qposnorm <- function(p, mean = 0, sd = 1) {
rposnorm <- function(n, mean = 0, sd = 1) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
- mean = rep(mean, length = n)
- sd = rep(sd, length = n)
+ mean <- rep(mean, length = n)
+ sd <- rep(sd, length = n)
qnorm(p = runif(n, min = pnorm(0, mean = mean, sd = sd)),
mean = mean, sd = sd)
}
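
The positive-normal d/p/q/r helpers above only change assignment style, so a
quick consistency check suffices to confirm the edit is behaviour-preserving.
Sketch only, VGAM attached.

  ## Sketch only: pposnorm() should integrate dposnorm(), and rposnorm()
  ## should return strictly positive draws.
  library(VGAM)
  set.seed(7)
  qq <- seq(0.1, 5, by = 0.1)
  p1 <- pposnorm(qq, mean = 1, sd = 2)
  p2 <- sapply(qq, function(q)
           integrate(dposnorm, lower = 0, upper = q, mean = 1, sd = 2)$value)
  max(abs(p1 - p2))                            # ~ 0
  all(rposnorm(1000, mean = -1, sd = 2) > 0)   # TRUE
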
@@ -245,7 +250,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
posnormal1.control <- function(save.weight = TRUE, ...) {
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
@@ -253,8 +258,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
posnormal1 <- function(lmean = "identity", lsd = "loge",
imean = NULL, isd = NULL,
- nsimEIM = 100, zero = NULL)
-{
+ nsimEIM = 100, zero = NULL) {
warning("this VGAM family function is not working properly yet")
@@ -319,96 +323,98 @@ rposnorm <- function(n, mean = 0, sd = 1) {
namesof("sd", .lsd, earg = .esd, tag = FALSE))
if (!length(etastart)) {
- init.me = if (length( .i.mean)) rep( .i.mean, len = n) else NULL
- init.sd = if (length( .i.sd )) rep( .i.sd , len = n) else NULL
- if (!length(init.me))
- init.me = rep(quantile(y, probs=0.40), len = n)
- if (!length(init.sd))
- init.sd = rep(sd(c(y)) * 1.2, len = n)
- etastart = cbind(theta2eta(init.me, .lmean, earg = .emean),
- theta2eta(init.sd, .lsd, earg = .esd ))
+ init.me <- if (length( .imean)) rep( .imean, len = n) else NULL
+ init.sd <- if (length( .isd )) rep( .isd , len = n) else NULL
+ if (!length(init.me))
+ init.me <- rep(quantile(y, probs = 0.40), len = n)
+ if (!length(init.sd))
+ init.sd <- rep(sd(c(y)) * 1.2, len = n)
+ etastart <- cbind(theta2eta(init.me, .lmean, earg = .emean),
+ theta2eta(init.sd, .lsd, earg = .esd ))
}
}), list( .lmean = lmean, .lsd = lsd,
- .i.mean = imean, .i.sd = isd,
+ .imean = imean, .isd = isd,
.emean = emean, .esd = esd
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean )
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean )
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd
))),
last = eval(substitute(expression({
- misc$link = c("mean" = .lmean , "sd" = .lsd )
- misc$earg = list("mean" = .emean , "sd" = .esd )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$link <- c("mean" = .lmean , "sd" = .lsd )
+
+ misc$earg <- list("mean" = .emean , "sd" = .esd )
+
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(c(w) * dposnorm(x=y, m=mymu, sd = mysd, log = TRUE))
+ sum(c(w) * dposnorm(x = y, m = mymu, sd = mysd, log = TRUE))
}
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd ))),
vfamily = c("posnormal1"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
- zedd = (y-mymu) / mysd
- temp7 = dnorm(-mymu/mysd)
- temp8 = pnorm(mymu/mysd) * mysd
+ zedd <- (y-mymu) / mysd
+ temp7 <- dnorm(-mymu/mysd)
+ temp8 <- pnorm(mymu/mysd) * mysd
- dl.dmu = zedd / mysd^2 - temp7 / temp8
- dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
+ dl.dmu <- zedd / mysd^2 - temp7 / temp8
+ dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
- dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd )
- dthetas.detas = cbind(dmu.deta, dsd.deta)
+ dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+ dsd.deta <- dtheta.deta(mysd, .lsd, earg = .esd )
+ dthetas.detas <- cbind(dmu.deta, dsd.deta)
c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
ysim <- rposnorm(n, m=mymu, sd = mysd)
- zedd = (ysim-mymu) / mysd
- temp7 = dnorm(-mymu/mysd)
- temp8 = pnorm(mymu/mysd) * mysd
- dl.dmu = zedd / mysd^2 - temp7 / temp8
- dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
+ zedd <- (ysim-mymu) / mysd
+ temp7 <- dnorm(-mymu/mysd)
+ temp8 <- pnorm(mymu/mysd) * mysd
+ dl.dmu <- zedd / mysd^2 - temp7 / temp8
+ dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
rm(ysim)
- temp3 = matrix(c(dl.dmu, dl.dsd), n, 2)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- matrix(c(dl.dmu, dl.dsd), n, 2)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[, ind1$row.index]*temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
- wz = c(w) * matrix(wz, n, dimm(M))
+ wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz <- c(w) * matrix(wz, n, dimm(M))
} else {
- wz = matrix(as.numeric(NA), n, dimm(M))
- ed2l.dmu2 = (1 - temp7*mymu/temp8) / mysd^2 - (temp7/temp8)^2
- ed2l.dmusd = (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
- mymu*temp7 / temp8)
- ed2l.dsd2 = 2 / mysd^2 - (temp7 * mymu /(mysd^2 * temp8)) *
- (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
- wz[, iam(1, 1, M)] = ed2l.dmu2 * dmu.deta^2
- wz[, iam(2, 2, M)] = ed2l.dsd2 * dsd.deta^2
- wz[, iam(1, 2, M)] = ed2l.dmusd * dsd.deta * dmu.deta
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ ned2l.dmu2 <- (1 - temp7*mymu/temp8) / mysd^2 - (temp7/temp8)^2
+ ned2l.dmusd <- (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
+ mymu*temp7 / temp8)
+ ned2l.dsd2 <- 2 / mysd^2 - (temp7 * mymu /(mysd^2 * temp8)) *
+ (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
+ wz[, iam(1, 1, M)] <- ned2l.dmu2 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsd2 * dsd.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dmusd * dsd.deta * dmu.deta
wz = c(w) * wz
}
wz
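
posnormal1() itself still carries the warning, visible in its definition
above, that it "is not working properly yet", so only a minimal illustrative
call is sketched here and any fit should be treated with caution.

  ## Sketch only: posnormal1() models a zero-truncated normal response.
  ## The family function warns that it is not working properly yet.
  library(VGAM)
  set.seed(8)
  pdat <- data.frame(y = rposnorm(500, mean = 1.5, sd = 1))
  fit <- vglm(y ~ 1, posnormal1(nsimEIM = 100), data = pdat, trace = TRUE)
  Coef(fit)    # estimates of the untruncated mean and sd
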
@@ -426,8 +432,7 @@ dbetanorm <- function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
rm(log)
- ans =
- if (log.arg) {
+ ans <- if (log.arg) {
dnorm(x = x, mean = mean, sd = sd, log = TRUE) +
(shape1-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE) +
(shape2-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE,
@@ -447,10 +452,10 @@ dbetanorm <- function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
pbetanorm <- function(q, shape1, shape2, mean = 0, sd = 1,
- lower.tail = TRUE, log.p = FALSE) {
- pbeta(q=pnorm(q = q, mean = mean, sd = sd),
- shape1=shape1, shape2=shape2,
- lower.tail = lower.tail, log.p = log.p)
+ lower.tail = TRUE, log.p = FALSE) {
+ pbeta(q = pnorm(q = q, mean = mean, sd = sd),
+ shape1 = shape1, shape2 = shape2,
+ lower.tail = lower.tail, log.p = log.p)
}
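
pbetanorm() is, per its body above, just pbeta() applied to pnorm()
probabilities, so one numerical comparison against the integral of
dbetanorm() confirms the reformatting preserved it. Sketch only.

  ## Sketch only: pbetanorm() should integrate dbetanorm().
  library(VGAM)
  q1  <- 0.7
  num <- integrate(dbetanorm, lower = -Inf, upper = q1,
                   shape1 = 2, shape2 = 3, mean = 0, sd = 1)$value
  num - pbetanorm(q1, shape1 = 2, shape2 = 3, mean = 0, sd = 1)   # ~ 0
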
@@ -482,11 +487,11 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
max(d) >= 2)
stop("bad input for argument 'd'")
- L = max(length(x), length(mean), length(sigma))
- x = rep(x, len = L); mean = rep(mean, len = L);
- sigma = rep(sigma, len = L);
- hh = 2 - d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ L <- max(length(x), length(mean), length(sigma))
+ x <- rep(x, len = L); mean <- rep(mean, len = L);
+ sigma <- rep(sigma, len = L);
+ hh <- 2 - d
+ KK <- 1 / (1 + 1/hh + 0.75/hh^2)
if (log.arg) {
dnorm(x = x, mean = mean, sd = sigma, log = TRUE) + log(KK) +
2 * log1p(((x-mean)/sigma)^2 / (2*hh))
@@ -502,22 +507,23 @@ ptikuv <- function(q, d, mean = 0, sigma = 1) {
max(d) >= 2)
stop("bad input for argument 'd'")
- L = max(length(q), length(mean), length(sigma))
- q = rep(q, len = L); mean = rep(mean, len = L);
- sigma = rep(sigma, len = L);
- zedd1 = 0.5 * ((q - mean) / sigma)^2
- ans = q*0 + 0.5
- hh = 2 - d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ L <- max(length(q), length(mean), length(sigma))
+ q <- rep(q, len = L);
+ mean <- rep(mean, len = L);
+ sigma <- rep(sigma, len = L);
+ zedd1 <- 0.5 * ((q - mean) / sigma)^2
+ ans <- q*0 + 0.5
+ hh <- 2 - d
+ KK <- 1 / (1 + 1/hh + 0.75/hh^2)
if (any(lhs <- q < mean)) {
- ans[lhs] = ( KK/(2*sqrt(pi))) * (
+ ans[lhs] <- ( KK/(2*sqrt(pi))) * (
gamma(0.5) * (1 - pgamma(zedd1[lhs], 0.5)) +
2 * gamma(1.5) * (1 - pgamma(zedd1[lhs], 1.5)) / hh +
gamma(2.5) * (1 - pgamma(zedd1[lhs], 2.5)) / hh^2)
}
if (any(rhs <- q > mean)) {
- ans[rhs] = 1.0 - Recall(q = (2*mean[rhs] - q[rhs]), d = d,
- mean = mean[rhs], sigma = sigma[rhs])
+ ans[rhs] <- 1.0 - Recall(q = (2*mean[rhs] - q[rhs]), d = d,
+ mean = mean[rhs], sigma = sigma[rhs])
}
ans
}
@@ -532,26 +538,28 @@ qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
stop("bad input for argument 'mean'")
if (!is.Numeric(sigma))
stop("bad input for argument 'sigma'")
- L = max(length(p), length(mean), length(sigma))
- p = rep(p, len = L);
- mean = rep(mean, len = L);
- sigma = rep(sigma, len = L);
- ans = rep(0.0, len = L)
+
+ L <- max(length(p), length(mean), length(sigma))
+ p <- rep(p, len = L);
+ mean <- rep(mean, len = L);
+ sigma <- rep(sigma, len = L);
+ ans <- rep(0.0, len = L)
myfun <- function(x, d, mean = 0, sigma = 1, p)
ptikuv(q = x, d = d, mean = mean, sigma = sigma) - p
- for(i in 1:L) {
- Lower = ifelse(p[i] <= 0.5, mean[i] - 3 * sigma[i], mean[i])
- while (ptikuv(q = Lower, d = d, mean = mean[i],
- sigma = sigma[i]) > p[i])
- Lower = Lower - sigma[i]
- Upper = ifelse(p[i] >= 0.5, mean[i] + 3 * sigma[i], mean[i])
- while (ptikuv(q = Upper, d = d, mean = mean[i],
- sigma = sigma[i]) < p[i])
- Upper = Upper + sigma[i]
- ans[i] = uniroot(f = myfun, lower = Lower, upper = Upper,
- d = d, p = p[i],
- mean = mean[i], sigma = sigma[i], ...)$root
+
+ for (ii in 1:L) {
+ Lower <- ifelse(p[ii] <= 0.5, mean[ii] - 3 * sigma[ii], mean[ii])
+ while (ptikuv(q = Lower, d = d, mean = mean[ii],
+ sigma = sigma[ii]) > p[ii])
+ Lower <- Lower - sigma[ii]
+ Upper <- ifelse(p[ii] >= 0.5, mean[ii] + 3 * sigma[ii], mean[ii])
+ while (ptikuv(q = Upper, d = d, mean = mean[ii],
+ sigma = sigma[ii]) < p[ii])
+ Upper <- Upper + sigma[ii]
+ ans[ii] <- uniroot(f = myfun, lower = Lower, upper = Upper,
+ d = d, p = p[ii],
+ mean = mean[ii], sigma = sigma[ii], ...)$root
}
ans
}
@@ -570,30 +578,30 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len = n)
-
- ptr1 = 1; ptr2 = 0
- hh = 2 - d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
- ymax = ifelse(hh < 2,
- dtikuv(x = mean + sigma*sqrt(4 - 2*hh),
- d = d, mean = mean, sigma = sigma),
- KK / (sqrt(2 * pi) * sigma))
+ ans <- rep(0.0, len = n)
+
+ ptr1 <- 1; ptr2 <- 0
+ hh <- 2 - d
+ KK <- 1 / (1 + 1/hh + 0.75/hh^2)
+ ymax <- ifelse(hh < 2,
+ dtikuv(x = mean + sigma*sqrt(4 - 2*hh),
+ d = d, mean = mean, sigma = sigma),
+ KK / (sqrt(2 * pi) * sigma))
while (ptr2 < n) {
- Lower = mean - 5 * sigma
+ Lower <- mean - 5 * sigma
while (ptikuv(q = Lower, d = d, mean = mean, sigma = sigma) > Smallno)
- Lower = Lower - sigma
- Upper = mean + 5 * sigma
+ Lower <- Lower - sigma
+ Upper <- mean + 5 * sigma
while (ptikuv(q = Upper, d = d, mean = mean, sigma = sigma) < 1-Smallno)
- Upper = Upper + sigma
- x = runif(2*n, min = Lower, max = Upper)
- index = runif(2*n, max = ymax) <
+ Upper <- Upper + sigma
+ x <- runif(2*n, min = Lower, max = Upper)
+ index <- runif(2*n, max = ymax) <
dtikuv(x, d = d, mean = mean, sigma = sigma)
- sindex = sum(index)
+ sindex <- sum(index)
if (sindex) {
- ptr2 = min(n, ptr1 + sindex - 1)
+ ptr2 <- min(n, ptr1 + sindex - 1)
ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
+ ptr1 <- ptr2 + 1
}
}
ans
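
The tikuv d/p/q/r helpers above mostly gain <- assignments and a loop index
rename (i to ii); a round-trip check that qtikuv() inverts ptikuv() confirms
behaviour, and ptikuv() at the mean is exactly 0.5 by construction. Sketch
only, with d = 1 so that d < 2 as required.

  ## Sketch only: qtikuv() should invert ptikuv().
  library(VGAM)
  pp <- c(0.05, 0.25, 0.5, 0.9)
  qq <- qtikuv(pp, d = 1, mean = 2, sigma = 3)
  max(abs(ptikuv(qq, d = 1, mean = 2, sigma = 3) - pp))   # ~ 0
  ptikuv(2, d = 1, mean = 2, sigma = 3)                   # exactly 0.5
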
@@ -603,8 +611,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
tikuv <- function(d, lmean = "identity", lsigma = "loge",
- isigma = NULL, zero = 2)
-{
+ isigma = NULL, zero = 2) {
lmean <- as.list(substitute(lmean))
@@ -612,8 +619,8 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
lmean <- attr(emean, "function.name")
lsigma <- as.list(substitute(lsigma))
- e.sigma <- link2list(lsigma)
- l.sigma <- attr(e.sigma, "function.name")
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
@@ -631,7 +638,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
"distribution\n",
"Link: ",
namesof("mean", lmean, earg = emean), ", ",
- namesof("sigma", l.sigma, earg = e.sigma),
+ namesof("sigma", lsigma, earg = esigma),
"\n", "\n",
"Mean: mean"),
constraints = eval(substitute(expression({
@@ -650,78 +657,81 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
predictors.names <-
c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
- namesof("sigma", .l.sigma, earg = .e.sigma, tag = FALSE))
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
if (!length(etastart)) {
- sigma.init = if (length(.i.sigma)) rep(.i.sigma, length = n) else {
- hh = 2 - .d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
- K2 = 1 + 3/hh + 15/(4*hh^2)
+ sigma.init <- if (length(.isigma)) rep(.isigma, length = n) else {
+ hh <- 2 - .d
+ KK <- 1 / (1 + 1/hh + 0.75/hh^2)
+ K2 <- 1 + 3/hh + 15/(4*hh^2)
rep(sqrt(var(y) / (KK*K2)), len = n)
}
- mean.init = rep(weighted.mean(y, w), len = n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
- theta2eta(sigma.init, .l.sigma, earg = .e.sigma))
+ mean.init <- rep(weighted.mean(y, w), len = n)
+ etastart <-
+ cbind(theta2eta(mean.init, .lmean, earg = .emean),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }),list( .lmean = lmean, .l.sigma = l.sigma,
- .i.sigma = isigma, .d = d,
- .emean = emean, .e.sigma = e.sigma ))),
+ }),list( .lmean = lmean, .lsigma = lsigma,
+ .isigma = isigma, .d = d,
+ .emean = emean, .esigma = esigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta[, 1], .lmean, earg = .emean)
}, list( .lmean = lmean,
- .emean = emean, .e.sigma = e.sigma ))),
+ .emean = emean, .esigma = esigma ))),
last = eval(substitute(expression({
- misc$link = c("mean"= .lmean , "sigma"= .l.sigma )
- misc$earg = list("mean"= .emean , "sigma"= .e.sigma )
- misc$expected = TRUE
- misc$d = .d
- }), list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
- .emean = emean, .e.sigma = e.sigma ))),
+ misc$link <- c("mean"= .lmean , "sigma"= .lsigma )
+
+ misc$earg <- list("mean"= .emean , "sigma"= .esigma )
+
+ misc$expected <- TRUE
+ misc$d <- .d
+ }), list( .lmean = lmean, .lsigma = lsigma, .d = d,
+ .emean = emean, .esigma = esigma ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- sigma = eta2theta(eta[, 2], .l.sigma, earg = .e.sigma)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dtikuv(x=y, d = .d , mean = mymu,
- sigma = sigma, log = TRUE))
- }
- }, list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
- .emean = emean, .e.sigma = e.sigma ))),
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dtikuv(x = y, d = .d , mean = mymu,
+ sigma = sigma, log = TRUE))
+ }
+ }, list( .lmean = lmean, .lsigma = lsigma, .d = d,
+ .emean = emean, .esigma = esigma ))),
vfamily = c("tikuv"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- sigma = eta2theta(eta[, 2], .l.sigma, earg = .e.sigma)
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsigma.deta = dtheta.deta(sigma, .l.sigma, earg = .e.sigma)
+ dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+ dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
- zedd = (y - mymu) / sigma
- hh = 2 - .d
- gzedd = zedd / (1 + 0.5*zedd^2 / hh)
+ zedd <- (y - mymu) / sigma
+ hh <- 2 - .d
+ gzedd <- zedd / (1 + 0.5*zedd^2 / hh)
- dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
- dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
+ dl.dmu <- zedd / sigma - 2 * gzedd / (hh*sigma)
+ dl.dsigma <- (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
c(w) * cbind(dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta)
- }), list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
- .emean = emean, .e.sigma = e.sigma ))),
+ }), list( .lmean = lmean, .lsigma = lsigma, .d = d,
+ .emean = emean, .esigma = esigma ))),
weight = eval(substitute(expression({
- ayy = 1 / (2*hh)
- Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
- Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
+ ayy <- 1 / (2*hh)
+ Dnos <- 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
+ Dstar <- -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
- ned2l.dmymu2 = Dnos / sigma^2
- ned2l.dnu2 = Dstar / sigma^2
+ ned2l.dmymu2 <- Dnos / sigma^2
+ ned2l.dnu2 <- Dstar / sigma^2
- wz = matrix(as.numeric(NA), n, M) # diagonal matrix
- wz[, iam(1, 1, M)] = ned2l.dmymu2 * dmu.deta^2
- wz[, iam(2, 2, M)] = ned2l.dnu2 * dsigma.deta^2
+ wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[, iam(1, 1, M)] <- ned2l.dmymu2 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dnu2 * dsigma.deta^2
c(w) * wz
- }), list( .lmean = lmean, .l.sigma = l.sigma,
- .emean = emean, .e.sigma = e.sigma ))))
+ }), list( .lmean = lmean, .lsigma = lsigma,
+ .emean = emean, .esigma = esigma ))))
}
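For orientation, a typical call to the tikuv() family with the argument names used in this version (d, isigma, zero); this is an illustrative sketch assuming the package built from this source is attached, not code taken from the commit:

## Illustrative fit only -- assumes library(VGAM) built from this source.
library(VGAM)
set.seed(123)
tdata <- data.frame(y = rtikuv(n = 500, d = 1, mean = 2, sigma = 3))
fit <- vglm(y ~ 1, tikuv(d = 1), data = tdata, trace = TRUE)
coef(fit, matrix = TRUE)   # one column per linear predictor: mean, sigma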
@@ -732,10 +742,10 @@ dfnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
- ans = dnorm(x = x/(a1*sd) - mean/sd)/(a1*sd) +
- dnorm(x = x/(a2*sd) + mean/sd)/(a2*sd)
- ans[x < 0] = 0
- ans[a1 <= 0 | a2 <= 0 | is.na(a1) | is.na(a2)] = NA
+ ans <- dnorm(x = x/(a1*sd) - mean/sd)/(a1*sd) +
+ dnorm(x = x/(a2*sd) + mean/sd)/(a2*sd)
+ ans[x < 0] <- 0
+ ans[a1 <= 0 | a2 <= 0 | is.na(a1) | is.na(a2)] <- NA
ans
}
@@ -746,10 +756,10 @@ pfnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
- L = max(length(q), length(mean), length(sd))
- q = rep(q, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
+ L <- max(length(q), length(mean), length(sd))
+ q <- rep(q, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
ifelse(q < 0, 0,
pnorm(q = q/(a1*sd) - mean/sd) -
@@ -766,26 +776,27 @@ qfnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
- L = max(length(p), length(mean), length(sd), length(a1), length(a2))
- p = rep(p, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
- a1 = rep(a1, len = L);
- a2 = rep(a2, len = L);
- ans = rep(0.0, len = L)
+ L <- max(length(p), length(mean), length(sd), length(a1), length(a2))
+ p <- rep(p, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
+ a1 <- rep(a1, len = L);
+ a2 <- rep(a2, len = L);
+ ans <- rep(0.0, len = L)

myfun <- function(x, mean = 0, sd = 1, a1 = 1, a2=2, p)
pfnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
+
for(i in 1:L) {
- mytheta = mean[i]/sd[i]
- EY = sd[i] * ((a1[i]+a2[i]) *
+ mytheta <- mean[i]/sd[i]
+ EY <- sd[i] * ((a1[i]+a2[i]) *
(mytheta * pnorm(mytheta) + dnorm(mytheta)) -
a2[i] * mytheta)
- Upper = 2 * EY
+ Upper <- 2 * EY
while (pfnorm(q = Upper, mean = mean[i], sd = sd[i],
a1 = a1[i], a2 = a2[i]) < p[i])
- Upper = Upper + sd[i]
- ans[i] = uniroot(f = myfun, lower = 0, upper = Upper,
+ Upper <- Upper + sd[i]
+ ans[i] <- uniroot(f = myfun, lower = 0, upper = Upper,
mean = mean[i],
sd = sd[i], a1 = a1[i], a2 = a2[i],
p = p[i], ...)$root
@@ -802,7 +813,7 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
- X = rnorm(n, mean = mean, sd = sd)
+ X <- rnorm(n, mean = mean, sd = sd)
pmax(a1 * X, -a2*X)
}
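The folded-normal mean that qfnorm() uses above for its upper bracket, and that fnormal1()'s @linkinv returns further down, is sd * ((a1 + a2) * (theta * pnorm(theta) + dnorm(theta)) - a2 * theta) with theta = mean/sd. A quick base-R Monte Carlo check, re-creating rfnorm()'s pmax(a1*X, -a2*X) construction inline (parameter values chosen arbitrarily):

## Check of the folded-normal mean formula used in qfnorm() and @linkinv.
set.seed(1)
mu <- 0.5; sdev <- 2; a1 <- 1; a2 <- 1.5
X <- rnorm(1e6, mean = mu, sd = sdev)
Y <- pmax(a1 * X, -a2 * X)                       # same construction as rfnorm()
theta <- mu / sdev
c(simulated = mean(Y),
  formula   = sdev * ((a1 + a2) * (theta * pnorm(theta) + dnorm(theta)) -
                      a2 * theta))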
@@ -812,13 +823,13 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
fnormal1 <- function(lmean = "identity", lsd = "loge",
imean = NULL, isd = NULL,
a1 = 1, a2 = 1,
- nsimEIM = 500, imethod = 1, zero = NULL)
-{
+ nsimEIM = 500, imethod = 1, zero = NULL) {
if (!is.Numeric(a1, positive = TRUE, allowable.length = 1) ||
!is.Numeric(a2, positive = TRUE, allowable.length = 1))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must each be a positive value")
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
@@ -837,8 +848,6 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
-
-
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
@@ -856,9 +865,19 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
new("vglmff",
blurb = c("(Generalized) folded univariate normal distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
- namesof("sd", lsd, earg = esd, tag = TRUE)),
+ "Link: ",
+ namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+ namesof("sd", lsd, earg = esd, tag = TRUE)),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ a1 = .a1 ,
+ a2 = .a2 ,
+ zero = .zero ,
+ nsimEIM = .nsimEIM )
+ }, list( .zero = zero,
+ .a1 = a1, .a2 = a2,
+ .nsimEIM = nsimEIM ))),
+
initialize = eval(substitute(expression({
temp5 <-
@@ -877,124 +896,128 @@ rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
namesof("sd", .lsd, earg = .esd, tag = FALSE))
if (!length(etastart)) {
- junk = lm.wfit(x = x, y=y, w = w)
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
if (FALSE) {
if ((ncol(cbind(w)) != 1) || any(w != round(w)))
stop("'weights' must be a vector or a one-column matrix ",
"with integer values")
- m1d = meany = weighted.mean(y, w)
- m2d = weighted.mean(y^2, w)
- stddev = sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
- Ahat = m1d^2 / m2d
- thetahat = sqrt(max(1/Ahat -1, 0.1))
- mean.init = rep(if(length( .i.mean)) .i.mean else
+ m1d <- meany <- weighted.mean(y, w)
+ m2d <- weighted.mean(y^2, w)
+ stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
+ Ahat <- m1d^2 / m2d
+ thetahat <- sqrt(max(1/Ahat -1, 0.1))
+ mean.init <- rep(if(length( .imean)) .imean else
thetahat * sqrt((stddev^2 + meany^2) * Ahat), len = n)
- sd.init = rep(if(length( .i.sd)) .i.sd else
+ sd.init <- rep(if(length( .isd)) .isd else
sqrt((stddev^2 + meany^2) * Ahat), len = n)
}
- stddev = sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
- meany = weighted.mean(y, w)
- mean.init = rep(if(length( .i.mean)) .i.mean else
- {if( .imethod == 1) median(y) else meany}, len = n)
- sd.init = rep(if(length( .i.sd)) .i.sd else
- {if( .imethod == 1) stddev else 1.2*sd(c(y))}, len = n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
- theta2eta(sd.init, .lsd, earg = .esd ))
+ stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
+ meany <- weighted.mean(y, w)
+ mean.init <- rep(if(length( .imean )) .imean else
+ {if( .imethod == 1) median(y) else meany}, len = n)
+ sd.init <- rep(if(length( .isd )) .isd else
+ {if( .imethod == 1) stddev else 1.2*sd(c(y))}, len = n)
+ etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean),
+ theta2eta(sd.init, .lsd, earg = .esd ))
}
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
- .i.mean = imean, .i.sd = isd,
+ .imean = imean, .isd = isd,
.a1 = a1, .a2 = a2, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
- mytheta = mymu/mysd
- mysd * (( .a1+ .a2) * (mytheta * pnorm(mytheta) +
- dnorm(mytheta)) - .a2 * mytheta)
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ mytheta <- mymu / mysd
+ mysd * (( .a1 + .a2 ) * (mytheta * pnorm(mytheta) +
+ dnorm(mytheta)) - .a2 * mytheta)
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
.a1 = a1, .a2 = a2 ))),
last = eval(substitute(expression({
- misc$link = c("mu" = .lmean , "sd" = .lsd )
+ misc$link <- c("mu" = .lmean , "sd" = .lsd )
- misc$earg = list("mu" = .emean , "sd" = .esd )
+ misc$earg <- list("mu" = .emean , "sd" = .esd )
misc$multipleResponses <- FALSE
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$simEIM = TRUE
- misc$imethod = .imethod
- misc$a1 = .a1
- misc$a2 = .a2
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
+ misc$simEIM <- TRUE
+ misc$imethod <- .imethod
+ misc$a1 <- .a1
+ misc$a2 <- .a2
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd,
.imethod = imethod, .nsimEIM = nsimEIM,
.a1 = a1, .a2 = a2 ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
- a1vec = .a1
- a2vec = .a2
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w)*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
- dnorm(x=y/(a2vec*mysd) + mymu/mysd)/(a2vec*mysd)))
- }
- }, list( .lmean = lmean, .lsd = lsd,
- .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
- vfamily = c("fnormal1"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
- mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
- dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd )
- a1vec = .a1
- a2vec = .a2
- d3 = deriv3(~ log((exp(-0.5*(y/(a1vec*mysd) - mymu/mysd)^2)/a1vec +
- exp(-0.5*(y/(a2vec*mysd) +
- mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
- name=c("mymu","mysd"), hessian= FALSE)
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient") # == cbind(dl.dmu, dl.dsd)
- DTHETA.detas = cbind(dmu.deta, dsd.deta)
- c(w) * DTHETA.detas * dl.dthetas
- }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
- .a1 = a1, .a2 = a2 ))),
- weight = eval(substitute(expression({
- de3 = deriv3(~ log((exp(-0.5*(ysim/(a1vec*mysd) -
- mymu/mysd)^2)/a1vec +
- exp(-0.5*(ysim/(a2vec*mysd) +
- mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
- name=c("mymu","mysd"), hessian= TRUE)
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rfnorm(n = n, mean = mymu, sd = mysd,
- a1 = a1vec, a2 = a2vec)
- eval.de3 = eval(de3)
- d2l.dthetas2 = attr(eval.de3, "hessian")
- rm(ysim)
-
- temp3 = matrix(0, n, dimm(M))
- for(ss in 1:M)
- for(tt in ss:M)
- temp3[, iam(ss,tt, M)] = -d2l.dthetas2[, ss,tt]
-
- run.mean = ((ii-1) * run.mean + temp3) / ii
- }
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ a1vec <- .a1
+ a2vec <- .a2
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w)*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
+ dnorm(x=y/(a2vec*mysd) + mymu/mysd)/(a2vec*mysd)))
+ }
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
+ vfamily = c("fnormal1"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ mymu <- eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+
+ dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
+ dsd.deta <- dtheta.deta(mysd, .lsd, earg = .esd )
+
+ a1vec <- .a1
+ a2vec <- .a2
+ d3 <- deriv3(~ log((exp(-0.5*(y/(a1vec*mysd) - mymu/mysd)^2)/a1vec +
+ exp(-0.5*(y/(a2vec*mysd) +
+ mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
+ name = c("mymu", "mysd"), hessian = FALSE)
+ eval.d3 <- eval(d3)
+ dl.dthetas <- attr(eval.d3, "gradient") # == cbind(dl.dmu, dl.dsd)
+ DTHETA.detas <- cbind(dmu.deta, dsd.deta)
+ c(w) * DTHETA.detas * dl.dthetas
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
+ weight = eval(substitute(expression({
+ de3 <- deriv3(~ log((exp(-0.5*(ysim/(a1vec*mysd) -
+ mymu/mysd)^2)/a1vec +
+ exp(-0.5*(ysim/(a2vec*mysd) +
+ mymu/mysd)^2)/a2vec)/(mysd*sqrt(2*pi))),
+ name = c("mymu", "mysd"), hessian = TRUE)
+ run.mean <- 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- abs(rnorm(n, m = mymu, sd = mysd))
+ ysim <- rfnorm(n = n, mean = mymu, sd = mysd,
+ a1 = a1vec, a2 = a2vec)
+ eval.de3 <- eval(de3)
+ d2l.dthetas2 <- attr(eval.de3, "hessian")
+ rm(ysim)
+
+ temp3 <- matrix(0, n, dimm(M))
+ for(ss in 1:M)
+ for(tt in ss:M)
+ temp3[, iam(ss,tt, M)] <- -d2l.dthetas2[, ss,tt]
+
+ run.mean <- ((ii-1) * run.mean + temp3) / ii
+ }
+
+ wz <- if (intercept.only)
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
- run.mean
+ index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz <- wz * DTHETA.detas[, index0$row] * DTHETA.detas[, index0$col]
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * DTHETA.detas[, index0$row] * DTHETA.detas[, index0$col]
- c(w) * wz
- }), list( .nsimEIM = nsimEIM, .a1 = a1, .a2 = a2 ))))
+ }), list( .nsimEIM = nsimEIM, .a1 = a1, .a2 = a2 ))))
}
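The @weight slot above estimates the expected information by simulation: deriv3() supplies the observed Hessian of the log-density, responses are drawn .nsimEIM times, and the negated Hessians are running-averaged. A stripped-down base-R sketch of that recipe for the ordinary normal log-density, where the exact answers are 1/sd^2, 2/sd^2 and 0 (the same diagonal the lognormal() weights use later in this file); this is not VGAM code:

## Sketch of the simulated-EIM recipe: average -Hessian(log f) over
## simulated responses.  Plain normal log-density, base R only.
set.seed(1)
n <- 200
mymu <- rep(1, n); mysd <- rep(2, n)
de3 <- deriv3(~ -log(mysd) - 0.5 * ((ysim - mymu) / mysd)^2,
              c("mymu", "mysd"), hessian = TRUE)
run.mean <- 0
for (ii in 1:100) {
  ysim <- rnorm(n, mean = mymu, sd = mysd)
  hess <- attr(eval(de3), "hessian")             # n x 2 x 2 array
  temp <- cbind(-hess[, 1, 1], -hess[, 2, 2], -hess[, 1, 2])
  run.mean <- ((ii - 1) * run.mean + temp) / ii
}
colMeans(run.mean)   # approx 1/mysd^2 = 0.25, 2/mysd^2 = 0.50, and 0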
@@ -1011,8 +1034,7 @@ lqnorm.control <- function(trace = TRUE, ...) {
lqnorm <- function(qpower = 2,
link = "identity",
- imethod = 1, imu = NULL, shrinkage.init = 0.95)
-{
+ imethod = 1, imu = NULL, shrinkage.init = 0.95) {
link <- as.list(substitute(link))
@@ -1037,8 +1059,8 @@ lqnorm <- function(qpower = 2,
new("vglmff",
blurb = c("Minimizing the q-norm of residuals\n",
- "Links: ",
- namesof("Y1", link, earg = earg, tag = TRUE)),
+ "Links: ",
+ namesof("Y1", link, earg = earg, tag = TRUE)),
initialize = eval(substitute(expression({
temp5 <-
@@ -1051,8 +1073,8 @@ lqnorm <- function(qpower = 2,
y <- temp5$y
- M = if (is.matrix(y)) ncol(y) else 1
- dy = dimnames(y)
+ M <- if (is.matrix(y)) ncol(y) else 1
+ dy <- dimnames(y)
predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
@@ -1062,33 +1084,33 @@ lqnorm <- function(qpower = 2,
if (!length(etastart)) {
- meany = weighted.mean(y, w)
- mean.init = rep(if(length( .i.mu)) .i.mu else
+ meany <- weighted.mean(y, w)
+ mean.init <- rep(if(length( .i.mu)) .i.mu else
{if( .imethod == 2) median(y) else
if ( .imethod == 1) meany else
.sinit * meany + (1 - .sinit) * y
}, len = n)
- etastart = theta2eta(mean.init, link = .link, earg = .earg)
+ etastart <- theta2eta(mean.init, link = .link, earg = .earg)
}
}), list( .imethod = imethod, .i.mu = imu,
.sinit = shrinkage.init,
.link = link, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
+ mu <- eta2theta(eta, link = .link, earg = .earg)
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- dy = dimnames(y)
+ dy <- dimnames(y)
if (!is.null(dy[[2]]))
dimnames(fit$fitted.values) = dy
- misc$link = rep( .link, length = M)
- names(misc$link) = predictors.names
+ misc$link <- rep( .link, length = M)
+ names(misc$link) <- predictors.names
- misc$earg = list(mu = .earg)
+ misc$earg <- list(mu = .earg)
- misc$qpower = .qpower
- misc$imethod = .imethod
- misc$objectiveFunction = sum( c(w) * (abs(y - mu))^(.qpower) )
+ misc$qpower <- .qpower
+ misc$imethod <- .imethod
+ misc$objectiveFunction <- sum( c(w) * (abs(y - mu))^(.qpower) )
}), list( .qpower = qpower,
.link = link, .earg = earg,
.imethod = imethod ))),
@@ -1097,15 +1119,15 @@ lqnorm <- function(qpower = 2,
}, list( .link = link, .earg = earg ))),
vfamily = "lqnorm",
deriv = eval(substitute(expression({
- dmu.deta = dtheta.deta(theta=mu, link = .link, earg = .earg )
- myresid = y - mu
- signresid = sign(myresid)
- temp2 = (abs(myresid))^(.qpower-1)
+ dmu.deta <- dtheta.deta(theta=mu, link = .link, earg = .earg )
+ myresid <- y - mu
+ signresid <- sign(myresid)
+ temp2 <- (abs(myresid))^(.qpower-1)
.qpower * c(w) * temp2 * signresid * dmu.deta
}), list( .qpower = qpower, .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- temp3 = (abs(myresid))^(.qpower-2)
- wz = .qpower * (.qpower - 1) * c(w) * temp3 * dmu.deta^2
+ temp3 <- (abs(myresid))^(.qpower-2)
+ wz <- .qpower * (.qpower - 1) * c(w) * temp3 * dmu.deta^2
wz
}), list( .qpower = qpower, .link = link, .earg = earg ))))
}
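lqnorm() minimises sum(w * |y - mu|^qpower); the @deriv and @weight slots above are just the first and second derivatives of that objective. For qpower = 2 the minimiser is the weighted mean, which a short base-R check confirms:

## qpower = 2 case of the lqnorm objective: the minimiser is weighted.mean().
set.seed(1)
y <- rnorm(50); w <- runif(50)
qpower <- 2
obj <- function(mu) sum(w * abs(y - mu)^qpower)
c(optimize(obj, interval = range(y))$minimum,
  weighted.mean(y, w))                           # approximately equal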
@@ -1124,38 +1146,38 @@ dtobit <- function(x, mean = 0, sd = 1,
rm(log)
- L = max(length(x), length(mean), length(sd), length(Lower),
+ L <- max(length(x), length(mean), length(sd), length(Lower),
length(Upper))
- x = rep(x, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
- Lower = rep(Lower, len = L);
- Upper = rep(Upper, len = L);
+ x <- rep(x, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
+ Lower <- rep(Lower, len = L);
+ Upper <- rep(Upper, len = L);
- ans = dnorm(x = x, mean = mean, sd = sd, log = log.arg)
- ans[x < Lower] = if (log.arg) log(0.0) else 0.0
- ans[x > Upper] = if (log.arg) log(0.0) else 0.0
+ ans <- dnorm(x = x, mean = mean, sd = sd, log = log.arg)
+ ans[x < Lower] <- if (log.arg) log(0.0) else 0.0
+ ans[x > Upper] <- if (log.arg) log(0.0) else 0.0
ind3 <- x == Lower
- ans[ind3] = if (log.arg) {
- log(exp(ans[ind3]) +
- pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3]))
- } else {
- ans[ind3] +
- pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3])
- }
+ ans[ind3] <- if (log.arg) {
+ log(exp(ans[ind3]) +
+ pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3]))
+ } else {
+ ans[ind3] +
+ pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3])
+ }
ind4 <- x == Upper
- ans[ind4] = if (log.arg) {
- log(exp(ans[ind4]) +
- pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
- lower.tail = FALSE))
- } else {
- ans[ind4] +
- pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
- lower.tail = FALSE)
- }
+ ans[ind4] <- if (log.arg) {
+ log(exp(ans[ind4]) +
+ pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
+ lower.tail = FALSE))
+ } else {
+ ans[ind4] +
+ pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
+ lower.tail = FALSE)
+ }
ans
}
@@ -1170,21 +1192,21 @@ ptobit <- function(q, mean = 0, sd = 1,
if (!is.logical(log.p) || length(log.p) != 1)
stop("argument 'log.p' must be a single logical")
- L = max(length(q), length(mean), length(sd), length(Lower),
+ L <- max(length(q), length(mean), length(sd), length(Lower),
length(Upper))
- q = rep(q, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
- Lower = rep(Lower, len = L);
- Upper = rep(Upper, len = L);
+ q <- rep(q, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
+ Lower <- rep(Lower, len = L);
+ Upper <- rep(Upper, len = L);
- ans = pnorm(q = q, mean = mean, sd = sd, lower.tail = lower.tail)
+ ans <- pnorm(q = q, mean = mean, sd = sd, lower.tail = lower.tail)
ind1 <- (q < Lower)
- ans[ind1] = if (lower.tail) ifelse(log.p, log(0.0), 0.0) else
- ifelse(log.p, log(1.0), 1.0)
+ ans[ind1] <- if (lower.tail) ifelse(log.p, log(0.0), 0.0) else
+ ifelse(log.p, log(1.0), 1.0)
ind2 <- (Upper <= q)
- ans[ind2] = if (lower.tail) ifelse(log.p, log(1.0), 1.0) else
- ifelse(log.p, log(0.0), 0.0)
+ ans[ind2] <- if (lower.tail) ifelse(log.p, log(1.0), 1.0) else
+ ifelse(log.p, log(0.0), 0.0)
ans
}
@@ -1195,23 +1217,23 @@ ptobit <- function(q, mean = 0, sd = 1,
qtobit <- function(p, mean = 0, sd = 1,
Lower = 0, Upper = Inf) {
- L = max(length(p), length(mean), length(sd), length(Lower),
- length(Upper))
- p = rep(p, len = L);
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
- Lower = rep(Lower, len = L);
- Upper = rep(Upper, len = L);
+ L <- max(length(p), length(mean), length(sd), length(Lower),
+ length(Upper))
+ p <- rep(p, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
+ Lower <- rep(Lower, len = L);
+ Upper <- rep(Upper, len = L);
- ans = qnorm(p = p, mean = mean, sd = sd)
- pnorm.Lower = ptobit(q = Lower, mean = mean, sd = sd)
- pnorm.Upper = ptobit(q = Upper, mean = mean, sd = sd)
+ ans <- qnorm(p = p, mean = mean, sd = sd)
+ pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd)
+ pnorm.Upper <- ptobit(q = Upper, mean = mean, sd = sd)
ind1 <- (p <= pnorm.Lower)
- ans[ind1] = Lower[ind1]
+ ans[ind1] <- Lower[ind1]
ind2 <- (pnorm.Upper <= p)
- ans[ind2] = Upper[ind2]
+ ans[ind2] <- Upper[ind2]
ans
}
@@ -1222,24 +1244,24 @@ qtobit <- function(p, mean = 0, sd = 1,
rtobit <- function(n, mean = 0, sd = 1,
- Lower = 0, Upper = Inf) {
+ Lower = 0, Upper = Inf) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- L = max(use.n, length(mean), length(sd), length(Lower),
+ L <- max(use.n, length(mean), length(sd), length(Lower),
length(Upper))
- mean = rep(mean, len = L);
- sd = rep(sd, len = L);
- Lower = rep(Lower, len = L);
- Upper = rep(Upper, len = L);
+ mean <- rep(mean, len = L);
+ sd <- rep(sd, len = L);
+ Lower <- rep(Lower, len = L);
+ Upper <- rep(Upper, len = L);
- ans = rnorm(n = use.n, mean = mean, sd = sd)
+ ans <- rnorm(n = use.n, mean = mean, sd = sd)
cenL <- (ans < Lower)
- ans[cenL] = Lower[cenL]
+ ans[cenL] <- Lower[cenL]
cenU <- (ans > Upper)
- ans[cenU] = Upper[cenU]
+ ans[cenU] <- Upper[cenU]
attr(ans, "Lower") <- Lower
attr(ans, "Upper") <- Upper
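rtobit() draws from N(mean, sd) and clamps to [Lower, Upper], so the probability mass placed on Lower is pnorm((Lower - mean)/sd), which is exactly the term dtobit() adds at x == Lower above. A base-R check with the clamp re-created inline, so nothing beyond stats is needed:

## Check: mass at the lower censoring point equals pnorm((Lower - mean)/sd).
set.seed(1)
mu <- 1; sdev <- 2; Lower <- 0; Upper <- Inf
y <- pmin(pmax(rnorm(1e6, mu, sdev), Lower), Upper)   # same clamp as rtobit()
c(simulated = mean(y == Lower),
  formula   = pnorm((Lower - mu) / sdev))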
@@ -1251,8 +1273,7 @@ rtobit <- function(n, mean = 0, sd = 1,
-tobit.control <- function(save.weight = TRUE, ...)
-{
+tobit.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -1272,8 +1293,8 @@ tobit.control <- function(save.weight = TRUE, ...)
lmu <- as.list(substitute(lmu))
- e.mu <- link2list(lmu)
- l.mu <- attr(e.mu, "function.name")
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
lsd <- as.list(substitute(lsd))
esd <- link2list(lsd)
@@ -1306,18 +1327,18 @@ tobit.control <- function(save.weight = TRUE, ...)
c("uncensored", "censored", "mean.obs"))[1]
- stdTobit = all(Lower == 0.0) &&
- all(!is.finite(Upper)) &&
- all(lmu == "identity")
+ stdTobit <- all(Lower == 0.0) &&
+ all(!is.finite(Upper)) &&
+ all(lmu == "identity")
new("vglmff",
blurb = c("Tobit model\n\n",
- "Links: ",
- namesof("mu", l.mu, earg = e.mu, tag = TRUE), "; ",
- namesof("sd", lsd, earg = esd, tag = TRUE), "\n",
- "Mean: mu", "\n",
- "Conditional variance: sd^2"),
+ "Links: ",
+ namesof("mu", lmu, earg = emu, tag = TRUE), "; ",
+ namesof("sd", lsd, earg = esd, tag = TRUE), "\n",
+ "Mean: mu", "\n",
+ "Conditional variance: sd^2"),
constraints = eval(substitute(expression({
dotzero <- .zero
@@ -1333,7 +1354,7 @@ tobit.control <- function(save.weight = TRUE, ...)
}, list( .zero = zero, .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
- Musual = 2
+ Musual <- 2
temp5 <-
@@ -1348,23 +1369,23 @@ tobit.control <- function(save.weight = TRUE, ...)
- ncoly = ncol(y)
- M = Musual * ncoly
+ ncoly <- ncol(y)
+ M <- Musual * ncoly
- Lowmat = matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
+ Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
+ Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
- extra$censoredL = (y <= Lowmat)
- extra$censoredU = (y >= Uppmat)
+ extra$censoredL <- (y <= Lowmat)
+ extra$censoredU <- (y >= Uppmat)
if (any(y < Lowmat)) {
warning("replacing response values less than the value ",
.Lower , " by ", .Lower )
- y[y < Lowmat] = Lowmat[y < Lowmat]
+ y[y < Lowmat] <- Lowmat[y < Lowmat]
}
if (any(y > Uppmat)) {
warning("replacing response values greater than the value ",
.Upper, " by ", .Upper)
- y[y > Uppmat] = Uppmat[y > Uppmat]
+ y[y > Uppmat] <- Uppmat[y > Uppmat]
}
temp1.names <-
@@ -1372,7 +1393,7 @@ tobit.control <- function(save.weight = TRUE, ...)
temp2.names <-
if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = "")
predictors.names <-
- c(namesof(temp1.names, .l.mu, earg = .e.mu, tag = FALSE),
+ c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
namesof(temp2.names, .lsd, earg = .esd, tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
@@ -1396,76 +1417,76 @@ tobit.control <- function(save.weight = TRUE, ...)
if (length( .i.mu ))
mu.init <- matrix( .i.mu , n, ncoly, byrow = TRUE)
- if (length( .i.sd ))
- sd.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
+ if (length( .isd ))
+ sd.init <- matrix( .isd , n, ncoly, byrow = TRUE)
- etastart <- cbind(theta2eta(mu.init, .l.mu, earg = .e.mu ),
+ etastart <- cbind(theta2eta(mu.init, .lmu, earg = .emu ),
theta2eta(sd.init, .lsd, earg = .esd ))
etastart <- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .Lower = Lower, .Upper = Upper,
- .l.mu = l.mu, .lsd = lsd,
- .e.mu = e.mu, .esd = esd,
- .i.mu = imu, .i.sd = isd,
+ .lmu = lmu, .lsd = lsd,
+ .emu = emu, .esd = esd,
+ .i.mu = imu, .isd = isd,
.imethod = imethod ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- Musual = 2
- ncoly = ncol(eta) / Musual
- mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop=FALSE], .l.mu, earg = .e.mu )
+ Musual <- 2
+ ncoly <- ncol(eta) / Musual
+ mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop=FALSE], .lmu, earg = .emu )
if ( .type.fitted == "uncensored")
return(mum)
- Lowmat = matrix( .Lower, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+ Lowmat <- matrix( .Lower, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+ Uppmat <- matrix( .Upper, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
if ( .type.fitted == "censored") {
mum[mum < Lowmat] <- Lowmat[mum < Lowmat]
mum[mum > Uppmat] <- Uppmat[mum > Uppmat]
mum
} else {
- sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
.lsd , earg = .esd )
- zeddL = (Lowmat - mum) / sdm
- zeddU = (Uppmat - mum) / sdm
- Phi.L = pnorm(zeddL)
- phi.L = dnorm(zeddL)
- Phi.U = pnorm(zeddU)
- phi.U = dnorm(zeddU)
+ zeddL <- (Lowmat - mum) / sdm
+ zeddU <- (Uppmat - mum) / sdm
+ Phi.L <- pnorm(zeddL)
+ phi.L <- dnorm(zeddL)
+ Phi.U <- pnorm(zeddU)
+ phi.U <- dnorm(zeddU)
mum * (Phi.U - Phi.L) +
sdm * (phi.L - phi.U) +
Lowmat * Phi.L +
Uppmat * (1 - Phi.U)
}
- }, list( .l.mu = l.mu, .lsd = lsd,
- .e.mu = e.mu, .esd = esd,
+ }, list( .lmu = lmu, .lsd = lsd,
+ .emu = emu, .esd = esd,
.Lower = Lower, .Upper = Upper,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- temp0303 = c(rep( .l.mu, length = ncoly),
- rep( .lsd, length = ncoly))
+ temp0303 <- c(rep( .lmu, length = ncoly),
+ rep( .lsd, length = ncoly))
names(temp0303) =
c(if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = ""),
if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = Musual)]
- misc$link = temp0303 # Already named
+ temp0303 <- temp0303[interleave.VGAM(M, M = Musual)]
+ misc$link <- temp0303 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] = .e.mu
- misc$earg[[Musual*ii ]] = .esd
+ misc$earg[[Musual*ii-1]] <- .emu
+ misc$earg[[Musual*ii ]] <- .esd
}
misc$multipleResponses <- TRUE
- misc$expected = TRUE
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$Musual = Musual
- misc$stdTobit = .stdTobit
- misc$Lower = Lowmat
- misc$Upper = Uppmat
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$Musual <- Musual
+ misc$stdTobit <- .stdTobit
+ misc$Lower <- Lowmat
+ misc$Upper <- Uppmat
if ( .stdTobit ) {
@@ -1474,38 +1495,38 @@ tobit.control <- function(save.weight = TRUE, ...)
}
- }), list( .l.mu = l.mu, .lsd = lsd,
- .e.mu = e.mu, .esd = esd,
+ }), list( .lmu = lmu, .lsd = lsd,
+ .emu = emu, .esd = esd,
.nsimEIM = nsimEIM, .imethod = imethod,
.stdTobit = stdTobit,
.Lower = Lower,
.Upper = Upper ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual = 2
- y = cbind(y)
- ncoly = ncol(y)
-
- cenL = extra$censoredL
- cenU = extra$censoredU
- cen0 = !cenL & !cenU # uncensored obsns
- Lowmat = matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
-
-
- mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
- .l.mu , earg = .e.mu )
- sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
- .lsd , earg = .esd )
-
- ell0 = dnorm( y[cen0], mean = mum[cen0], sd = sdm[cen0],
- log = TRUE)
- ellL = pnorm(Lowmat[cenL], mean = mum[cenL], sd = sdm[cenL],
- log.p = TRUE, lower.tail = TRUE)
- ellU = pnorm(Uppmat[cenU], mean = mum[cenU], sd = sdm[cenU],
- log.p = TRUE, lower.tail = FALSE)
-
- wmat = matrix(w, nrow = nrow(eta), ncol = ncoly)
+ Musual <- 2
+ y <- cbind(y)
+ ncoly <- ncol(y)
+
+ cenL <- extra$censoredL
+ cenU <- extra$censoredU
+ cen0 <- !cenL & !cenU # uncensored obsns
+ Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+ Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+
+
+ mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
+ .lmu , earg = .emu )
+ sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ .lsd , earg = .esd )
+
+ ell0 <- dnorm( y[cen0], mean = mum[cen0], sd = sdm[cen0],
+ log = TRUE)
+ ellL <- pnorm(Lowmat[cenL], mean = mum[cenL], sd = sdm[cenL],
+ log.p = TRUE, lower.tail = TRUE)
+ ellU <- pnorm(Uppmat[cenU], mean = mum[cenU], sd = sdm[cenU],
+ log.p = TRUE, lower.tail = FALSE)
+
+ wmat <- matrix(w, nrow = nrow(eta), ncol = ncoly)
if (residuals) {
stop("loglikelihood residuals not ",
"implemented yet")
@@ -1514,174 +1535,176 @@ tobit.control <- function(save.weight = TRUE, ...)
sum(wmat[cenL] * ellL) +
sum(wmat[cenU] * ellU)
}
- }, list( .l.mu = l.mu, .lsd = lsd,
- .e.mu = e.mu, .esd = esd,
+ }, list( .lmu = lmu, .lsd = lsd,
+ .emu = emu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
vfamily = c("tobit"),
deriv = eval(substitute(expression({
- Musual = 2
- y = cbind(y)
- ncoly = ncol(y)
+ Musual <- 2
+ y <- cbind(y)
+ ncoly <- ncol(y)
- Lowmat = matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
+ Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
+ Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
- cenL = extra$censoredL
- cenU = extra$censoredU
- cen0 = !cenL & !cenU # uncensored obsns
+ cenL <- extra$censoredL
+ cenU <- extra$censoredU
+ cen0 <- !cenL & !cenU # uncensored obsns
- mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE], .l.mu, earg = .e.mu )
- sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE], .lsd, earg = .esd )
+ mum <- eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
+ .lmu, earg = .emu )
+ sdm <- eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ .lsd, earg = .esd )
- zedd = (y - mum) / sdm
- dl.dmu = zedd / sdm
- dl.dsd = (zedd^2 - 1) / sdm
+ zedd <- (y - mum) / sdm
+ dl.dmu <- zedd / sdm
+ dl.dsd <- (zedd^2 - 1) / sdm
- dmu.deta = dtheta.deta(mum, .l.mu, earg = .e.mu )
- dsd.deta = dtheta.deta(sdm, .lsd, earg = .esd )
+ dmu.deta <- dtheta.deta(mum, .lmu, earg = .emu )
+ dsd.deta <- dtheta.deta(sdm, .lsd, earg = .esd )
if (any(cenL)) {
- mumL = Lowmat - mum
- temp21L = mumL[cenL] / sdm[cenL]
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- fred21 = phiL / PhiL
- dl.dmu[cenL] = -fred21 / sdm[cenL]
- dl.dsd[cenL] = fred21 * (-mumL[cenL] / sdm[cenL]^2)
+ mumL <- Lowmat - mum
+ temp21L <- mumL[cenL] / sdm[cenL]
+ PhiL <- pnorm(temp21L)
+ phiL <- dnorm(temp21L)
+ fred21 <- phiL / PhiL
+ dl.dmu[cenL] <- -fred21 / sdm[cenL]
+ dl.dsd[cenL] <- fred21 * (-mumL[cenL] / sdm[cenL]^2)
}
if (any(cenU)) {
- mumU = Uppmat - mum
- temp21U = mumU[cenU] / sdm[cenU]
- PhiU = pnorm(temp21U, lower.tail = FALSE)
- phiU = dnorm(temp21U)
- fred21 = -phiU / PhiU
- dl.dmu[cenU] = -fred21 / sdm[cenU] # Negated
- dl.dsd[cenU] = fred21 * (-mumU[cenU] / sdm[cenU]^2)
+ mumU <- Uppmat - mum
+ temp21U <- mumU[cenU] / sdm[cenU]
+ PhiU <- pnorm(temp21U, lower.tail = FALSE)
+ phiU <- dnorm(temp21U)
+ fred21 <- -phiU / PhiU
+ dl.dmu[cenU] <- -fred21 / sdm[cenU] # Negated
+ dl.dsd[cenU] <- fred21 * (-mumU[cenU] / sdm[cenU]^2)
}
- dthetas.detas = cbind(dmu.deta, dsd.deta)
- dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ dthetas.detas <- cbind(dmu.deta, dsd.deta)
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
- myderiv = cbind(c(w) * dl.dmu,
- c(w) * dl.dsd) * dthetas.detas
+ myderiv <- cbind(c(w) * dl.dmu,
+ c(w) * dl.dsd) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .l.mu = l.mu, .lsd = lsd,
- .e.mu = e.mu, .esd = esd,
+ }), list( .lmu = lmu, .lsd = lsd,
+ .emu = emu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
if (is.numeric( .nsimEIM ) &&
! .stdTobit ) {
- run.varcov = 0
+ run.varcov <- 0
for(spp. in 1:ncoly) {
- run.varcov = 0
- muvec = mum[, spp.]
- sdvec = sdm[, spp.]
+ run.varcov <- 0
+ muvec <- mum[, spp.]
+ sdvec <- sdm[, spp.]
for(ii in 1:( .nsimEIM )) {
- ysim = rtobit(n = n, mean = muvec, sd = sdvec,
+ ysim <- rtobit(n = n, mean = muvec, sd = sdvec,
Lower = Lowmat[, spp.], Upper = Uppmat[, spp.])
- cenL = attr(ysim, "cenL")
- cenU = attr(ysim, "cenU")
- cen0 = !cenL & !cenU # uncensored obsns
+ cenL <- attr(ysim, "cenL")
+ cenU <- attr(ysim, "cenU")
+ cen0 <- !cenL & !cenU # uncensored obsns
- zedd = (ysim - muvec) / sdvec
- dl.dmu = zedd / sdvec
- dl.dsd = (zedd^2 - 1) / sdvec
+ zedd <- (ysim - muvec) / sdvec
+ dl.dmu <- zedd / sdvec
+ dl.dsd <- (zedd^2 - 1) / sdvec
if (any(cenL)) {
- mumL = Lowmat[, spp.] - muvec
- temp21L = mumL[cenL] / sdvec[cenL]
- PhiL = pnorm(temp21L)
- phiL = dnorm(temp21L)
- fred21 = phiL / PhiL
- dl.dmu[cenL] = -fred21 / sdvec[cenL]
- dl.dsd[cenL] = fred21 * (-mumL[cenL] / sdvec[cenL]^2)
+ mumL <- Lowmat[, spp.] - muvec
+ temp21L <- mumL[cenL] / sdvec[cenL]
+ PhiL <- pnorm(temp21L)
+ phiL <- dnorm(temp21L)
+ fred21 <- phiL / PhiL
+ dl.dmu[cenL] <- -fred21 / sdvec[cenL]
+ dl.dsd[cenL] <- fred21 * (-mumL[cenL] / sdvec[cenL]^2)
}
if (any(cenU)) {
- mumU = Uppmat[, spp.] - muvec
- temp21U = mumU[cenU] / sdvec[cenU]
- PhiU = pnorm(temp21U, lower.tail = FALSE)
- phiU = dnorm(temp21U)
- fred21 = -phiU / PhiU
- dl.dmu[cenU] = -fred21 / sdvec[cenU] # Negated
- dl.dsd[cenU] = fred21 * (-mumU[cenU] / sdvec[cenU]^2)
+ mumU <- Uppmat[, spp.] - muvec
+ temp21U <- mumU[cenU] / sdvec[cenU]
+ PhiU <- pnorm(temp21U, lower.tail = FALSE)
+ phiU <- dnorm(temp21U)
+ fred21 <- -phiU / PhiU
+ dl.dmu[cenU] <- -fred21 / sdvec[cenU] # Negated
+ dl.dsd[cenU] <- fred21 * (-mumU[cenU] / sdvec[cenU]^2)
}
rm(ysim)
- temp3 = cbind(dl.dmu, dl.dsd)
- run.varcov = run.varcov +
+ temp3 <- cbind(dl.dmu, dl.dsd)
+ run.varcov <- run.varcov +
temp3[, ind1$row.index] *
temp3[, ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz1 = if (intercept.only && FALSE)
+ wz1 <- if (intercept.only && FALSE)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 = wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+ dThetas.detas[, Musual * (spp. - 1) + ind1$col]
for(jay in 1:Musual)
- for(kay in jay:Musual) {
- cptr = iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
- M = M)
- wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
- }
+ for(kay in jay:Musual) {
+ cptr <- iam((spp. - 1) * Musual + jay,
+ (spp. - 1) * Musual + kay,
+ M = M)
+ wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+ }
} # End of for(spp.) loop
} else {
- wz1 = matrix(0.0, n, dimm(Musual))
+ wz1 <- matrix(0.0, n, dimm(Musual))
for(spp. in 1:ncoly) {
- zedd = (y[, spp.] - mum[, spp.]) / sdm[, spp.]
- zedd0 = ( mum[, spp.]) / sdm[, spp.]
- phivec = dnorm(zedd0)
- Phivec = pnorm(zedd0)
+ zedd <- (y[, spp.] - mum[, spp.]) / sdm[, spp.]
+ zedd0 <- ( mum[, spp.]) / sdm[, spp.]
+ phivec <- dnorm(zedd0)
+ Phivec <- pnorm(zedd0)
- wz1[, iam(1, 1, M = Musual)] = -(phivec * zedd0 -
+ wz1[, iam(1, 1, M = Musual)] <- -(phivec * zedd0 -
phivec^2 / (1 - Phivec) -
Phivec)
- wz1[, iam(2, 2, M = Musual)] = -(phivec * zedd0^3 +
+ wz1[, iam(2, 2, M = Musual)] <- -(phivec * zedd0^3 +
phivec * zedd0 -
phivec^2 * zedd0^2 / (1 - Phivec) -
2 * Phivec)
- wz1[, iam(1, 2, M = Musual)] = +(phivec * zedd0^2 +
+ wz1[, iam(1, 2, M = Musual)] <- +(phivec * zedd0^2 +
phivec -
phivec^2 * zedd0 / (1 - Phivec))
- wz1 = wz1 / sdm[, spp.]^2
- wz1 = wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
- dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+ wz1 <- wz1 / sdm[, spp.]^2
+ wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+ dThetas.detas[, Musual * (spp. - 1) + ind1$col]
for(jay in 1:Musual)
for(kay in jay:Musual) {
- cptr = iam((spp. - 1) * Musual + jay,
- (spp. - 1) * Musual + kay,
- M = M)
- wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
+ cptr <- iam((spp. - 1) * Musual + jay,
+ (spp. - 1) * Musual + kay,
+ M = M)
+ wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
}
} # End of for(spp.) loop
} # End of EIM
- temp = w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ temp <- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
- }), list( .l.mu = lmu, .Lower = Lower, .Upper = Upper,
+ }), list( .lmu = lmu, .Lower = Lower, .Upper = Upper,
.lsd = lsd,
.stdTobit = stdTobit,
.nsimEIM = nsimEIM ))))
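For orientation, a minimal fit with the tobit() family as revised above, using only argument names visible in this diff (Lower, lmu, lsd); an illustrative sketch that assumes the package built from this source, not code from the commit:

## Illustrative fit only -- assumes library(VGAM) built from this source.
library(VGAM)
set.seed(1)
tdata <- data.frame(x2 = runif(300))
tdata <- transform(tdata, y = rtobit(300, mean = 1 + 2 * x2, sd = 1, Lower = 0))
fit <- vglm(y ~ x2, tobit(Lower = 0), data = tdata, trace = TRUE)
coef(fit, matrix = TRUE)   # one column per linear predictor: mu, sd
head(fitted(fit))          # mu on the uncensored scale (the default type.fitted)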
@@ -1696,9 +1719,9 @@ tobit.control <- function(save.weight = TRUE, ...)
imethod = 1,
isd = NULL,
parallel = FALSE,
- intercept.apply = FALSE,
- zero = -2)
-{
+ apply.parint = FALSE,
+ smallno = 1.0e-5,
+ zero = -2) {
@@ -1708,13 +1731,13 @@ tobit.control <- function(save.weight = TRUE, ...)
emean <- link2list(lmean)
lmean <- attr(emean, "function.name")
- lsd <- as.list(substitute(lsd))
- esd <- link2list(lsd)
- lsd <- attr(esd, "function.name")
+ lsdev <- as.list(substitute(lsd))
+ esdev <- link2list(lsdev)
+ lsdev <- attr(esdev, "function.name")
- lvar <- as.list(substitute(lvar))
- e.var <- link2list(lvar)
- l.var <- attr(e.var, "function.name")
+ lvare <- as.list(substitute(lvar))
+ evare <- link2list(lvare)
+ lvare <- attr(evare, "function.name")
@@ -1725,16 +1748,25 @@ tobit.control <- function(save.weight = TRUE, ...)
stop("bad input for argument 'zero'")
+ if (!is.Numeric(smallno, allowable.length = 1,
+ positive = TRUE))
+ stop("argument 'smallno' must be positive and close to 0")
+ if (smallno > 0.1) {
+ warning("replacing argument 'smallno' with 0.1")
+ smallno <- 0.1
+ }
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1 or 2 or 3 or 4")
- if (!is.logical(var.arg) || length(var.arg) != 1)
+ if (!is.logical(var.arg) ||
+ length(var.arg) != 1)
stop("argument 'var.arg' must be a single logical")
- if (!is.logical(intercept.apply) ||
- length(intercept.apply) != 1)
- stop("argument 'intercept.apply' must be a single logical")
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
if (is.logical(parallel) && parallel && length(zero))
@@ -1746,8 +1778,8 @@ tobit.control <- function(save.weight = TRUE, ...)
"Links: ",
namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
if (var.arg)
- namesof("var", l.var, earg = e.var, tag = TRUE) else
- namesof("sd" , lsd, earg = esd, tag = TRUE),
+ namesof("var", lvare, earg = evare, tag = TRUE) else
+ namesof("sd" , lsdev, earg = esdev, tag = TRUE),
"\n",
if (var.arg) "Variance: var" else "Variance: sd^2"),
@@ -1756,13 +1788,13 @@ tobit.control <- function(save.weight = TRUE, ...)
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
- intercept.apply = .intercept.apply )
+ apply.int = .apply.parint )
dotzero <- .zero
Musual <- 2
eval(negzero.expression)
}), list( .zero = zero,
- .parallel = parallel, .intercept.apply = intercept.apply ))),
+ .parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
@@ -1786,10 +1818,10 @@ tobit.control <- function(save.weight = TRUE, ...)
"attribute of the response (probably due to Qvar()")
- w = attr(orig.y, "Prior.Weights")
+ w <- attr(orig.y, "Prior.Weights")
- extra$attributes.y = attributes(orig.y)
+ extra$attributes.y <- attributes(orig.y)
} else {
}
@@ -1826,8 +1858,8 @@ tobit.control <- function(save.weight = TRUE, ...)
predictors.names <-
c(namesof(mynames1, .lmean , earg = .emean , tag = FALSE),
if ( .var.arg )
- namesof(mynames2, .l.var , earg = .e.var , tag = FALSE) else
- namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
+ namesof(mynames2, .lvare , earg = .evare , tag = FALSE) else
+ namesof(mynames2, .lsdev , earg = .esdev , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
extra$predictors.names <- predictors.names
@@ -1866,38 +1898,51 @@ tobit.control <- function(save.weight = TRUE, ...)
}
- if (length( .i.sd )) {
- sdev.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
+ if (length( .isdev )) {
+ sdev.init <- matrix( .isdev , n, ncoly, byrow = TRUE)
}
etastart <-
- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
+ cbind(theta2eta(mean.init, .lmean , earg = .emean ),
if ( .var.arg )
- theta2eta(sdev.init^2, .l.var , earg = .e.var ) else
- theta2eta(sdev.init , .lsd , earg = .esd ))
+ theta2eta(sdev.init^2, .lvare , earg = .evare ) else
+ theta2eta(sdev.init , .lsdev , earg = .esdev ))
etastart <-
etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
colnames(etastart) <- predictors.names
}
- }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
- .emean = emean, .esd = esd, .e.var = e.var,
- .i.sd = isd,
+ }), list( .lmean = lmean, .lsdev = lsdev, .lvare = lvare,
+ .emean = emean, .esdev = esdev, .evare = evare,
+ .isdev = isd,
.var.arg = var.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- extra$Musual
ncoly <- extra$ncoly
+
+
+
+ if ( .lmean == "explink") {
+ if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ warning("turning some columns of 'eta' positive in @linkinv")
+ for (ii in 1:ncoly)
+ eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ }
+ }
+
+
eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
}, list( .lmean = lmean,
- .emean = emean, .esd = esd , .e.var = e.var ))),
+ .emean = emean, .esdev = esdev , .evare = evare,
+ .smallno = smallno ))),
last = eval(substitute(expression({
Musual <- extra$Musual
misc$link <- c(rep( .lmean , length = ncoly),
- rep( .lsd , length = ncoly))
- misc$link <- misc$link[interleave.VGAM(Musual * ncoly, M = Musual)]
+ rep( .lsdev , length = ncoly))
+ misc$link <- misc$link [interleave.VGAM(Musual * ncoly, M = Musual)]
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
names(misc$link) <- temp.names
@@ -1907,7 +1952,7 @@ tobit.control <- function(save.weight = TRUE, ...)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
misc$earg[[Musual*ii-1]] <- .emean
- misc$earg[[Musual*ii ]] <- if ( .var.arg) .e.var else .esd
+ misc$earg[[Musual*ii ]] <- if ( .var.arg ) .evare else .esdev
}
names(misc$earg) <- temp.names
@@ -1917,40 +1962,63 @@ tobit.control <- function(save.weight = TRUE, ...)
misc$imethod <- .imethod
misc$multipleResponses <- TRUE
misc$parallel <- .parallel
- misc$intercept.apply <- .intercept.apply
- }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
- .emean = emean, .esd = esd, .e.var = e.var,
- .parallel = parallel, .intercept.apply = intercept.apply,
+ misc$apply.parint <- .apply.parint
+ misc$smallno <- .smallno
+ }), list( .lmean = lmean, .lsdev = lsdev, .lvare = lvare,
+ .emean = emean, .esdev = esdev, .evare = evare,
+ .parallel = parallel, .apply.parint = apply.parint,
+ .smallno = smallno,
.var.arg = var.arg, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
ncoly <- extra$ncoly
Musual <- extra$Musual
+
+ if ( .lmean == "explink") {
+ if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ warning("turning some columns of 'eta' positive in @loglikelihood")
+ for (ii in 1:ncoly)
+ eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ }
+ }
+
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly)], .l.var , earg = .e.var )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvare , earg = .evare )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
+ sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsdev , earg = .esdev )
}
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
}
- }, list( .lsd = lsd, .l.var = l.var,
- .esd = esd, .e.var = e.var,
+ }, list( .lsdev = lsdev, .lvare = lvare,
+ .esdev = esdev, .evare = evare,
+ .lmean = lmean,
+ .smallno = smallno,
.var.arg = var.arg ))),
vfamily = c("normal1"),
deriv = eval(substitute(expression({
ncoly <- extra$ncoly
Musual <- extra$Musual
- mymu <- eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+
+ if ( .lmean == "explink") {
+ if (any(eta[, Musual*(1:ncoly) - 1] < 0)) {
+ warning("turning some columns of 'eta' positive in @deriv")
+ for (ii in 1:ncoly)
+ eta[, Musual*ii - 1] <- pmax( .smallno , eta[, Musual*ii - 1])
+ }
+ }
+
+
+ mymu <- eta2theta( eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .l.var , earg = .e.var )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvare , earg = .evare )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsdev , earg = .esdev )
}
dl.dmu <- (y - mymu) / sdev^2
@@ -1960,11 +2028,11 @@ tobit.control <- function(save.weight = TRUE, ...)
dl.dsd <- -1.0 / sdev + (y - mymu)^2 / sdev^3
}
- dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
if ( .var.arg ) {
- dva.deta <- dtheta.deta(Varm, .l.var , earg = .e.var )
+ dva.deta <- dtheta.deta(Varm, .lvare , earg = .evare )
} else {
- dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
+ dsd.deta <- dtheta.deta(sdev, .lsdev , earg = .esdev )
}
ans <- c(w) *
@@ -1973,8 +2041,9 @@ tobit.control <- function(save.weight = TRUE, ...)
dl.dsd * dsd.deta)
ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
- }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
- .emean = emean, .esd = esd, .e.var = e.var,
+ }), list( .lmean = lmean, .lsdev = lsdev, .lvare = lvare,
+ .emean = emean, .esdev = esdev, .evare = evare,
+ .smallno = smallno,
.var.arg = var.arg ))),
weight = eval(substitute(expression({
wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is 1-column too
@@ -1995,7 +2064,7 @@ tobit.control <- function(save.weight = TRUE, ...)
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .var.arg = var.arg ))))
-}
+} # End of normal1()
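As a sanity check on the renamed normal1() slots above, the mean part of the model (with the usual identity default for lmean, which this diff does not show but assumes) should reproduce ordinary least squares; an illustrative sketch assuming the package built from this source:

## Illustrative only -- the mean part of normal1() agrees with lm().
library(VGAM)
set.seed(1)
ndata <- data.frame(x2 = runif(100))
ndata <- transform(ndata, y = 2 - 3 * x2 + rnorm(100, sd = 0.5))
fit <- vglm(y ~ x2, normal1(), data = ndata)
cbind(vglm = coef(fit, matrix = TRUE)[, 1],      # mean linear predictor
      lm   = coef(lm(y ~ x2, data = ndata)))     # agree up to IRLS tolerance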
@@ -2014,13 +2083,11 @@ tobit.control <- function(save.weight = TRUE, ...)
imethod = 1,
isd = NULL,
ieta.coeffs = NULL,
- zero = "M")
-{
+ zero = "M") {
- print("20120730; in normal1.term()")
@@ -2030,8 +2097,8 @@ tobit.control <- function(save.weight = TRUE, ...)
lsd <- attr(esd, "function.name")
lvar <- as.list(substitute(lvar))
- e.var <- link2list(lvar)
- l.var <- attr(e.var, "function.name")
+ evar <- link2list(lvar)
+ lvar <- attr(evar, "function.name")
@@ -2057,7 +2124,7 @@ tobit.control <- function(save.weight = TRUE, ...)
"varying coefficients links/constraints\n\n",
"Links: ",
if (var.arg)
- namesof("var", l.var, earg = e.var, tag = TRUE) else
+ namesof("var", lvar, earg = evar, tag = TRUE) else
namesof("sd" , lsd, earg = esd, tag = TRUE), "; ",
"\n",
if (var.arg) "Variance: var" else "Variance: sd^2"),
@@ -2183,7 +2250,7 @@ tobit.control <- function(save.weight = TRUE, ...)
predictors.names <-
c(mynames1,
if ( .var.arg )
- namesof(mynames2, .l.var , earg = .e.var , tag = FALSE) else
+ namesof(mynames2, .lvar , earg = .evar , tag = FALSE) else
namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
print("predictors.names ,,,,,,,,,")
print( predictors.names )
@@ -2226,15 +2293,15 @@ tobit.control <- function(save.weight = TRUE, ...)
}
- if (length( .i.sd )) {
- sdev.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
+ if (length( .isdev )) {
+ sdev.init <- matrix( .isdev , n, ncoly, byrow = TRUE)
}
etastart <-
cbind(eta.equi.probs,
if ( .var.arg )
- theta2eta(sdev.init^2, .l.var , earg = .e.var ) else
+ theta2eta(sdev.init^2, .lvar , earg = .evar ) else
theta2eta(sdev.init , .lsd , earg = .esd ))
colnames(etastart) <- predictors.names
@@ -2247,9 +2314,9 @@ tobit.control <- function(save.weight = TRUE, ...)
}
}), list( .linklist = linklist,
.earglist = earglist,
- .lsd = lsd, .l.var = lvar,
- .esd = esd, .e.var = evar,
- .i.sd = isd,
+ .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .isdev = isd,
.ieta.coeffs = ieta.coeffs,
.var.arg = var.arg, .imethod = imethod ))),
@@ -2275,7 +2342,7 @@ tobit.control <- function(save.weight = TRUE, ...)
rowSums(extra$Xm2 * betas.matrix)
}, list( .linklist = linklist,
.earglist = earglist,
- .esd = esd , .e.var = evar ))),
+ .esd = esd , .evar = evar ))),
last = eval(substitute(expression({
Musual <- extra$Musual
@@ -2294,8 +2361,8 @@ tobit.control <- function(save.weight = TRUE, ...)
misc$multipleResponses <- FALSE
}), list( .linklist = linklist,
.earglist = earglist,
- .lsd = lsd, .l.var = lvar,
- .esd = esd, .e.var = evar,
+ .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
.var.arg = var.arg, .imethod = imethod ))),
loglikelihood = eval(substitute(
@@ -2303,7 +2370,7 @@ tobit.control <- function(save.weight = TRUE, ...)
ncoly <- extra$ncoly
Musual <- 1 # extra$Musual
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly)], .l.var , earg = .e.var )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvar , earg = .evar )
sdev <- sqrt(Varm)
} else {
sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
@@ -2312,8 +2379,8 @@ tobit.control <- function(save.weight = TRUE, ...)
"implemented yet") else {
sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
}
- }, list( .lsd = lsd, .l.var = lvar,
- .esd = esd, .e.var = evar,
+ }, list( .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
.var.arg = var.arg ))),
vfamily = c("normal1.term"),
deriv = eval(substitute(expression({
@@ -2324,7 +2391,7 @@ tobit.control <- function(save.weight = TRUE, ...)
Musual <- 1 # extra$Musual
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .l.var , earg = .e.var )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
sdev <- sqrt(Varm)
} else {
sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
@@ -2367,7 +2434,7 @@ tobit.control <- function(save.weight = TRUE, ...)
if ( .var.arg ) {
- dva.deta <- dtheta.deta(Varm, .l.var , earg = .e.var )
+ dva.deta <- dtheta.deta(Varm, .lvar , earg = .evar )
} else {
dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
}
@@ -2379,8 +2446,8 @@ tobit.control <- function(save.weight = TRUE, ...)
print("head(deriv.ans)9")
print( head(ans) )
ans
- }), list( .linklist = linklist, .lsd = lsd, .l.var = lvar,
- .earglist = earglist, .esd = esd, .e.var = evar,
+ }), list( .linklist = linklist, .lsd = lsd, .lvar = lvar,
+ .earglist = earglist, .esd = esd, .evar = evar,
.var.arg = var.arg ))),
weight = eval(substitute(expression({
print("------ in @ weight -------------")
@@ -2405,16 +2472,16 @@ tobit.control <- function(save.weight = TRUE, ...)
}
- index = iam(NA, NA, M , both = TRUE, diag = TRUE)
- indtw = iam(NA, NA, M-1, both = TRUE, diag = TRUE)
+ index <- iam(NA, NA, M , both = TRUE, diag = TRUE)
+ indtw <- iam(NA, NA, M-1, both = TRUE, diag = TRUE)
print("index")
print( index )
print("indtw")
print( indtw )
- twz = dMu.deta[, indtw$row.index, drop = FALSE] *
- dMu.deta[, indtw$col.index, drop = FALSE]
+ twz <- dMu.deta[, indtw$row.index, drop = FALSE] *
+ dMu.deta[, indtw$col.index, drop = FALSE]
print("head(twz)9------------------------------------------------")
print( head(twz) )
@@ -2438,8 +2505,7 @@ tobit.control <- function(save.weight = TRUE, ...)
lognormal <- function(lmeanlog = "identity", lsdlog = "loge",
- zero = 2)
-{
+ zero = 2) {
@@ -2481,9 +2547,9 @@ tobit.control <- function(save.weight = TRUE, ...)
namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE))
if (!length(etastart)) {
- mylm = lm.wfit(x = x, y = log(y), w = w)
- sdlog.y.est = sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
- etastart = cbind(
+ mylm <- lm.wfit(x = x, y = c(log(y)), w = c(w))
+ sdlog.y.est <- sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
+ etastart <- cbind(
meanlog = rep(theta2eta(log(median(y)), .lmulog,
earg = .emulog), length = n),
sdlog = rep(theta2eta(sdlog.y.est, .lsdlog,
@@ -2492,22 +2558,23 @@ tobit.control <- function(save.weight = TRUE, ...)
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mulog = eta2theta(eta[, 1], .lmulog , earg = .emulog )
- sdlog = eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ mulog <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
exp(mulog + 0.5 * sdlog^2)
}, list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
last = eval(substitute(expression({
- misc$link = c("meanlog" = .lmulog , "sdlog" = .lsdlog )
- misc$earg = list("meanlog" = .emulog , "sdlog" = .esdlog )
+ misc$link <- c("meanlog" = .lmulog , "sdlog" = .lsdlog )
+
+ misc$earg <- list("meanlog" = .emulog , "sdlog" = .esdlog )
- misc$expected = TRUE
+ misc$expected <- TRUE
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mulog = eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ mulog <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE))
@@ -2516,26 +2583,26 @@ tobit.control <- function(save.weight = TRUE, ...)
.emulog = emulog, .esdlog = esdlog ))),
vfamily = c("lognormal"),
deriv = eval(substitute(expression({
- mulog = eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ mulog <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- dmulog.deta = dtheta.deta(mulog, .lmulog, earg = .emulog)
- dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+ dmulog.deta <- dtheta.deta(mulog, .lmulog, earg = .emulog)
+ dsdlog.deta <- dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
- dl.dmulog = (log(y) - mulog) / sdlog^2
- dl.dsdlog = -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
+ dl.dmulog <- (log(y) - mulog) / sdlog^2
+ dl.dsdlog <- -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
c(w) * cbind(dl.dmulog * dmulog.deta,
dl.dsdlog * dsdlog.deta)
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
weight = expression({
- wz = matrix(as.numeric(NA), n, 2) # Diagonal!
- ned2l.dmulog2 = 1 / sdlog^2
- ned2l.dsdlog2 = 2 * ned2l.dmulog2
+ wz <- matrix(as.numeric(NA), n, 2) # Diagonal!
+ ned2l.dmulog2 <- 1 / sdlog^2
+ ned2l.dsdlog2 <- 2 * ned2l.dmulog2
- wz[, iam(1, 1, M)] = ned2l.dmulog2 * dmulog.deta^2
- wz[, iam(2, 2, M)] = ned2l.dsdlog2 * dsdlog.deta^2
+ wz[, iam(1, 1, M)] <- ned2l.dmulog2 * dmulog.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsdlog2 * dsdlog.deta^2
wz = c(w) * wz
wz
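As a usage sketch of the two-parameter lognormal() family above (the data frame, seed, and parameter values are illustrative, not from the package):

  library(VGAM)
  set.seed(1)
  ldat <- data.frame(y = rlnorm(1000, meanlog = 0.5, sdlog = 0.8))
  fit <- vglm(y ~ 1, lognormal, data = ldat, trace = TRUE)
  coef(fit, matrix = TRUE)   # meanlog ("identity" link) and sdlog ("loge" link)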
@@ -2549,8 +2616,7 @@ tobit.control <- function(save.weight = TRUE, ...)
lognormal3 <- function(lmeanlog = "identity", lsdlog = "loge",
powers.try = (-3):3,
- delta = NULL, zero = 2)
-{
+ delta = NULL, zero = 2) {
if (length(delta) &&
@@ -2600,55 +2666,56 @@ tobit.control <- function(save.weight = TRUE, ...)
"lambda")
if (!length(etastart)) {
- miny = min(y)
+ miny <- min(y)
if (length( .delta)) {
- lambda.init = rep(miny- .delta, length = n)
+ lambda.init <- rep(miny- .delta, length = n)
} else {
- pvalue.vec = NULL
- powers.try = .powers.try
+ pvalue.vec <- NULL
+ powers.try <- .powers.try
for(delta in 10^powers.try) {
- pvalue.vec = c(pvalue.vec,
+ pvalue.vec <- c(pvalue.vec,
shapiro.test(sample(log(y-miny+delta),
size=min(5000, length(y ))))$p.value)
}
- index.lambda = (1:length(powers.try))[pvalue.vec ==
+ index.lambda <- (1:length(powers.try))[pvalue.vec ==
max(pvalue.vec)]
- lambda.init = miny - 10^powers.try[index.lambda]
+ lambda.init <- miny - 10^powers.try[index.lambda]
}
- mylm = lm.wfit(x = x, y=log(y-lambda.init), w = w)
- sdlog.y.est = sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
- etastart = cbind(mu = log(median(y - lambda.init)),
- sdlog = rep(theta2eta(sdlog.y.est, .lsdlog, earg = .esdlog),
- length = n),
- lambda = lambda.init)
+ mylm <- lm.wfit(x = x, y = c(log(y - lambda.init)), w = c(w))
+ sdlog.y.est <- sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
+ etastart <-
+ cbind(mu = log(median(y - lambda.init)),
+ sdlog = rep(theta2eta(sdlog.y.est, .lsdlog , earg = .esdlog ),
+ length = n),
+ lambda = lambda.init)
}
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog,
.delta = delta, .powers.try = powers.try ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ mymu <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
lambda + exp(mymu + 0.5 * sdlog^2)
}, list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
last = eval(substitute(expression({
- misc$link = c("meanlog" = .lmulog,
- "sdlog" = .lsdlog,
- "lambda" = "identity")
+ misc$link <- c("meanlog" = .lmulog,
+ "sdlog" = .lsdlog,
+ "lambda" = "identity")
- misc$earg = list("meanlog" = .emulog,
- "sdlog" = .esdlog,
- "lambda" = list())
+ misc$earg <- list("meanlog" = .emulog,
+ "sdlog" = .esdlog,
+ "lambda" = list())
- misc$expected = TRUE
+ misc$expected <- TRUE
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], .lmulog , earg = .emulog)
- sdlog = eta2theta(eta[, 2], .lsdlog , earg = .esdlog)
- lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ mymu <- eta2theta(eta[, 1], .lmulog , earg = .emulog)
+ sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog)
+ lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
if (any(y < lambda))
warning("bad 'y'")
if (residuals) stop("loglikelihood residuals not ",
@@ -2660,9 +2727,9 @@ tobit.control <- function(save.weight = TRUE, ...)
.emulog = emulog, .esdlog = esdlog ))),
vfamily = c("lognormal3"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta[, 1], .lmulog, earg = .emulog)
- sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
- lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ mymu <- eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog <- eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ lambda <- eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
if (any(y < lambda))
warning("bad 'y'")
@@ -2713,8 +2780,7 @@ dsnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
stop("bad input for argument 'scale'")
zedd <- (x - location) / scale
loglik <- log(2) + dnorm(zedd, log = TRUE) +
- pnorm(shape * zedd, log.p = TRUE) -
- log(scale)
+ pnorm(shape * zedd, log.p = TRUE) - log(scale)
if (log.arg) {
loglik
} else {
@@ -2724,20 +2790,15 @@ dsnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
-rsnorm <- function(n, location = 0, scale = 1, shape=0) {
- if (!is.Numeric(n, positive = TRUE,
- integer.valued = TRUE, allowable.length = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(shape))
- stop("bad input for argument 'shape'")
+rsnorm <- function(n, location = 0, scale = 1, shape = 0) {
rho <- shape / sqrt(1 + shape^2)
u0 <- rnorm(n)
v <- rnorm(n)
u1 <- rho*u0 + sqrt(1 - rho^2) * v
- location + scale * ifelse(u0 >= 0, u1, -u1)
+ ans <- location + scale * ifelse(u0 >= 0, u1, -u1)
+ ans[scale <= 0] <- NA
+ ans
}
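A small numeric sketch of the skew-normal helpers dsnorm() and rsnorm() above (all values illustrative); with the new guard, a non-positive 'scale' now yields NA rather than a nonsense draw:

  library(VGAM)
  set.seed(123)
  x <- rsnorm(1000, location = 0, scale = 1, shape = 3)
  c(mean = mean(x), sd = sd(x))   # right-skewed sample
  dsnorm(0, shape = 3)            # density at the origin, about 0.399
  rsnorm(3, scale = -1)           # NA NA NA under the new check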
@@ -2745,8 +2806,7 @@ rsnorm <- function(n, location = 0, scale = 1, shape=0) {
skewnormal1 <- function(lshape = "identity",
ishape = NULL,
- nsimEIM = NULL)
-{
+ nsimEIM = NULL) {
lshape <- as.list(substitute(lshape))
@@ -2810,7 +2870,7 @@ rsnorm <- function(n, location = 0, scale = 1, shape=0) {
misc$earg <- list(shape = .eshape )
- misc$nsimEIM = .nsimEIM
+ misc$nsimEIM <- .nsimEIM
misc$expected <- (length( .nsimEIM ) > 0)
}), list( .eshape = eshape, .lshape = lshape,
.nsimEIM = nsimEIM ))),
@@ -2842,24 +2902,24 @@ rsnorm <- function(n, location = 0, scale = 1, shape=0) {
}), list( .eshape = eshape, .lshape = lshape ))),
weight = eval(substitute(expression({
if ( length( .nsimEIM )) {
- run.mean = 0
+ run.mean <- 0
for(ii in 1:( .nsimEIM)) {
- ysim = rsnorm(n, location = 0, scale = 1, shape = alpha)
- zedd = ysim*alpha
- tmp76 = pnorm(zedd)
- tmp86 = dnorm(zedd)
- d2l.dshape2 = -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
+ ysim <- rsnorm(n, location = 0, scale = 1, shape = alpha)
+ zedd <- ysim*alpha
+ tmp76 <- pnorm(zedd)
+ tmp86 <- dnorm(zedd)
+ d2l.dshape2 <- -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
rm(ysim)
- run.mean = ((ii-1) * run.mean + d2l.dshape2) / ii
+ run.mean <- ((ii-1) * run.mean + d2l.dshape2) / ii
}
if (intercept.only)
- run.mean = mean(run.mean)
- wz = -c(w) * (dshape.deta^2) * run.mean
+ run.mean <- mean(run.mean)
+ wz <- -c(w) * (dshape.deta^2) * run.mean
} else {
- d2shape.deta2 = d2theta.deta2(alpha, .lshape, earg = .eshape)
- d2l.dshape2 = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
- wz = -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
- wz = c(w) * wz
+ d2shape.deta2 <- d2theta.deta2(alpha, .lshape, earg = .eshape)
+ d2l.dshape2 <- -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
+ wz <- -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
+ wz <- c(w) * wz
}
wz
}), list( .eshape = eshape, .lshape = lshape, .nsimEIM = nsimEIM ))))
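A hedged fitting sketch for skewnormal1() using the simulation-based expected information (nsimEIM); the sample is made up:

  library(VGAM)
  set.seed(2)
  sdat <- data.frame(y = rsnorm(500, shape = 4))
  fit.sn <- vglm(y ~ 1, skewnormal1(nsimEIM = 200), data = sdat, trace = TRUE)
  coef(fit.sn, matrix = TRUE)   # 'shape' on the identity link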
@@ -2868,6 +2928,303 @@ rsnorm <- function(n, location = 0, scale = 1, shape=0) {
+if (FALSE)
+ halfnormal1 <-
+ function(lsd = "loge", lvar = "loge",
+ var.arg = FALSE,
+ imethod = 1,
+ isd = NULL,
+ parallel = FALSE,
+ apply.parint = FALSE,
+ zero = NULL) {
+
+
+
+ warning("20121101; not working; yettodo: finish it!")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+ lvar <- as.list(substitute(lvar))
+ evar <- link2list(lvar)
+ lvar <- attr(evar, "function.name")
+
+ emean <- list()
+
+
+ lmean <- "identity"
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+
+ if (!is.logical(var.arg) ||
+ length(var.arg) != 1)
+ stop("argument 'var.arg' must be a single logical")
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
+
+
+ if (is.logical(parallel) && parallel && length(zero))
+ stop("set 'zero = NULL' if 'parallel = TRUE'")
+
+
+ new("vglmff",
+ blurb = c("Half-normal distribution\n\n",
+ "Links: ",
+ if (var.arg)
+ namesof("var", lvar, earg = evar, tag = TRUE) else
+ namesof("sd" , lsd, earg = esd, tag = TRUE),
+ "\n",
+ if (var.arg) "Variance: var zz" else "Variance: sd^2 zz"),
+
+
+ constraints = eval(substitute(expression({
+
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ apply.int = .apply.parint )
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero,
+ .parallel = parallel, .apply.parint = apply.parint ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+ orig.y <- y
+
+
+
+
+
+
+
+
+ if (length(attr(orig.y, "Prior.Weights"))) {
+ if (any(c(w) != 1))
+ warning("replacing the 'weights' argument by the 'Prior.Weights'",
+ "attribute of the response (probably due to Qvar()")
+
+
+ w <- attr(orig.y, "Prior.Weights")
+
+
+ extra$attributes.y <- attributes(orig.y)
+
+ } else {
+ }
+
+
+
+
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+
+ mynames2 <- paste(if ( .var.arg ) "var" else "sd",
+ if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(if ( .var.arg )
+ namesof(mynames2, .lvar , earg = .evar , tag = FALSE) else
+ namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
+ extra$predictors.names <- predictors.names
+
+
+ if (!length(etastart)) {
+ sdev.init <- mean.init <- matrix(0, n, ncoly)
+ for (jay in 1:ncoly) {
+ jfit <- lm.wfit(x = x, y = y[, jay], w = w[, jay])
+ mean.init[, jay] <- if ( .lmean == "loge")
+ pmax(1/1024, y[, jay]) else
+ if( .imethod == 1) median(y[, jay]) else
+ if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
+ if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
+ 0.5 + y[, jay] * 0.5 else
+ mean(jfit$fitted)
+
+ sdev.init[, jay] <-
+ if( .imethod == 1) {
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ } else if( .imethod == 2) {
+ if (jfit$df.resid > 0)
+ sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
+ sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
+ } else if( .imethod == 3) {
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ } else {
+ sqrt( sum(w[, jay] * abs(y[, jay] -
+ mean.init[, jay])) / sum(w[, jay]) )
+ }
+
+ if (any(sdev.init[, jay] <= sqrt( .Machine$double.eps ) ))
+ sdev.init[, jay] <- 1.01
+
+ }
+
+
+ if (length( .isd )) {
+ sdev.init <- matrix( .isd , n, ncoly, byrow = TRUE)
+ }
+
+
+ etastart <-
+ cbind(if ( .var.arg )
+ theta2eta(sdev.init^2, .lvar , earg = .evar ) else
+ theta2eta(sdev.init , .lsd , earg = .esd ))
+
+ colnames(etastart) <- predictors.names
+ }
+ }), list( .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .lmean = lmean,
+ .isd = isd,
+ .var.arg = var.arg, .imethod = imethod ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- extra$Musual
+ ncoly <- extra$ncoly
+ eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ }, list( .esd = esd , .evar = evar,
+ .emean = emean,
+ .lmean = lmean ))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lsd , length = ncoly))
+ temp.names <- c(mynames2)
+ names(misc$link) <- temp.names
+
+
+ misc$earg <- vector("list", Musual * ncoly)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii ]] <- if ( .var.arg ) .evar else .esd
+ }
+ names(misc$earg) <- temp.names
+
+ misc$var.arg <- .var.arg
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ misc$multipleResponses <- TRUE
+ misc$parallel <- .parallel
+ misc$apply.parint <- .apply.parint
+ }), list( .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .parallel = parallel, .apply.parint = apply.parint,
+ .var.arg = var.arg, .imethod = imethod ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ncoly <- extra$ncoly
+ Musual <- extra$Musual
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly)], .lvar , earg = .evar )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
+ }
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ }
+ }, list( .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .var.arg = var.arg ))),
+ vfamily = c("halfnormal1"),
+ deriv = eval(substitute(expression({
+ ncoly <- extra$ncoly
+ Musual <- extra$Musual
+
+ mymu <- zz
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ }
+
+ dl.dmu <- zz * (y - mymu) / sdev^2
+ if ( .var.arg ) {
+ dl.dva <- -0.5 / Varm + 0.5 * (y - mymu)^2 / sdev^4
+ } else {
+ dl.dsd <- -1.0 / sdev + (y - mymu)^2 / sdev^3
+ }
+
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+ if ( .var.arg ) {
+ dva.deta <- dtheta.deta(Varm, .lvar , earg = .evar )
+ } else {
+ dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
+ }
+
+ ans <- c(w) *
+ cbind(if ( .var.arg ) dl.dva * dva.deta else
+ dl.dsd * dsd.deta)
+ ans
+ }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
+ .emean = emean, .esd = esd, .evar = evar,
+ .var.arg = var.arg ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is 1-column too
+
+ ned2l.dmu2 <- 1 / sdev^2
+ if ( .var.arg ) {
+ ned2l.dva2 <- 0.5 / Varm^2
+ } else {
+ ned2l.dsd2 <- 2 / sdev^2
+ }
+
+ wz[, Musual*(1:ncoly) ] <- if ( .var.arg ) {
+ ned2l.dva2 * dva.deta^2
+ } else {
+ ned2l.dsd2 * dsd.deta^2
+ }
+
+
+ wz
+ }), list( .var.arg = var.arg ))))
+}
+
+
+
+
+
diff --git a/R/family.others.R b/R/family.others.R
index a1f3ea6..b788d64 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -115,7 +115,7 @@ rexppois <- function(n, lambda, betave = 1) {
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero , M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero))),
initialize = eval(substitute(expression({
@@ -128,24 +128,25 @@ rexppois <- function(n, lambda, betave = 1) {
y <- temp5$y
- predictors.names = c(
+ predictors.names <- c(
namesof("lambda", .llambda, earg = .elambda, short = TRUE),
namesof("betave", .lbetave, earg = .ebetave, short = TRUE))
if (!length(etastart)) {
- betave.init = if (length( .ibetave ))
+ betave.init <- if (length( .ibetave ))
rep( .ibetave , len = n) else
stop("Need to input a value into argument 'ibetave'")
- lambda.init = if (length( .ilambda ))
+ lambda.init <- if (length( .ilambda ))
rep( .ilambda , len = n) else
(1/betave.init - mean(y)) / ((y *
exp(-betave.init * y))/n)
- betave.init = rep(weighted.mean(betave.init, w = w), len = n)
+ betave.init <- rep(weighted.mean(betave.init, w = w), len = n)
- etastart = cbind(theta2eta(lambda.init, .llambda ,earg = .elambda ),
- theta2eta(betave.init, .lbetave ,earg = .ebetave ))
+ etastart <-
+ cbind(theta2eta(lambda.init, .llambda ,earg = .elambda ),
+ theta2eta(betave.init, .lbetave ,earg = .ebetave ))
}
}), list( .llambda = llambda, .lbetave = lbetave,
@@ -153,8 +154,8 @@ rexppois <- function(n, lambda, betave = 1) {
.elambda = elambda, .ebetave = ebetave))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
-lambda * genhypergeo(c(1, 1), c(2, 2), lambda) / (expm1(-lambda) *
@@ -163,19 +164,19 @@ rexppois <- function(n, lambda, betave = 1) {
.elambda = elambda, .ebetave = ebetave))),
last = eval(substitute(expression({
- misc$link = c(lambda = .llambda , betave = .lbetave )
+ misc$link <- c(lambda = .llambda , betave = .lbetave )
- misc$earg = list(lambda = .elambda , betave = .ebetave )
+ misc$earg <- list(lambda = .elambda , betave = .ebetave )
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- FALSE
}), list( .llambda = llambda, .lbetave = lbetave,
.elambda = elambda, .ebetave = ebetave))),
loglikelihood = eval(substitute(function(mu, y, w,
residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dexppois(x = y, lambda = lambda, betave = betave,
@@ -187,12 +188,12 @@ rexppois <- function(n, lambda, betave = 1) {
vfamily = c("exppoisson"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
- dl.dbetave = 1/betave - y - y * lambda * exp(-betave * y)
- dl.dlambda = 1/lambda - 1/expm1(lambda) - 1 + exp(-betave * y)
- dbetave.deta = dtheta.deta(betave, .lbetave , earg = .ebetave )
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+ dl.dbetave <- 1/betave - y - y * lambda * exp(-betave * y)
+ dl.dlambda <- 1/lambda - 1/expm1(lambda) - 1 + exp(-betave * y)
+ dbetave.deta <- dtheta.deta(betave, .lbetave , earg = .ebetave )
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dbetave * dbetave.deta)
}), list( .llambda = llambda, .lbetave = lbetave,
@@ -200,23 +201,23 @@ rexppois <- function(n, lambda, betave = 1) {
weight = eval(substitute(expression({
- temp1 = -expm1(-lambda)
+ temp1 <- -expm1(-lambda)
- ned2l.dlambda2 = (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
+ ned2l.dlambda2 <- (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
exp(lambda)) / (lambda * temp1)^2
- ned2l.dbetave2 = 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 *
+ ned2l.dbetave2 <- 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 *
betave^2 * temp1)) *
genhypergeo(c(2, 2, 2),c(3, 3, 3),lambda)
- ned2l.dbetavelambda = (lambda * exp(-lambda) / (4 * betave * temp1)) *
+ ned2l.dbetavelambda <- (lambda * exp(-lambda) / (4 * betave * temp1)) *
genhypergeo(c(2, 2),c(3, 3),lambda)
wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dlambda.deta^2 * ned2l.dlambda2
- wz[, iam(2, 2, M)] = dbetave.deta^2 * ned2l.dbetave2
- wz[, iam(1, 2, M)] = dbetave.deta * dlambda.deta * ned2l.dbetavelambda
+ wz[, iam(1, 1, M)] <- dlambda.deta^2 * ned2l.dlambda2
+ wz[, iam(2, 2, M)] <- dbetave.deta^2 * ned2l.dbetave2
+ wz[, iam(1, 2, M)] <- dbetave.deta * dlambda.deta * ned2l.dbetavelambda
c(w) * wz
}), list( .zero = zero ))))
}
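A brief fitting sketch for the exppoisson() family above; initial values are passed explicitly because the initializer shown stops when 'ibetave' is empty (all numbers illustrative, and convergence is not guaranteed):

  library(VGAM)
  set.seed(3)
  epdat <- data.frame(y = rexppois(300, lambda = 2))
  fit.ep <- vglm(y ~ 1, exppoisson(ilambda = 2, ibetave = 1),
                 data = epdat, trace = TRUE)
  Coef(fit.ep)   # back-transformed lambda and betave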
@@ -268,7 +269,7 @@ pgenray <- function(q, shape, scale = 1) {
qgenray <- function(p, shape, scale = 1) {
ans <- scale * sqrt(-log1p(-(p^(1/shape))))
- ans[(shape <= 0) | (scale <= 0)] = NaN
+ ans[(shape <= 0) | (scale <= 0)] <- NaN
ans[p < 0] <- NaN
ans[p > 1] <- NaN
ans[p == 0] <- 0
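Quick round-trip check (illustrative parameter values) that qgenray() inverts pgenray(), and that the tightened parameter guards return NaN:

  library(VGAM)
  pp <- c(0.1, 0.5, 0.9)
  qq <- qgenray(pp, shape = 2, scale = 3)
  pgenray(qq, shape = 2, scale = 3)   # recovers 0.1, 0.5, 0.9
  qgenray(0.5, shape = -1)            # NaN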
@@ -345,53 +346,53 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
- predictors.names = c(
+ predictors.names <- c(
namesof("shape", .lshape , earg = .eshape , short = TRUE),
namesof("scale", .lscale , earg = .escale , short = TRUE))
if (!length(etastart)) {
genrayleigh.Loglikfun <- function(scale, y, x, w, extraargs) {
temp1 <- y / scale
- shape = -1 / weighted.mean(log1p(-exp(-temp1^2)), w = w)
+ shape <- -1 / weighted.mean(log1p(-exp(-temp1^2)), w = w)
ans <- sum(c(w) * (log(2) + log(shape) + log(y) -
2 * log(scale) - temp1^2 +
(shape - 1) * log1p(-exp(-temp1^2))))
ans
}
- scale.grid = seq(0.2 * stats::sd(c(y)),
- 5.0 * stats::sd(c(y)), len = 29)
- scale.init = if (length( .iscale )) .iscale else
- getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
+ scale.grid <- seq(0.2 * stats::sd(c(y)),
+ 5.0 * stats::sd(c(y)), len = 29)
+ scale.init <- if (length( .iscale )) .iscale else
+ getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
y = y, x = x, w = w)
- scale.init = rep(scale.init, length = length(y))
+ scale.init <- rep(scale.init, length = length(y))
- shape.init = if (length( .ishape )) .ishape else
- -1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)),
- w = w)
- shape.init = rep(shape.init, length = length(y))
+ shape.init <- if (length( .ishape )) .ishape else
+ -1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)),
+ w = w)
+ shape.init <- rep(shape.init, length = length(y))
- etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
- theta2eta(scale.init, .lscale, earg = .escale))
+ etastart <- cbind(theta2eta(shape.init, .lshape, earg = .eshape),
+ theta2eta(scale.init, .lscale, earg = .escale))
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
.escale = escale, .eshape = eshape))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
qgenray(p = 0.5, shape = shape, scale = Scale)
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(shape = .lshape , scale = .lscale )
+ misc$link <- c(shape = .lshape , scale = .lscale )
- misc$earg = list(shape = .eshape , scale = .escale )
+ misc$earg <- list(shape = .eshape , scale = .escale )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale,
@@ -400,8 +401,8 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
loglikelihood = eval(substitute(function(mu, y, w,
residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals",
"not implemented yet") else {
@@ -414,19 +415,19 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
vfamily = c("genrayleigh"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dthetas.detas = cbind(dshape.deta, dscale.deta)
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dthetas.detas <- cbind(dshape.deta, dscale.deta)
temp1 <- y / Scale
temp2 <- exp(-temp1^2)
temp3 <- temp1^2 / Scale
AAA <- 2 * temp1^2 / Scale # 2 * y^2 / Scale^3
BBB <- -expm1(-temp1^2) # denominator
- dl.dshape = 1/shape + log1p(-temp2)
- dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
+ dl.dshape <- 1/shape + log1p(-temp2)
+ dl.dscale <- -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
dl.dshape[!is.finite(dl.dshape)] =
max(dl.dshape[is.finite(dl.dshape)])
@@ -439,32 +440,32 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- ysim = rgenray(n = n, shape = shape, scale = Scale)
+ ysim <- rgenray(n = n, shape = shape, scale = Scale)
temp1 <- ysim / Scale
temp2 <- exp(-temp1^2) # May be 1 if ysim is very close to 0.
temp3 <- temp1^2 / Scale
AAA <- 2 * temp1^2 / Scale # 2 * y^2 / Scale^3
BBB <- -expm1(-temp1^2) # denominator
- dl.dshape = 1/shape + log1p(-temp2)
- dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
+ dl.dshape <- 1/shape + log1p(-temp2)
+ dl.dscale <- -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
- dl.dshape[!is.finite(dl.dshape)] = max(
+ dl.dshape[!is.finite(dl.dshape)] <- max(
dl.dshape[is.finite(dl.dshape)])
- temp3 = cbind(dl.dshape, dl.dscale)
- run.varcov = run.varcov + temp3[, ind1$row.index] *
- temp3[, ind1$col.index]
+ temp3 <- cbind(dl.dshape, dl.dscale)
+ run.varcov <- run.varcov + temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov, na.rm = FALSE),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
c(w) * wz
}), list( .lshape = lshape , .lscale = lscale,
.eshape = eshape, .escale = escale,
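A matching fit sketch with genrayleigh(), whose working weights come from the simulated EIM accumulated above (data illustrative):

  library(VGAM)
  set.seed(4)
  gdat <- data.frame(y = rgenray(500, shape = 2, scale = 3))
  fit.gr <- vglm(y ~ 1, genrayleigh, data = gdat, trace = TRUE)
  Coef(fit.gr)   # back-transformed shape and scale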
@@ -537,9 +538,8 @@ rexpgeom <- function(n, scale = 1, shape) {
-expgeometric.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+expgeometric.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
@@ -607,29 +607,30 @@ expgeometric.control <- function(save.weight = TRUE, ...)
- predictors.names = c(
+ predictors.names <- c(
namesof("Scale", .lscale , earg = .escale , short = TRUE),
namesof("shape", .lshape , earg = .eshape , short = TRUE))
if (!length(etastart)) {
- scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
- rep( .iscale , len = n)
- } else {
+ scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
+ rep( .iscale , len = n)
+ } else {
stats::sd(c(y)) # The papers scale parameter beta
- }
+ }
- shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
- rep( .ishape , len = n)
- } else {
+ shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
+ rep( .ishape , len = n)
+ } else {
rep(2 - exp(median(y)/scale.init), len = n)
- }
- shape.init[shape.init >= 0.95] = 0.95
- shape.init[shape.init <= 0.05] = 0.05
+ }
+ shape.init[shape.init >= 0.95] <- 0.95
+ shape.init[shape.init <= 0.05] <- 0.05
- etastart = cbind(theta2eta(scale.init, .lscale , earg = .escale ),
- theta2eta(shape.init, .lshape , earg = .eshape ))
+ etastart <-
+ cbind(theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
}
}), list( .lscale = lscale, .lshape = lshape,
@@ -637,8 +638,8 @@ expgeometric.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
(shape - 1) * log1p(-shape) / (shape / Scale)
@@ -646,12 +647,12 @@ expgeometric.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link = c(Scale = .lscale , shape = .lshape )
+ misc$link <- c(Scale = .lscale , shape = .lshape )
- misc$earg = list(Scale = .escale , shape = .eshape )
+ misc$earg <- list(Scale = .escale , shape = .eshape )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
@@ -660,8 +661,8 @@ expgeometric.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(function(mu, y, w,
residuals = FALSE, eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals",
"not implemented yet") else {
@@ -674,18 +675,18 @@ expgeometric.control <- function(save.weight = TRUE, ...)
vfamily = c("expgeometric"),
deriv = eval(substitute(expression({
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
temp2 <- exp(-y / Scale)
temp3 <- shape * temp2
temp4 <- y / Scale^2
- dl.dscale = -1 / Scale + temp4 + 2 * temp4 * temp3 / (1 - temp3)
- dl.dshape = -1 / (1 - shape) + 2 * temp2 / (1 - temp3)
+ dl.dscale <- -1 / Scale + temp4 + 2 * temp4 * temp3 / (1 - temp3)
+ dl.dshape <- -1 / (1 - shape) + 2 * temp2 / (1 - temp3)
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
- dthetas.detas = cbind(dscale.deta, dshape.deta)
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ dthetas.detas <- cbind(dscale.deta, dshape.deta)
answer <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
answer
@@ -702,33 +703,33 @@ expgeometric.control <- function(save.weight = TRUE, ...)
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rexpgeom(n, scale=Scale, shape=shape)
+ ysim <- rexpgeom(n, scale=Scale, shape=shape)
temp2 <- exp(-ysim / Scale)
temp3 <- shape * temp2
temp4 <- ysim / Scale^2
- dl.dscale = -1 / Scale + temp4 +
+ dl.dscale <- -1 / Scale + temp4 +
2 * temp4 * temp3 / (1 - temp3)
- dl.dshape = -1 / (1 - shape) +
+ dl.dshape <- -1 / (1 - shape) +
2 * temp2 / (1 - temp3)
- temp6 = cbind(dl.dscale, dl.dshape)
- run.varcov = run.varcov +
+ temp6 <- cbind(dl.dscale, dl.dshape)
+ run.varcov <- run.varcov +
temp6[,ind1$row.index] * temp6[,ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] *
+ wz <- wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
}
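A numeric sanity sketch (illustrative values) comparing the closed-form mean used in linkinv above with simulated draws from rexpgeom():

  library(VGAM)
  set.seed(5)
  Scale <- 2; shape <- 0.5
  mean(rexpgeom(1e5, scale = Scale, shape = shape))   # about 1.386
  (shape - 1) * log1p(-shape) / (shape / Scale)       # theoretical mean, 1.386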
@@ -809,8 +810,7 @@ rexplog <- function(n, scale = 1, shape) {
-explogarithmic.control <- function(save.weight = TRUE, ...)
-{
+explogarithmic.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -872,28 +872,28 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
- predictors.names = c(
+ predictors.names <- c(
namesof("Scale", .lscale , earg = .escale , short = TRUE),
namesof("shape", .lshape , earg = .eshape , short = TRUE))
if (!length(etastart)) {
- scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
+ scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
rep( .iscale , len = n)
} else {
stats::sd(c(y))
}
- shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
+ shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
rep( .ishape , len = n)
} else {
rep((exp(median(y)/scale.init) - 1)^2, len = n)
}
- shape.init[shape.init >= 0.95] = 0.95
- shape.init[shape.init <= 0.05] = 0.05
+ shape.init[shape.init >= 0.95] <- 0.95
+ shape.init[shape.init <= 0.05] <- 0.05
- etastart =
+ etastart <-
cbind(theta2eta(scale.init, .lscale , earg = .escale ),
theta2eta(shape.init, .lshape , earg = .eshape ))
@@ -903,8 +903,8 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
@@ -914,12 +914,12 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$link = c(Scale = .lscale , shape = .lshape )
+ misc$link <- c(Scale = .lscale , shape = .lshape )
- misc$earg = list(Scale = .escale , shape = .eshape )
+ misc$earg <- list(Scale = .escale , shape = .eshape )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
@@ -928,8 +928,8 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(function(mu, y, w,
residuals = FALSE, eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals",
@@ -943,20 +943,20 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
vfamily = c("explogarithmic"),
deriv = eval(substitute(expression({
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
temp2 <- exp(-y / Scale)
temp3 <- y / Scale^2
temp4 <- 1 - shape
- dl.dscale = (-1 / Scale) + temp3 + (temp4 * temp3 *
- temp2) / (1 - temp4 * temp2)
- dl.dshape = -1 / (shape * log(shape)) - 1 / temp4 -
- temp2 / (1 - temp4 * temp2)
+ dl.dscale <- (-1 / Scale) + temp3 + (temp4 * temp3 *
+ temp2) / (1 - temp4 * temp2)
+ dl.dshape <- -1 / (shape * log(shape)) - 1 / temp4 -
+ temp2 / (1 - temp4 * temp2)
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
- dthetas.detas = cbind(dscale.deta, dshape.deta)
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ dthetas.detas <- cbind(dscale.deta, dshape.deta)
answer <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
answer
@@ -967,34 +967,34 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rexplog(n, scale=Scale, shape=shape)
+ ysim <- rexplog(n, scale=Scale, shape=shape)
temp2 <- exp(-ysim / Scale)
temp3 <- ysim / Scale^2
temp4 <- 1 - shape
- dl.dscale = (-1 / Scale) + temp3 + (temp4 * temp3 *
+ dl.dscale <- (-1 / Scale) + temp3 + (temp4 * temp3 *
temp2) / (1 - temp4 * temp2)
- dl.dshape = -1 / (shape * log(shape)) - 1 / temp4 -
- temp2 / (1 - temp4 * temp2)
+ dl.dshape <- -1 / (shape * log(shape)) - 1 / temp4 -
+ temp2 / (1 - temp4 * temp2)
- temp6 = cbind(dl.dscale, dl.dshape)
- run.varcov = run.varcov +
+ temp6 <- cbind(dl.dscale, dl.dshape)
+ run.varcov <- run.varcov +
temp6[,ind1$row.index] *
temp6[,ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] *
+ wz <- wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
}
@@ -1060,12 +1060,12 @@ dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
na.rm = TRUE))
stop("some parameters out of bound")
- LLL = max(length(x), length(location), length(scale),
+ LLL <- max(length(x), length(location), length(scale),
length(skewpar))
- if (length(x) != LLL) x = rep(x, length = LLL)
- if (length(location) != LLL) location = rep(location, length = LLL)
- if (length(scale) != LLL) scale = rep(scale, length = LLL)
- if (length(skewpar) != LLL) skewpar = rep(skewpar, length = LLL)
+ if (length(x) != LLL) x <- rep(x, length = LLL)
+ if (length(location) != LLL) location <- rep(location, length = LLL)
+ if (length(scale) != LLL) scale <- rep(scale, length = LLL)
+ if (length(skewpar) != LLL) skewpar <- rep(skewpar, length = LLL)
zedd <- (x - location) / scale
@@ -1118,12 +1118,12 @@ qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
na.rm = TRUE))
stop("some parameters out of bound")
# Recycle the vectors to equal lengths
- LLL = max(length(pp), length(location), length(scale),
+ LLL <- max(length(pp), length(location), length(scale),
length(skewpar))
- if (length(pp) != LLL) pp = rep(pp, length = LLL)
- if (length(location) != LLL) location = rep(location, length = LLL)
- if (length(scale) != LLL) scale = rep(scale, length = LLL)
- if (length(skewpar) != LLL) skewpar = rep(skewpar, length = LLL)
+ if (length(pp) != LLL) pp <- rep(pp, length = LLL)
+ if (length(location) != LLL) location <- rep(location, length = LLL)
+ if (length(scale) != LLL) scale <- rep(scale, length = LLL)
+ if (length(skewpar) != LLL) skewpar <- rep(skewpar, length = LLL)
qtpn <- rep(as.numeric(NA), length(LLL))
qtpn <- qnorm(pp / (2 * skewpar), sd = 2 * skewpar)
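As a rough check on dtpn() (hedged; if the two-piece-normal d/p/q helpers are not exported, the VGAM::: prefix would be needed), the density should integrate to one even for an asymmetric 'skewpar':

  library(VGAM)
  integrate(dtpn, -Inf, Inf, location = 0, scale = 1, skewpar = 0.3)$value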
@@ -1179,15 +1179,15 @@ tpnff <- function(llocation = "identity", lscale = "loge",
new("vglmff",
- blurb = c("Two-piece normal distribution \n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: "),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
+ blurb = c("Two-piece normal distribution \n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: "),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
temp5 <-
w.y.check(w = w, y = y,
@@ -1198,15 +1198,15 @@ tpnff <- function(llocation = "identity", lscale = "loge",
- predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
scale.y.est <-
sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
location.init <- if ( .llocat == "loge")
@@ -1342,16 +1342,16 @@ tpnff3 <- function(llocation = "identity",
new("vglmff",
- blurb = c("Two-piece normal distribution \n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("skewpar", lscale, earg = eskewp), "\n\n",
- "Mean: "),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
+ blurb = c("Two-piece normal distribution \n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("skewpar", lscale, earg = eskewp), "\n\n",
+ "Mean: "),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
temp5 <-
w.y.check(w = w, y = y,
@@ -1367,7 +1367,7 @@ tpnff3 <- function(llocation = "identity",
namesof("skewpar", .lskewp, earg = .eskewp, tag = FALSE))
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
+ junk = lm.wfit(x = x, y = c(y), w = c(w))
scale.y.est <- sqrt(sum(c(w) * junk$resid^2) / junk$df.residual)
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .method.init == 3) {
@@ -1396,11 +1396,13 @@ tpnff3 <- function(llocation = "identity",
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
misc$link <- c("location" = .llocat,
- "scale" = .lscale,
- "skewpar" = .lskewp)
- misc$earg <- list( "location" = .elocat,
- "scale" = .escale,
+ "scale" = .lscale,
+ "skewpar" = .lskewp)
+
+ misc$earg <- list("location" = .elocat,
+ "scale" = .escale,
"skewpar" = .eskewp)
+
misc$expected <- TRUE
misc$method.init <- .method.init
}), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
diff --git a/R/family.positive.R b/R/family.positive.R
index 3659eb5..5585ec0 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,140 +10,124 @@
-rhuggins91 <-
- function(n, nTimePts = 5, pvars = length(xcoeff),
- xcoeff = c(-2, 1, 2),
- capeffect = -1,
- double.ch = FALSE,
- link = "logit",
- earg.link = FALSE) {
-
-
-
-
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- orig.n <- use.n
- use.n <- 1.50 * use.n + 100 # Bigger due to rejections
- if (pvars == 0)
- stop("argument 'pvars' must be at least one")
- if (pvars > length(xcoeff))
- stop("argument 'pvars' is too high")
-
+N.hat.posbernoulli <-
+ function(eta, link, earg = list(),
+ R = NULL, w = NULL,
+ X_vlm = NULL, Hlist = NULL,
+ extra = list(),
+ model.type = c("b", "t", "tb")
+ ) {
- if (earg.link) {
- earg <- link
- } else {
- link <- as.list(substitute(link))
- earg <- link2list(link)
- }
- link <- attr(earg, "function.name")
+ if (!is.null(w) && !all(1 == w))
+ warning("estimate of N may be wrong when prior weights ",
+ "are not all unity")
+ model.type <- match.arg(model.type, c("b", "t", "tb"))[1]
- Ymatrix = matrix(0, use.n, nTimePts, dimnames =
- list(as.character(1:use.n),
- paste("y", 1:nTimePts, sep = "")))
+ tau <-
+ switch(model.type,
+ "b" = extra$tau,
+ "t" = ncol(eta),
+ "tb" = (ncol(eta) + 1) / 2)
+ if (length(extra$tau) && extra$tau != tau)
+ warning("variable 'tau' is mistaken") # Checking only
- CHmatrix = matrix(0, use.n, nTimePts, dimnames =
- list(as.character(1:use.n),
- paste("ch", 0:(nTimePts-1), sep = "")))
+ jay.index <-
+ switch(model.type,
+ "b" = rep(1, length = tau), # Subset: 1 out of 1:2
+ "t" = 1:tau, # All of them
+ "tb" = 1:tau) # Subset: first tau of them out of M = 2*tau-1
+ prc <- eta2theta(eta[, jay.index], link, earg = earg) # cap.probs
+ QQQ <- exp(rowSums(log1p(-prc)))
+ pibbeta <- exp(log1p(-QQQ)) # One.minus.QQQ
+ N.hat <- sum(1 / pibbeta) # Point estimate
+ ss2 <- sum(QQQ / pibbeta^2) # Assumes bbeta is known
- Xmatrix = cbind(x1 = rep(1.0, len = use.n))
- if (pvars > 1)
- Xmatrix = cbind(Xmatrix,
- matrix(runif(n = use.n * (pvars-1)), use.n, pvars - 1,
- dimnames = list(as.character(1:use.n),
- paste("x", 2:pvars, sep = ""))))
+ if (length(R)) {
- lin.pred.baseline = xcoeff[1]
- if (pvars > 1)
- lin.pred.baseline = lin.pred.baseline +
- Xmatrix[, 2:pvars, drop = FALSE] %*%
- xcoeff[2:pvars]
- sumrowy = rep(0, length = use.n)
- for (jlocal in 1:nTimePts) {
+ dvect <- matrix(0, length(pibbeta), ncol = ncol(X_vlm))
+ M <- nrow(Hlist[[1]])
+ n_lm <- nrow(X_vlm) / M # Number of rows of the LM matrix
+ dprc.deta <- dtheta.deta(prc, link, earg = earg)
+ Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
+ for (jay in 1:tau) {
+ lapred.index <- jay.index[jay]
+ Index0 <- Hmatrices[lapred.index, ] != 0
+ X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + lapred.index, Index0,
+ drop = FALSE]
- CHmatrix[, jlocal] = as.numeric(sumrowy > 0) *
- (1 + double.ch)
+ dvect[, Index0] <-
+ dvect[, Index0] + (QQQ / (1-prc[, jay])) * dprc.deta[, jay] * X_lm_jay
+ }
- lin.pred = lin.pred.baseline + (CHmatrix[, jlocal] > 0) * capeffect
- Ymatrix[, jlocal] = rbinom(use.n, size = 1,
- prob = eta2theta(lin.pred, link = link, earg = earg))
+ dvect <- dvect * (-1 / pibbeta^2)
+ dvect <- colSums(dvect) # Now a vector
- sumrowy = sumrowy + Ymatrix[, jlocal]
+ ncol_X_vlm <- nrow(R)
+ rinv <- diag(ncol_X_vlm)
+ rinv <- backsolve(R, rinv)
+ rowlen <- drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
+ covun <- rinv %*% t(rinv)
+ vecTF <- FALSE
+ for (jay in 1:tau) {
+ lapred.index <- jay.index[jay]
+ vecTF <- vecTF | (Hmatrices[lapred.index, ] != 0)
+ }
+ vecTF.index <- (1:length(vecTF))[vecTF]
+ covun <- covun[vecTF.index, vecTF.index, drop = FALSE]
+ dvect <- dvect[vecTF.index, drop = FALSE]
}
-
-
- # Strip off rows where the animals were never caught
- # Bug: problem if all values of sumrowy are zero.
- index0 = (sumrowy == 0)
- if (all(!index0))
- stop("bug in this code: cannot handle no animals being caught")
- Ymatrix = Ymatrix[!index0, , drop = FALSE]
- Xmatrix = Xmatrix[!index0, , drop = FALSE]
- CHmatrix = CHmatrix[!index0, , drop = FALSE]
-
- # Bug: problem if all values of sumrowy are zero:
- zCHmatrix = matrix(0, nrow(CHmatrix), ncol(CHmatrix),
- dimnames = list(as.character(1:nrow(CHmatrix)),
- paste("zch", 0:(ncol(CHmatrix)-1), sep = "")))
-
-
- ans = data.frame(Ymatrix, Xmatrix, CHmatrix, zCHmatrix,
- Chistory = rep(0, length = nrow(Ymatrix)))
-
-
- ans = if (nrow(ans) >= orig.n) ans[1:orig.n, ] else {
- rbind(ans,
- Recall(n = orig.n - nrow(ans),
- nTimePts = nTimePts, pvars = pvars,
- xcoeff = xcoeff,
- capeffect = capeffect,
- link = earg, earg.link = TRUE))
- }
-
- rownames(ans) = as.character(1:orig.n)
-
- attr(ans, "pvars") = pvars
- attr(ans, "nTimePts") = nTimePts
- attr(ans, "capeffect") = capeffect
-
- ans
+
+ list(N.hat = N.hat,
+ SE.N.hat = if (length(R)) sqrt(ss2 + t(dvect) %*% covun %*% dvect) else
+ sqrt(ss2)
+ )
}
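The point estimate returned above is a Horvitz-Thompson style sum; a toy illustration with made-up capture probabilities, independent of any fitted model:

  prc.toy <- matrix(c(0.3, 0.5, 0.4,
                      0.2, 0.6, 0.3), nrow = 2, byrow = TRUE)
  QQQ     <- exp(rowSums(log1p(-prc.toy)))   # Pr(animal never captured)
  pibbeta <- 1 - QQQ                         # Pr(captured at least once)
  sum(1 / pibbeta)                           # N.hat, as in N.hat.posbernoulli()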
+aux.posbernoulli <- function(y, check.y = FALSE) {
-
-dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
- x = as.matrix(x)
- prob = as.matrix(prob)
- prob0 = as.matrix(prob0)
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
- logAA0 = rowSums(log1p(-prob0))
- AA0 = exp(logAA0)
+ y <- as.matrix(y)
+ if ((tau <- ncol(y)) == 1)
+ stop("argument 'y' needs to be a matrix with at least two columns")
+ if (check.y) {
+ if (!all(y == 0 | y == 1 | y == 1/tau | is.na(y)))
+ stop("response 'y' must contain 0s and 1s only")
+ }
- ell1 = rowSums(x * log(prob) + (1 - x) * log1p(-prob)) - log1p(-AA0)
- if (log.arg) ell1 else exp(ell1)
+ zeddij <- cbind(0, t(apply(y, 1, cumsum))) # tau + 1 columns
+ zij <- (0 + (zeddij > 0))[, 1:tau] # 0 or 1.
+ if (length(colnames(y)))
+ colnames(zij) <- colnames(y)
+
+ cp1 <- numeric(nrow(y))
+ for (jay in tau:1)
+ cp1[y[, jay] > 0] <- jay
+ if (any(cp1 == 0))
+ warning("some individuals were never captured!")
+
+ yr1i <- zeddij[, tau + 1] - 1
+ list(cap.hist1 = zij,
+ cap1 = cp1, # aka ti1
+ y0i = cp1 - 1,
+ yr0i = tau - cp1 - yr1i,
+ yr1i = yr1i)
}
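An illustrative call to aux.posbernoulli() (assuming it is accessible to the user) on a toy capture-history matrix of three animals over four occasions:

  library(VGAM)
  Y <- rbind(c(0, 1, 0, 1),
             c(1, 0, 0, 0),
             c(0, 0, 1, 1))
  aux.posbernoulli(Y)$cap.hist1   # indicator of a previous capture
  aux.posbernoulli(Y)$cap1        # occasion of first capture: 2, 1, 3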
@@ -152,263 +136,151 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
- huggins91 = function(link = "logit",
- parallel = TRUE,
- iprob = NULL,
- eim.not.oim = TRUE) {
-
-
-
-
-
-
- link <- as.list(substitute(link))
- earg <- link2list(link)
- link <- attr(earg, "function.name")
-
-
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- max(iprob) >= 1)
- stop("argument 'iprob' should have values in (0,1)")
-
- if (!is.logical(eim.not.oim) ||
- length(eim.not.oim) != 1)
- stop("argument 'eim.not.oim' should be 'TRUE' or 'FALSE' only")
-
-
- new("vglmff",
- blurb = c("Huggins (1991) capture-recapture model\n\n",
- "Links: ",
- namesof("prob1", link, earg = earg, tag = FALSE), ", ",
- namesof("prob1.0", link, earg = earg, tag = FALSE), ", ",
- namesof("prob2", link, earg = earg, tag = FALSE), ", ",
- namesof("prob2.0", link, earg = earg, tag = FALSE), ", ..., ",
- namesof("probT.0", link, earg = earg, tag = FALSE),
- "\n"),
- constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
- intercept.apply = TRUE)
- }), list( .parallel = parallel ))),
- infos = eval(substitute(function(...) {
- list(Musual = 2,
- parallel = .parallel)
- }, list( .parallel = parallel ))),
-
- initialize = eval(substitute(expression({
- Musual <- 2
- mustart.orig <- mustart
- y <- as.matrix(y)
- Mdiv2 <- ncoly <- ncol(y)
- M <- Musual * ncoly
-
- w <- matrix(w, n, ncoly)
- mustart <- matrix(colSums(y) / colSums(w),
- n, ncol(y), byrow = TRUE)
- mustart[mustart == 0] <- 0.05
- mustart[mustart == 1] <- 0.95
-
- if (ncoly == 1)
- stop("the response is univariate, therefore use posbinomial()")
-
-
-
- temp5 <-
- w.y.check(w = w, y = y,
- Is.nonnegative.y = TRUE,
- ncol.w.max = Inf,
- ncol.y.max = Inf,
- out.wy = TRUE,
- maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
-
- if (!all(y == 0 | y == 1))
- stop("response must contain 0s and 1s only")
- if (!all(w == 1))
- stop("argument 'weight' must contain 1s only")
+rposbern <-
+ function(n, nTimePts = 5, pvars = length(xcoeff),
+ xcoeff = c(-2, 1, 2),
+ cap.effect = -1,
+ link = "logit",
+ is.popn = FALSE,
+ earg.link = FALSE) {
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- paste("prob", 1:Mdiv2, sep = "")
- }
- dn2 = c(dn2, paste(dn2, ".0", sep = ""))
- dn2 = dn2[interleave.VGAM(M, M = Musual)]
- predictors.names <- namesof(dn2, .link , earg = .earg, short = TRUE)
- if (!length(etastart)) {
- mustart.use = if (length(mustart.orig)) {
- mustart.orig
- } else
- if (length( .iprob )) {
- matrix( .iprob, nrow(mustart), ncol(mustart), byrow = TRUE)
- } else {
- mustart
- }
- etastart = cbind(theta2eta(mustart.use, .link , earg = .earg ))
- etastart = kronecker(etastart, cbind(1, 1))
- }
- mustart = NULL
- }), list( .link = link, .earg = earg, .iprob = iprob ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- Mdiv2 <- ncol(eta) / Musual
- index1 <- Musual * (1:Mdiv2) - 1
- index2 <- Musual * (1:Mdiv2) - 0
- probs.numer = eta2theta(eta[, index1], # + extra$moffset[, index1],
- .link , earg = .earg )
- probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
- logAA0 = rowSums(log1p(-probs.denom))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ orig.n <- use.n
+ if (!is.popn)
+ use.n <- 1.50 * use.n + 100 # Bigger due to rejections
- AA0 = exp(logAA0)
- AAA = exp(log1p(-AA0)) # 1 - AA0
- probs.numer / AAA
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
+ if (pvars == 0)
+ stop("argument 'pvars' must be at least one")
+ if (pvars > length(xcoeff))
+ stop("argument 'pvars' is too high")
+
- misc$link = rep( .link , length = M)
- names(misc$link) = dn2
+ if (earg.link) {
+ earg <- link
+ } else {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ }
+ link <- attr(earg, "function.name")
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M)
- misc$earg[[ii]] = .earg
- misc$expected = .eim.not.oim
- misc$mv = TRUE
- misc$iprob = .iprob
- misc$eim.not.oim = .eim.not.oim
- misc$parallel = .parallel
- }), list( .link = link, .earg = earg,
- .parallel = parallel,
- .eim.not.oim = eim.not.oim, .iprob = iprob ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ycounts = y
- Musual <- 2
- Mdiv2 <- ncol(eta) / Musual
- index1 <- Musual * (1:Mdiv2) - 1
- index2 <- Musual * (1:Mdiv2) - 0
+ Ymatrix <- matrix(0, use.n, nTimePts,
+ dimnames = list(as.character(1:use.n),
+ paste("y", 1:nTimePts, sep = "")))
- probs.numer = eta2theta(eta[, index1], # + extra$moffset[, index1],
- .link , earg = .earg )
+ CHmatrix <- matrix(0, use.n, nTimePts,
+ dimnames = list(as.character(1:use.n),
+ paste("ch", 0:(nTimePts-1), sep = "")))
- probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
+ Xmatrix <- cbind(x1 = rep(1.0, len = use.n))
+ if (pvars > 1)
+ Xmatrix <- cbind(Xmatrix,
+ matrix(runif(n = use.n * (pvars-1)),
+ use.n, pvars - 1,
+ dimnames = list(as.character(1:use.n),
+ paste("x", 2:pvars, sep = ""))))
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(dhuggins91(x = ycounts, # size = 1, # Bernoulli trials
- prob = probs.numer,
- prob0 = probs.denom, # zz choose this??
- log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("huggins91"),
- deriv = eval(substitute(expression({
- Musual <- 2
- Mdiv2 <- ncol(eta) / Musual
- index1 <- Musual * (1:Mdiv2) - 1
- index2 <- Musual * (1:Mdiv2) - 0
- probs.numer = eta2theta(eta[, index1], .link , earg = .earg )
+ lin.pred.baseline <- xcoeff[1]
+ if (pvars > 1)
+ lin.pred.baseline <- lin.pred.baseline +
+ Xmatrix[, 2:pvars, drop = FALSE] %*%
+ xcoeff[2:pvars]
+ sumrowy <- rep(0, length = use.n)
+ for (jlocal in 1:nTimePts) {
+ CHmatrix[, jlocal] <- as.numeric(sumrowy > 0)
- probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
+ lin.pred <- lin.pred.baseline + (CHmatrix[, jlocal] > 0) * cap.effect
- logAA0 = rowSums(log1p(-probs.denom))
+ Ymatrix[, jlocal] <-
+ rbinom(use.n, size = 1,
+ prob = eta2theta(lin.pred, link = link, earg = earg))
+ sumrowy <- sumrowy + Ymatrix[, jlocal]
+ }
- AA0 = exp(logAA0)
- AAA = exp(log1p(-AA0)) # 1 - AA0
- B_s = AA0 / (1 - probs.denom)
- B_st = array(0, c(n, Mdiv2, Mdiv2))
- for(slocal in 1:(Mdiv2-1))
- for(tlocal in (slocal+1):Mdiv2)
- B_st[, slocal, tlocal] =
- B_st[, tlocal, slocal] = B_s[, slocal] / (1 - probs.denom[, tlocal])
+ index0 <- (sumrowy == 0)
+ if (all(!index0))
+ stop("bug in this code: cannot handle no animals being caught")
+ Ymatrix <- Ymatrix[!index0, , drop = FALSE]
+ Xmatrix <- Xmatrix[!index0, , drop = FALSE]
+ CHmatrix <- CHmatrix[!index0, , drop = FALSE]
+ zCHmatrix <- matrix(0, nrow(CHmatrix), ncol(CHmatrix),
+ dimnames = list(as.character(1:nrow(CHmatrix)),
+ paste("zch", 0:(ncol(CHmatrix)-1), sep = "")))
- Temp2 = (1 - probs.numer)^2
- temp2 = (1 - probs.denom)^2
- dprob1.deta1 = dtheta.deta(probs.numer, .link , earg = .earg ) # trivial
- dprob1.deta2 = dtheta.deta(probs.numer, .link , earg = .earg ) # trivial
- dprob2.deta1 = dtheta.deta(probs.denom, .link , earg = .earg ) # trivial
- dprob2.deta2 = dtheta.deta(probs.denom, .link , earg = .earg ) # trivial
+ ans <- data.frame(Ymatrix, Xmatrix, CHmatrix, zCHmatrix,
+ Chistory = rep(0, length = nrow(Ymatrix)))
- dl.dprob1 = y / probs.numer - (1 - y) / (1 - probs.numer)
- dl.dprob2 = -B_s / AAA
- dl.deta1 = dl.dprob1 * dprob1.deta1
- dl.deta2 = dl.dprob2 * dprob2.deta1
- dl.deta2 = dl.dprob2 * dprob2.deta2 # zz
- deriv.ans = cbind(dl.deta1 + dl.deta2,
- dl.deta1 + dl.deta2)
- deriv.ans = deriv.ans[, interleave.VGAM(M, M = Musual)]
- deriv.ans = deriv.ans / Musual # Matches with CCCC
+ if (!is.popn) {
+ ans <- if (nrow(ans) >= orig.n) {
+ ans[1:orig.n, ]
+ } else {
+ rbind(ans,
+ Recall(n = orig.n - nrow(ans),
+ nTimePts = nTimePts, pvars = pvars,
+ xcoeff = xcoeff,
+ cap.effect = cap.effect,
+ link = earg, earg.link = TRUE))
+ }
+ }
- deriv.ans
- }), list( .link = link, .earg = earg ))),
+ rownames(ans) <- as.character(1:nrow(ans))
- weight = eval(substitute(expression({
- ed2l.dprob1.2 = 1 / (probs.numer * AAA) + 1 / Temp2 -
- probs.numer / (AAA * Temp2) - (B_s / AAA)^2
+ attr(ans, "pvars") <- pvars
+ attr(ans, "nTimePts") <- nTimePts
+ attr(ans, "cap.effect") <- cap.effect
+ attr(ans, "is.popn") <- is.popn
+ attr(ans, "n") <- n
- od2l.dprob1.2 = y / probs.numer^2 + (1 - y) / (1 - probs.numer)^2 -
- (B_s / AAA)^2
+ ans
+}
- d2prob1.deta1.2 = d2theta.deta2(probs.numer, .link , earg = .earg )
- d2prob1.deta2.2 = d2theta.deta2(probs.numer, .link , earg = .earg )
- d2prob1.deta12 = d2theta.deta2(probs.numer, .link , earg = .earg )
- d2prob2.deta1.2 = d2theta.deta2(probs.denom, .link , earg = .earg )
- d2prob2.deta12 = d2theta.deta2(probs.denom, .link , earg = .earg )
+
- wz = matrix(0, n, dimm(M))
- wz[, index1] <-
- wz[, index2] <-
- if ( .eim.not.oim ) {
- ed2l.dprob1.2 * (dprob1.deta1^2) # +
- } else {
- od2l.dprob1.2 * (dprob1.deta1^2) -
- (dl.dprob1 + dl.dprob2) * d2prob1.deta1.2
- }
+dposbern <- function(x, prob, prob0 = prob, log = FALSE) {
- for(slocal in 1:(Mdiv2-1))
- for(tlocal in (slocal+1):Mdiv2)
- wz[, iam(Musual*slocal - 1,
- Musual*tlocal - 1, M = M)] =
- wz[, iam(Musual*slocal ,
- Musual*tlocal , M = M)] =
- dprob2.deta1[, slocal] *
- dprob2.deta1[, tlocal] *
- (B_st[, slocal, tlocal] +
- B_s [, slocal] *
- B_s [, tlocal] / AAA) / (-AAA)
+ x <- as.matrix(x)
+ prob <- as.matrix(prob)
+ prob0 <- as.matrix(prob0)
- wz = wz / Musual # Matches with CCCC
+ if (!is.logical(log.arg <- log) ||
+ length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+ if (ncol(x) < 2)
+ stop("columns of argument 'x' should be 2 or more")
- wz
- }), list( .link = link, .earg = earg, .eim.not.oim = eim.not.oim ))))
+ logAA0 <- rowSums(log1p(-prob0))
+ AA0 <- exp(logAA0)
+ ell1 <- x * log(prob) + (1 - x) * log1p(-prob) - log1p(-AA0) / ncol(x)
+ if (log.arg) ell1 else exp(ell1)
}
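
A minimal usage sketch for dposbern() above, assuming this VGAM build is installed and loaded; rowSums() of the returned log-density matrix gives each animal's capture-history log-likelihood conditioned on at least one capture.

    library(VGAM)
    # three capture histories over tau = 2 occasions, common capture prob 0.3
    ch   <- matrix(c(1, 0,
                     0, 1,
                     1, 1), nrow = 3, byrow = TRUE)
    pmat <- matrix(0.3, nrow = 3, ncol = 2)
    rowSums(dposbern(x = ch, prob = pmat, log = TRUE))
    # each row sum equals log{ P(history) / (1 - (1-p1)*(1-p2)) }
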
@@ -418,10 +290,7 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
-
-
-
-dposnegbin = function(x, size, prob = NULL, munb = NULL, log = FALSE) {
+dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("'prob' and 'munb' both specified")
@@ -434,28 +303,28 @@ dposnegbin = function(x, size, prob = NULL, munb = NULL, log = FALSE) {
LLL <- max(length(x), length(prob), length(size))
- x = rep(x, len = LLL);
- prob = rep(prob, len = LLL);
- size = rep(size, len = LLL);
+ x <- rep(x, len = LLL);
+ prob <- rep(prob, len = LLL);
+ size <- rep(size, len = LLL);
- ans = dnbinom(x = x, size = size, prob = prob, log = log.arg)
- index0 = (x == 0)
+ ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
+ index0 <- (x == 0)
if (log.arg) {
- ans[ index0] = log(0.0)
- ans[!index0] = ans[!index0] - log1p(-dnbinom(x = 0 * x[!index0],
- size = size[!index0], prob = prob[!index0]))
+ ans[ index0] <- log(0.0)
+ ans[!index0] <- ans[!index0] - log1p(-dnbinom(x = 0 * x[!index0],
+ size = size[!index0], prob = prob[!index0]))
} else {
- ans[ index0] = 0.0
- ans[!index0] = ans[!index0] / pnbinom(q = 0 * x[!index0],
- size = size[!index0], prob = prob[!index0],
- lower.tail = FALSE)
+ ans[ index0] <- 0.0
+ ans[!index0] <- ans[!index0] / pnbinom(q = 0 * x[!index0],
+ size = size[!index0], prob = prob[!index0],
+ lower.tail = FALSE)
}
ans
}
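
A quick sanity sketch for dposnegbin() (with this VGAM build loaded): the zero-truncated negative binomial has support on x = 1, 2, ..., so the density should sum to 1 over that range and give zero mass at x = 0; 'munb' is the mean of the untruncated parent distribution.

    sum(dposnegbin(x = 1:2000, size = 2, munb = 3))   # approximately 1
    dposnegbin(x = 0, size = 2, munb = 3)             # exactly 0
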
-pposnegbin = function(q, size, prob = NULL, munb = NULL) {
+pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
if (length(munb)) {
if (length(prob))
@@ -464,11 +333,11 @@ pposnegbin = function(q, size, prob = NULL, munb = NULL) {
}
L <- max(length(q), length(prob), length(size))
if (length(q) != L)
- q = rep(q, length.out = L);
+ q <- rep(q, length.out = L);
if (length(prob) != L)
- prob = rep(prob, length.out = L);
+ prob <- rep(prob, length.out = L);
if (length(size) != L)
- size = rep(size, length.out = L)
+ size <- rep(size, length.out = L)
ifelse(q < 1, 0,
(pnbinom(q, size = size, prob = prob) -
@@ -477,7 +346,7 @@ pposnegbin = function(q, size, prob = NULL, munb = NULL) {
}
-qposnegbin = function(p, size, prob = NULL, munb = NULL) {
+qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
if (length(munb)) {
@@ -486,13 +355,13 @@ qposnegbin = function(p, size, prob = NULL, munb = NULL) {
prob <- size / (size + munb)
}
- ans = qnbinom(pnbinom(q = 0, size = size, prob = prob,
- lower.tail = FALSE) * p +
- dnbinom(x = 0, size = size, prob = prob),
- size = size, prob = prob)
- ans[p > 1] = NaN
- ans[p < 0] = NaN
- ans[p == 1] = Inf
+ ans <- qnbinom(pnbinom(q = 0, size = size, prob = prob,
+ lower.tail = FALSE) * p +
+ dnbinom(x = 0, size = size, prob = prob),
+ size = size, prob = prob)
+ ans[p > 1] <- NaN
+ ans[p < 0] <- NaN
+ ans[p == 1] <- Inf
ans
}
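
A hedged round-trip sketch for qposnegbin() and pposnegbin(): for a discrete distribution, the CDF evaluated at the p-quantile should be at least p.

    pvec <- c(0.1, 0.5, 0.9)
    qvec <- qposnegbin(pvec, size = 2, munb = 3)
    pposnegbin(qvec, size = 2, munb = 3) >= pvec      # all TRUE
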
@@ -504,18 +373,16 @@ qposnegbin = function(p, size, prob = NULL, munb = NULL) {
-posnegbinomial.control <- function(save.weight = TRUE, ...)
-{
+posnegbinomial.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
- posnegbinomial = function(lmunb = "loge", lsize = "loge",
- isize = NULL, zero = -2,
- nsimEIM = 250,
- shrinkage.init = 0.95, imethod = 1)
-{
+ posnegbinomial <- function(lmunb = "loge", lsize = "loge",
+ isize = NULL, zero = -2,
+ nsimEIM = 250,
+ shrinkage.init = 0.95, imethod = 1) {
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -547,10 +414,10 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Positive-negative binomial distribution\n\n",
- "Links: ",
- namesof("munb", lmunb, earg = emunb ), ", ",
- namesof("size", lsize, earg = esize ), "\n",
- "Mean: munb / (1 - (size / (size + munb))^size)"),
+ "Links: ",
+ namesof("munb", lmunb, earg = emunb ), ", ",
+ namesof("size", lsize, earg = esize ), "\n",
+ "Mean: munb / (1 - (size / (size + munb))^size)"),
constraints = eval(substitute(expression({
dotzero <- .zero
@@ -573,7 +440,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
if (any(y == 0))
stop("there are zero values in the response")
- y = as.matrix(y)
+ y <- as.matrix(y)
temp5 <-
@@ -587,17 +454,13 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
maximize = TRUE)
w <- temp5$w
y <- temp5$y
- print("head(w)")
- print( head(w) )
- print("head(y)")
- print( head(y) )
- M = Musual * ncol(y)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ M <- Musual * ncol(y)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
predictors.names <- c(
namesof(if (NOS == 1) "munb" else
@@ -609,40 +472,40 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
if (!length(etastart)) {
- mu.init = y
+ mu.init <- y
for(iii in 1:ncol(y)) {
- use.this = if ( .imethod == 1) {
+ use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii])
} else {
median(y[,iii])
}
- mu.init[, iii] = (1 - .sinit) * y[, iii] + .sinit * use.this
+ mu.init[, iii] <- (1 - .sinit) * y[, iii] + .sinit * use.this
}
if ( is.Numeric( .isize )) {
- kmat0 = matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
+ kmat0 <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
} else {
posnegbinomial.Loglikfun =
function(kmat, y, x, w, extraargs) {
- munb = extraargs
+ munb <- extraargs
sum(w * dposnegbin(x = y, size = kmat, munb = munb,
log = TRUE))
}
- k.grid = 2^((-6):6)
- kmat0 = matrix(0, nrow = n, ncol = NOS)
+ k.grid <- 2^((-6):6)
+ kmat0 <- matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kmat0[, spp.] = getMaxMin(k.grid,
+ kmat0[, spp.] <- getMaxMin(k.grid,
objfun = posnegbinomial.Loglikfun,
y = y[, spp.], x = x, w = w[, spp.],
extraargs = mu.init[, spp.])
}
}
- p00 = (kmat0 / (kmat0 + mu.init))^kmat0
- etastart =
+ p00 <- (kmat0 / (kmat0 + mu.init))^kmat0
+ etastart <-
cbind(
theta2eta(mu.init * (1 - p00), .lmunb, earg = .emunb ),
theta2eta(kmat0, .lsize, earg = .esize ))
- etastart = etastart[,interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[,interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
.emunb = emunb, .esize = esize,
@@ -650,33 +513,33 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- 2
- NOS = ncol(eta) / Musual
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ NOS <- ncol(eta) / Musual
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb, earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS), drop = FALSE],
+ kmat <- eta2theta(eta[, Musual*(1:NOS), drop = FALSE],
.lsize, earg = .esize )
- po0 = (kmat / (kmat + munb))^kmat
+ po0 <- (kmat / (kmat + munb))^kmat
munb / (1 - po0)
}, list( .lsize = lsize, .lmunb = lmunb,
.esize = esize, .emunb = emunb ))),
last = eval(substitute(expression({
- temp0303 = c(rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))
+ temp0303 <- c(rep( .lmunb , length = NOS),
+ rep( .lsize , length = NOS))
names(temp0303) =
c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = Musual)]
- misc$link = temp0303 # Already named
+ temp0303 <- temp0303[interleave.VGAM(M, M = Musual)]
+ misc$link <- temp0303 # Already named
- misc$earg = vector("list", Musual*NOS)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", Musual*NOS)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = .emunb
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-1]] <- .emunb
+ misc$earg[[Musual*ii ]] <- .esize
}
- misc$nsimEIM = .nsimEIM
- misc$imethod = .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$imethod <- .imethod
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize,
.nsimEIM = nsimEIM, .imethod = imethod ))),
@@ -684,10 +547,10 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 2
NOS <- ncol(eta) / Musual
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
- .lsize, earg = .esize )
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize, earg = .esize )
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(w * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE))
@@ -700,50 +563,50 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
Musual <- 2
NOS <- extra$NOS
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
- dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
- dsize.deta = dtheta.deta(kmat, .lsize, earg = .esize )
- NOS = ncol(eta) / Musual
+ dmunb.deta <- dtheta.deta(munb, .lmunb, earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat, .lsize, earg = .esize )
+ NOS <- ncol(eta) / Musual
- tempk = kmat / (kmat + munb)
- tempm = munb / (kmat + munb)
- prob0 = tempk^kmat
- oneminusf0 = 1 - prob0
- df0.dmunb = -tempk * prob0
- df0.dkmat = prob0 * (tempm + log(tempk))
- df02.dmunb2 = prob0 * tempk / (kmat + munb) - tempk * df0.dmunb
- df02.dkmat2 = (prob0 / kmat) * tempm^2
- df02.dkmat.dmunb = prob0 * (-tempk) * (tempm + log(tempk)) -
- tempm * prob0 / (kmat + munb)
+ tempk <- kmat / (kmat + munb)
+ tempm <- munb / (kmat + munb)
+ prob0 <- tempk^kmat
+ oneminusf0 <- 1 - prob0
+ df0.dmunb <- -tempk * prob0
+ df0.dkmat <- prob0 * (tempm + log(tempk))
+ df02.dmunb2 <- prob0 * tempk / (kmat + munb) - tempk * df0.dmunb
+ df02.dkmat2 <- (prob0 / kmat) * tempm^2
+ df02.dkmat.dmunb <- prob0 * (-tempk) * (tempm + log(tempk)) -
+ tempm * prob0 / (kmat + munb)
- dl.dmunb = y / munb - (y + kmat) / (munb + kmat) +
- df0.dmunb / oneminusf0
- dl.dsize = digamma(y + kmat) - digamma(kmat) -
- (y + kmat)/(munb + kmat) + 1 + log(tempk) +
- df0.dkmat / oneminusf0
+ dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize <- digamma(y + kmat) - digamma(kmat) -
+ (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
- myderiv = c(w) * cbind(dl.dmunb * dmunb.deta,
- dl.dsize * dsize.deta)
+ myderiv <- c(w) * cbind(dl.dmunb * dmunb.deta,
+ dl.dsize * dsize.deta)
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize ))),
weight = eval(substitute(expression({
run.varcov =
- wz = matrix(0.0, n, 2 * Musual * NOS - 1)
+ wz <- matrix(0.0, n, 2 * Musual * NOS - 1)
if (FALSE) {
- usualmeanY = munb
- meanY = usualmeanY / oneminusf0
- ed2l.dmu2 = meanY / munb^2 -
+ usualmeanY <- munb
+ meanY <- usualmeanY / oneminusf0
+ ed2l.dmu2 <- meanY / munb^2 -
(meanY + kmat) / (munb + kmat)^2 -
df02.dmunb2 / oneminusf0 -
(df0.dmunb / oneminusf0)^2
@@ -754,24 +617,24 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
{
- ind2 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind2 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- ysim = rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
- dim(ysim) = c(n, NOS)
+ ysim <- rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
+ dim(ysim) <- c(n, NOS)
- dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat) +
- df0.dmunb / oneminusf0
- dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat) / (munb + kmat) + 1 + log(tempk) +
- df0.dkmat / oneminusf0
+ dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
+ (ysim + kmat) / (munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
for(kk in 1:NOS) {
- temp2 = cbind(dl.dmunb[, kk],
- dl.dsize[, kk]) *
- cbind(dmunb.deta[, kk],
- dsize.deta[, kk])
- small.varcov = temp2[, ind2$row.index] *
- temp2[, ind2$col.index]
+ temp2 <- cbind(dl.dmunb[, kk],
+ dl.dsize[, kk]) *
+ cbind(dmunb.deta[, kk],
+ dsize.deta[, kk])
+ small.varcov <- temp2[, ind2$row.index] *
+ temp2[, ind2$col.index]
run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] =
run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] +
@@ -782,8 +645,8 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
}
} # ii
- run.varcov = cbind(run.varcov / .nsimEIM )
- wz = if (intercept.only)
+ run.varcov <- cbind(run.varcov / .nsimEIM )
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
@@ -797,12 +660,12 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
-dposgeom = function(x, prob, log = FALSE) {
+dposgeom <- function(x, prob, log = FALSE) {
dgeom(x - 1, prob = prob, log = log)
}
-pposgeom = function(q, prob) {
+pposgeom <- function(q, prob) {
if (!is.Numeric(prob, positive = TRUE))
stop("bad input for argument 'prob'")
L <- max(length(q), length(prob))
@@ -815,24 +678,24 @@ pposgeom = function(q, prob) {
}
-qposgeom = function(p, prob) {
+qposgeom <- function(p, prob) {
- ans = qgeom(pgeom(0, prob, lower.tail = FALSE) * p +
- dgeom(0, prob),
- prob = prob)
- ans[p > 1] = NaN
- ans[p < 0] = NaN
- ans[p == 1] = Inf
+ ans <- qgeom(pgeom(0, prob, lower.tail = FALSE) * p +
+ dgeom(0, prob),
+ prob = prob)
+ ans[p > 1] <- NaN
+ ans[p < 0] <- NaN
+ ans[p == 1] <- Inf
ans
}
-rposgeom = function(n, prob) {
+rposgeom <- function(n, prob) {
qgeom(p = runif(n, min = dgeom(0, prob)), prob)
}
@@ -844,7 +707,7 @@ rposgeom = function(n, prob) {
-dpospois = function(x, lambda, log = FALSE) {
+dpospois <- function(x, lambda, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -853,9 +716,9 @@ dpospois = function(x, lambda, log = FALSE) {
if (!is.Numeric(lambda, positive = TRUE))
stop("bad input for argument 'lambda'")
L <- max(length(x), length(lambda))
- x = rep(x, len = L); lambda = rep(lambda, len = L);
+ x <- rep(x, len = L); lambda <- rep(lambda, len = L);
- ans = if (log.arg) {
+ ans <- if (log.arg) {
ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
log1p(-exp(-lambda)))
} else {
@@ -865,12 +728,12 @@ dpospois = function(x, lambda, log = FALSE) {
}
-ppospois = function(q, lambda) {
+ppospois <- function(q, lambda) {
if (!is.Numeric(lambda, positive = TRUE))
stop("bad input for argument 'lambda'")
L <- max(length(q), length(lambda))
- if (length(q) != L) q = rep(q, length.out = L);
- if (length(lambda) != L) lambda = rep(lambda, length.out = L);
+ if (length(q) != L) q <- rep(q, length.out = L);
+ if (length(lambda) != L) lambda <- rep(lambda, length.out = L);
ifelse(q < 1, 0,
(ppois(q, lambda) -
@@ -879,29 +742,29 @@ ppospois = function(q, lambda) {
}
-qpospois = function(p, lambda) {
+qpospois <- function(p, lambda) {
- ans = qpois(ppois(0, lambda, lower.tail = FALSE) * p +
- dpois(0, lambda),
- lambda = lambda)
+ ans <- qpois(ppois(0, lambda, lower.tail = FALSE) * p +
+ dpois(0, lambda),
+ lambda = lambda)
- ans[p > 1] = NaN
- ans[p < 0] = NaN
- ans[p == 1] = Inf
+ ans[p > 1] <- NaN
+ ans[p < 0] <- NaN
+ ans[p == 1] <- Inf
ans
}
-rpospois = function(n, lambda) {
+rpospois <- function(n, lambda) {
qpois(p = runif(n, min = dpois(0, lambda)), lambda)
}
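
The r* functions here (rposgeom, rpospois, and rposbinom below) all use the same inverse-CDF trick: the uniform draw is restricted to (P(X = 0), 1), so the parent quantile function can never return 0. A small sketch, where the quoted values are expectations rather than guarantees:

    set.seed(1)
    yy <- rpospois(n = 1000, lambda = 0.5)
    min(yy)    # 1: zeros are impossible by construction
    mean(yy)   # near lambda / (1 - exp(-lambda)) = 1.271, the positive-Poisson mean
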
-rposnegbin = function(n, size, prob = NULL, munb = NULL) {
+rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
if (!is.null(munb)) {
if (!is.null(prob))
stop("'prob' and 'mu' both specified")
@@ -918,9 +781,8 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
- pospoisson = function(link = "loge", expected = TRUE,
- ilambda = NULL, imethod = 1, zero = NULL)
-{
+ pospoisson <- function(link = "loge", expected = TRUE,
+ ilambda = NULL, imethod = 1, zero = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -1059,16 +921,16 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
-pposbinom = function(q, size, prob
- ) {
+pposbinom <- function(q, size, prob
+ ) {
if (!is.Numeric(prob, positive = TRUE))
stop("no zero or non-numeric values allowed for argument 'prob'")
L <- max(length(q), length(size), length(prob))
- if (length(q) != L) q = rep(q, length.out = L);
- if (length(size) != L) size = rep(size, length.out = L);
- if (length(prob) != L) prob = rep(prob, length.out = L);
+ if (length(q) != L) q <- rep(q, length.out = L);
+ if (length(size) != L) size <- rep(size, length.out = L);
+ if (length(prob) != L) prob <- rep(prob, length.out = L);
ifelse(q < 1, 0,
(pbinom(q = q, size = size, prob = prob) -
@@ -1077,8 +939,8 @@ pposbinom = function(q, size, prob
}
-qposbinom = function(p, size, prob
- ) {
+qposbinom <- function(p, size, prob
+ ) {
@@ -1087,44 +949,44 @@ qposbinom = function(p, size, prob
dbinom(0, size, prob),
size = size, prob = prob)
- ans[p > 1] = NaN
- ans[p < 0] = NaN
- ans[p == 1] = size[p == 1]
+ ans[p > 1] <- NaN
+ ans[p < 0] <- NaN
+ ans[p == 1] <- size[p == 1]
ans
}
-rposbinom = function(n, size, prob) {
+rposbinom <- function(n, size, prob) {
qbinom(p = runif(n, min = dbinom(0, size, prob)), size, prob)
}
-dposbinom = function(x, size, prob, log = FALSE) {
+dposbinom <- function(x, size, prob, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
L <- max(length(x), length(size), length(prob))
- x = rep(x, len = L);
- size = rep(size, len = L);
- prob = rep(prob, len = L);
+ x <- rep(x, len = L);
+ size <- rep(size, len = L);
+ prob <- rep(prob, len = L);
- answer = NaN * x
+ answer <- NaN * x
is0 <- (x == 0)
ok2 <- (prob > 0) & (prob <= 1) &
(size == round(size)) & (size > 0)
- answer = dbinom(x = x, size = size, prob = prob, log = TRUE) -
- log1p(-dbinom(x = 0 , size = size, prob = prob))
- answer[!ok2] = NaN
+ answer <- dbinom(x = x, size = size, prob = prob, log = TRUE) -
+ log1p(-dbinom(x = 0, size = size, prob = prob))
+ answer[!ok2] <- NaN
if (log.arg) {
- answer[is0 & ok2] = log(0.0)
+ answer[is0 & ok2] <- log(0.0)
} else {
- answer = exp(answer)
- answer[is0 & ok2] = 0.0
+ answer <- exp(answer)
+ answer[is0 & ok2] <- 0.0
}
answer
}
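
Analogous sketch for dposbinom(): the support is 1..size, so the density sums to 1 over that range and puts zero mass at 0.

    sum(dposbinom(x = 1:10, size = 10, prob = 0.2))   # 1
    dposbinom(x = 0, size = 10, prob = 0.2)           # 0
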
@@ -1164,7 +1026,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
namesof("prob", link, earg = earg, tag = FALSE),
"\n"),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
dotzero <- .zero
Musual <- 1
@@ -1234,87 +1096,96 @@ dposbinom = function(x, size, prob, log = FALSE) {
mustart <- NULL
}), list( .link = link,
.earg = earg, .mv = mv ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- w = extra$w
- mymu = eta2theta(eta, .link , earg = .earg )
- nvec = if ( .mv ) {
+ w <- extra$w
+ binprob <- eta2theta(eta, .link , earg = .earg )
+ nvec <- if ( .mv ) {
w
} else {
if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
}
- mymu / (1.0 - (1.0 - mymu)^(nvec))
+ binprob / (1.0 - (1.0 - binprob)^nvec)
},
+
list( .link = link, .earg = earg, .mv = mv ))),
last = eval(substitute(expression({
- extra$w = NULL # Kill it off
+ extra$w <- NULL # Kill it off
- misc$link = rep( .link , length = M)
- names(misc$link) = if (M > 1) dn2 else "prob"
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) dn2 else "prob"
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- misc$expected = TRUE
+ misc$expected <- TRUE
- misc$mv = .mv
- w = as.numeric(w)
+ misc$mv <- .mv
+ w <- as.numeric(w)
}), list( .link = link, .earg = earg, .mv = mv ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ycounts = if ( .mv ) {
+ ycounts <- if ( .mv ) {
round(y * extra$orig.w)
} else {
if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
}
- nvec = if ( .mv ) {
+ nvec <- if ( .mv ) {
w
} else {
if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
}
- use.orig.w = if (is.numeric(extra$orig.w)) extra$orig.w else 1
- mymu = eta2theta(eta, .link , earg = .earg )
+ use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else 1
+ binprob <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(use.orig.w * dposbinom(x = ycounts, size = nvec,
- prob = mymu, log = TRUE))
+ prob = binprob, log = TRUE))
}
}, list( .link = link, .earg = earg, .mv = mv ))),
+
vfamily = c("posbinomial"),
deriv = eval(substitute(expression({
- use.orig.w = if (is.numeric(extra$orig.w)) extra$orig.w else
- rep(1, n)
-
- nvec = if ( .mv ) {
- w
- } else {
- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- }
- mymu = eta2theta(eta, .link , earg = .earg )
- dmu.deta = dtheta.deta(mymu, .link , earg = .earg )
+ use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else
+ rep(1, n)
+
+ nvec <- if ( .mv ) {
+ w
+ } else {
+ if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ }
+ binprob <- eta2theta(eta, .link , earg = .earg )
+ dmu.deta <- dtheta.deta(binprob, .link , earg = .earg )
- temp1 = 1 - (1 - mymu)^nvec
- temp2 = (1 - mymu)^2
- temp3 = (1 - mymu)^(nvec-2)
+ temp1 <- 1 - (1 - binprob)^nvec
+ temp2 <- (1 - binprob)^2
+ temp3 <- (1 - binprob)^(nvec-2)
- dl.dmu = y / mymu - (1 - y) / (1 - mymu) -
- (1 - mymu) * temp3 / temp1
+ dl.dmu <- y / binprob - (1 - y) / (1 - binprob) -
+ (1 - binprob) * temp3 / temp1
c(w) * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg, .mv = mv ))),
weight = eval(substitute(expression({
- ned2l.dmu2 = 1 / (mymu * temp1) + 1 / temp2 -
- mymu / (temp1 * temp2) -
- (nvec-1) * temp3 / temp1 -
- nvec * (temp2^(nvec-1)) / temp1^2
- wz = c(w) * ned2l.dmu2 * dmu.deta^2
+
+ ned2l.dmu2 <- 1 / (binprob * temp1) +
+ (1 - mu) / temp2 -
+ (nvec-1) * temp3 / temp1 -
+ nvec * (temp2^(nvec-1)) / temp1^2
+
+
+
+ wz <- c(w) * ned2l.dmu2 * dmu.deta^2
wz
}), list( .link = link, .earg = earg, .mv = mv ))))
}
@@ -1325,171 +1196,1078 @@ dposbinom = function(x, size, prob, log = FALSE) {
-if (FALSE) rasch <-
- function(lability = "identity", eability = list(),
- ldifficulty = "identity", edifficulty = list(),
- iability = NULL,
- idifficulty = NULL,
- parallel = TRUE) {
+ posbernoulli.t <-
+ function(link = "logit",
+ parallel.t = FALSE,
+ apply.parint = TRUE,
+ iprob = NULL) {
+
- if (mode(labil) != "character" && mode(labil) != "name")
- labil = as.character(substitute(labil))
- if (!is.list(eabil)) eabil = list()
- if (!is.list(ediff)) ediff = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
- if (length(iability))
- if (!is.Numeric(iability))
- stop("bad input in argument 'iability'")
- if (length(idifficulty))
- if (!is.Numeric(idifficulty))
- stop("bad input in argument 'idifficulty'")
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ max(iprob) >= 1)
+ stop("argument 'iprob' must have values in (0, 1)")
- labil = lability
- eabil = eability
- ldiff = ldifficulty
- ediff = edifficulty
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
new("vglmff",
- blurb = c("Rasch model\n\n",
+ blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model ",
+ "with temporal effects (M_t)\n\n",
"Links: ",
- namesof("ability", labil, earg = eabil, tag = FALSE), ", ",
- namesof("difficulty", ldiff, earg = ediff, tag = FALSE),
+ namesof("prob1", link, earg = earg, tag = FALSE), ", ",
+ namesof("prob2", link, earg = earg, tag = FALSE), ", ..., ",
+ namesof("probM", link, earg = earg, tag = FALSE),
"\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel.t , constraints,
+ apply.int = .apply.parint , # TRUE,
+ cm.default = diag(M),
+ cm.intercept.default = diag(M))
+ }), list( .parallel.t = parallel.t,
+ .apply.parint = apply.parint ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ multipleResponses = TRUE,
+ apply.parint = .apply.parint ,
+ parallel.t = .parallel.t )
+ }, list( .parallel.t = parallel.t,
+ .apply.parint = apply.parint ))),
initialize = eval(substitute(expression({
- mustart.orig = mustart
- y = as.matrix(y)
- extra$ncoly = ncoly = ncol(y)
- M = n + ncoly # number of ability and number of item parameters
+ Musual <- 1
+ mustart.orig <- mustart
+ y <- as.matrix(y)
+ M <- ncoly <- ncol(y)
+ extra$tau <- tau <- ncol(y)
+ extra$orig.w <- w
- mustart = matrix(apply(y, 2, weighted.mean, w = w),
- n, ncol(y), byrow = TRUE)
- mustart[mustart == 0] = 0.05
- mustart[mustart == 1] = 0.95
+ w <- matrix(w, n, ncoly)
+ mustart <- matrix(colSums(y) / colSums(w),
+ n, ncol(y), byrow = TRUE)
+ mustart[mustart == 0] <- 0.05
+ mustart[mustart == 1] <- 0.95
if (ncoly == 1)
- stop("the response is univariate, therefore use binomialff()")
+ stop("the response is univariate, therefore use posbinomial()")
+
+
+
+
if (!all(y == 0 | y == 1))
stop("response must contain 0s and 1s only")
- if (any(w <= 0))
- stop("argument 'weights' must contain positive values only")
+ if (!all(w == 1))
+ stop("argument 'weight' must contain 1s only")
+
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = as.character(1:ncoly)
- dn2 = as.character(1:nrow(y))
- dn2 = if (length(dn2)) {
- paste("ability", dn2, sep = "")
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("zz", 1:Mdiv2, sep = "")
+ paste("prob", 1:M, sep = "")
}
- dn2 = c(dn2, paste("item", as.character(1:nrow(y)), sep = ""))
- predictors.names <-
- namesof(dn2, .labil, earg = .eability, short = TRUE)
+ predictors.names <- namesof(dn2, .link , earg = .earg, short = TRUE)
- if (!length(etastart)) {
-
- init.abil = runif(n) / (1 + colSums(y) - (1:n))
- init.diff = -logit(apply(y, 2, weighted.mean, w = w), inverse = TRUE)
-
- etastart =
- cbind(matrix(init.abil, n, n), # byrow = TRUE ,
- matrix(init.diff, n, ncoly, byrow = TRUE))
+ if (length(extra)) extra$w <- w else extra <- list(w = w)
+ if (!length(etastart)) {
+ mustart.use <- if (length(mustart.orig)) {
+ mustart.orig
+ } else {
+ mustart
+ }
+ etastart <- cbind(theta2eta(mustart.use, .link , earg = .earg ))
}
- }), list( .labil = labil, .eabil = eabil,
- .ldiff = ldiff, .ediff = ediff,
- .iability = iability,
- .idifficulty = idifficulty ))),
-
+ mustart <- NULL
+ }), list( .link = link, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- myprobs = eta2theta(eta, "logit", earg = list())
- myprobs
- }, list( .labil = labil, .eabil = eabil,
- .ldiff = ldiff, .ediff = ediff ))),
+ probs <- eta2theta(eta, .link , earg = .earg )
+ logAA0 <- rowSums(log1p(-probs))
+ AA0 <- exp(logAA0)
+ AAA <- exp(log1p(-AA0)) # 1 - AA0
+ probs / AAA
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
+ extra$w <- NULL # Kill it off
- misc$link = c(rep( .labil, length = n),
- rep( .ldiff, length = ncoly))
-
- names(misc$link) = dn2
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:n)
- misc$earg[[ii]] = .eabil
- for(ii in 1:ncoly)
- misc$earg[[n + ii]] = .ediff
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- if (M > 1) dn2 else "prob"
- misc$expected = TRUE
- misc$iability = .iability
- misc$idifficulty = .idifficulty
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M) misc$earg[[ii]] <- .earg
- }), list( .labil = labil, .eabil = eabil,
- .ldiff = ldiff, .ediff = ediff,
- .iability = iability,
- .idifficulty = idifficulty ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * (y * log(mu) + (1 - y) * log1p(-mu)))
- }
- }, list( .labil = labil, .eabil = eabil,
- .ldiff = ldiff, .ediff = ediff ))),
- vfamily = c("rasch"),
- deriv = eval(substitute(expression({
- dabil.deta = 1
- ddiff.deta = 1
- dl.dabil = matrix(colSums(y - mu), n, n)
- dl.ddiff = -cbind(y - mu)
+ misc$mv <- TRUE
+ misc$iprob <- .iprob
- deriv.ans = cbind(dl.dabil * dabil.deta,
- dl.ddiff * ddiff.deta)
- deriv.ans
- }), list( .labil = labil, .eabil = eabil,
- .ldiff = ldiff, .ediff = ediff ))),
- weight = eval(substitute(expression({
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+ R[lower.tri(R)] <- 0
+ tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
+ R = R, w = w,
+ X_vlm = X_vlm_save, Hlist = constraints,
+ extra = extra, model.type = "t")
+ extra$N.hat <- tmp6$N.hat
+ extra$SE.N.hat <- tmp6$SE.N.hat
- wz = matrix(0, n, dimm(M))
- wz[, 1:M] = sqrt( .Machine$double.eps )
- tmp1 = colSums(mu * (1 - mu))
- for (ii in 1:n)
- wz[ii, ii] = tmp1[ii]
+ misc$parallel.t <- .parallel.t
+ misc$apply.parint <- .apply.parint
+ }), list( .link = link, .earg = earg,
+ .parallel.t = parallel.t,
+ .apply.parint = apply.parint,
+ .iprob = iprob ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ycounts <- y
+ use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- wz[, n + (1:ncoly)] = mu * (1 - mu)
+ probs <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
- for (ii in 1:n)
- for (jay in 1:ncoly)
- wz[ii, iam(ii, jay, M = M)] = -mu[ii, jay] * (1 - mu[ii, jay])
+ sum(dposbern(x = ycounts, # size = 1, # Bernoulli trials
+ prob = probs, prob0 = probs, log = TRUE))
- wz = wz * w
+ sum(use.orig.w *
+ dposbern(x = ycounts, # size = 1, # Bernoulli trials
+ prob = probs, prob0 = probs, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("posbernoulli.t"),
+ deriv = eval(substitute(expression({
+
+
+
+ probs <- eta2theta(eta, .link , earg = .earg )
+ dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
+
+ logAA0 <- rowSums(log1p(-probs))
+ AA0 <- exp(logAA0)
+ AAA <- exp(log1p(-AA0)) # 1 - AA0
+
+ B_s <- AA0 / (1 - probs)
+ B_st <- array(AA0, c(n, M, M))
+ for(slocal in 1:(M-1))
+ for(tlocal in (slocal+1):M)
+ B_st[, slocal, tlocal] <-
+ B_st[, tlocal, slocal] <- B_s[, slocal] / (1 - probs[, tlocal])
+
+
+
+
+ temp2 <- (1 - probs)^2
+
+ dl.dprobs <- y / probs - (1 - y) / (1 - probs) - B_s / AAA
+
+ deriv.ans <- w * dl.dprobs * dprobs.deta
+ deriv.ans
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+
+ ed2l.dprobs2 <- 1 / (probs * AAA) + 1 / temp2 -
+ probs / (AAA * temp2) - (B_s / AAA)^2
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[, 1:M] <- ed2l.dprobs2 * (dprobs.deta^2)
+
+ for(slocal in 1:(M-1))
+ for(tlocal in (slocal+1):M)
+ wz[, iam(slocal, tlocal, M = M)] <- dprobs.deta[, slocal] *
+ dprobs.deta[, tlocal] *
+ (B_st[,slocal,tlocal] +
+ B_s [,slocal] *
+ B_s [,tlocal] / AAA) / (-AAA)
+
+
+
+ wz
+ }), list( .link = link, .earg = earg ))))
+}
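
A hedged usage sketch for the M_t family above, assuming this VGAM build is loaded. The response is the 0/1 capture-history matrix (one column per sampling occasion), restricted to animals caught at least once, as the initialize slot expects; the column names y1..y4 and the capture probabilities are illustrative assumptions.

    library(VGAM)
    set.seed(123)
    N <- 200; p <- c(0.3, 0.4, 0.2, 0.35)   # occasion-specific capture probs
    Y <- sapply(p, function(pj) rbinom(N, size = 1, prob = pj))
    Y <- Y[rowSums(Y) > 0, ]                # only ever-captured animals are observed
    colnames(Y) <- paste0("y", 1:4)
    cdata <- data.frame(Y)
    fit.t <- vglm(cbind(y1, y2, y3, y4) ~ 1, posbernoulli.t, data = cdata)
    coef(fit.t, matrix = TRUE)              # four intercepts on the logit scale
    fit.t@extra$N.hat                       # population-size estimate from the 'last' slot
    fit.t@extra$SE.N.hat
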
+
+
+
+
+
+ posbernoulli.b <-
+ function(link = "logit",
+ parallel.b = FALSE, # TRUE,
+ apply.parint = TRUE,
+ icap.prob = NULL,
+ irecap.prob = NULL
+ ) {
+
+
+
+
+ fit.type <- 1 # Currently only this is implemented
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(icap.prob))
+ if (!is.Numeric(icap.prob, positive = TRUE) ||
+ max(icap.prob) >= 1)
+ stop("argument 'icap.prob' must have values in (0, 1)")
+ if (length(irecap.prob))
+ if (!is.Numeric(irecap.prob, positive = TRUE) ||
+ max(irecap.prob) >= 1)
+ stop("argument 'irecap.prob' must have values in (0, 1)")
+
+ if (!is.logical(parallel.b) ||
+ length(parallel.b) != 1)
+ stop("argument 'parallel.b' must be a single logical")
+
+
+ new("vglmff",
+ blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model ",
+ "with behavioural effects (M_b)\n\n",
+ "Links: ",
+ namesof("cap.prob", link, earg = earg, tag = FALSE), ", ",
+ namesof("recap.prob", link, earg = earg, tag = FALSE),
+ "\n"),
+
+ constraints = eval(substitute(expression({
+
+ constraints <- cm.vgam(matrix(1, 2, 1), x = x,
+ bool = .parallel.b ,
+ constraints = constraints,
+ apply.int = .apply.parint , # TRUE,
+ cm.default = matrix(1, 2, 1),
+ cm.intercept.default = cbind(1, 0:1))
+ }), list( .parallel.b = parallel.b,
+ .apply.parint = apply.parint ))),
+
+ infos = eval(substitute(function(...) {
+ list( Musual = 2,
+ apply.parint = .apply.parint ,
+ multipleResponses = FALSE)
+ }, list(
+ .apply.parint = apply.parint
+ ))),
+
+ initialize = eval(substitute(expression({
+ Musual <- 2
+ if (!is.matrix(y) || ncol(y) == 1)
+ stop("the response appears to be univariate")
+
+ if (!all(y == 0 | y == 1))
+ stop("response must contain 0s and 1s only")
+
+ orig.y <- y
+ extra$orig.w <- w
+ extra$tau <- tau <- ncol(y)
+ mustart.orig <- mustart
+ M <- 2
+
+
+ tmp3 <- aux.posbernoulli(y)
+ y0i <- extra$y0i <- tmp3$y0i
+ yr0i <- extra$yr0i <- tmp3$yr0i
+ yr1i <- extra$yr1i <- tmp3$yr1i
+ cap1 <- extra$cap1 <- tmp3$cap1
+ cap.hist1 <- extra$cap.hist1 <- tmp3$cap.hist1
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.min = 2,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = ncol(y),
+ maximize = TRUE)
+ w <- temp5$w # Retain the 0-1 response
+ y <- temp5$y # Retain the 0-1 response
+
+ mustart <- matrix(colMeans(y), n, tau, byrow = TRUE)
+ mustart <- (mustart + orig.y) / 2
+
+
+
+
+ predictors.names <-
+ c(namesof( "cap.prob", .link , earg = .earg, short = TRUE),
+ namesof("recap.prob", .link , earg = .earg, short = TRUE))
+
+ if (tau >= 4) {
+ pbd <- posbern.aux(tau = tau)
+ }
+
+ if (!length(etastart)) {
+ mustart.use <- if (length(mustart.orig)) {
+ mustart.orig
+ } else {
+ mustart
+ }
+
+ etastart <-
+ cbind(theta2eta(rowMeans(mustart.use), .link , earg = .earg ),
+ theta2eta(rowMeans(mustart.use), .link , earg = .earg ))
+
+ if (length( .icap.prob ))
+ etastart[, 1] <- theta2eta( .icap.prob , .link , earg = .earg )
+ if (length( .irecap.prob ))
+ etastart[, 2] <- theta2eta( .irecap.prob , .link , earg = .earg )
+ }
+ mustart <- NULL
+ }), list( .link = link, .earg = earg,
+ .icap.prob = icap.prob,
+ .irecap.prob = irecap.prob
+ ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
+ rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
+ cap.probs <- matrix(cap.probs, nrow(eta), extra$tau)
+ rec.probs <- matrix(rec.probs, nrow(eta), extra$tau)
+ tau <- extra$tau
+
+ if ( .fit.type == 1) {
+ fv <- rec.probs
+ mat.index <- cbind(1:nrow(fv), extra$cap1)
+ fv[mat.index] <- cap.probs[mat.index]
+ fv[extra$cap.hist1 == 0] <- cap.probs[extra$cap.hist1 == 0]
+ } else if ( .fit.type == 2) {
+ fv <- cap.probs
+ } else if ( .fit.type == 3) {
+ fv <- rec.probs
+ } else if ( .fit.type == 4) {
+ stop("argument 'fit.type' unmatched")
+ } else {
+ stop("argument 'fit.type' unmatched")
+ }
+ fv
+ }, list( .link = link,
+ .fit.type = fit.type,
+ .earg = earg ))),
+ last = eval(substitute(expression({
+
+ misc$link <- c( .link , .link )
+ names(misc$link) <- predictors.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ misc$earg[[1]] <- .earg
+ misc$earg[[2]] <- .earg
+
+ misc$expected <- TRUE
+ misc$mv <- TRUE
+ misc$icap.prob <- .icap.prob
+ misc$irecap.prob <- .irecap.prob
+ misc$parallel.b <- .parallel.b
+ misc$fit.type <- .fit.type
+ misc$multipleResponses <- FALSE
+ if (tau >= 4) {
+ misc$pbd <- pbd # Needed for vcov() post-analysis.
+ }
+ misc$apply.parint <- .apply.parint
+
+
+
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+ R[lower.tri(R)] <- 0
+ tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
+ R = R, w = w,
+ X_vlm = X_vlm_save, Hlist = constraints,
+ extra = extra, model.type = "b")
+ extra$N.hat <- tmp6$N.hat
+ extra$SE.N.hat <- tmp6$SE.N.hat
+
+
+ }), list( .link = link, .earg = earg,
+ .fit.type = fit.type,
+ .parallel.b = parallel.b,
+ .icap.prob = icap.prob,
+ .irecap.prob = irecap.prob,
+ .apply.parint = apply.parint
+ ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+
+ ycounts <- y
+ use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+
+ cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
+ rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
+ cap.probs <- matrix(cap.probs, nrow(eta), extra$tau)
+ rec.probs <- matrix(rec.probs, nrow(eta), extra$tau)
+
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(use.orig.w *
+ dposbern(x = ycounts, # Bernoulli trials
+ prob = mu, prob0 = cap.probs, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("posbernoulli.b"),
+ deriv = eval(substitute(expression({
+ cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
+ rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
+ y0i <- extra$y0i
+ yr0i <- extra$yr0i
+ yr1i <- extra$yr1i
+ cap1 <- extra$cap1
+ tau <- extra$tau
+
+ dcapprobs.deta <- dtheta.deta(cap.probs, .link , earg = .earg )
+ drecprobs.deta <- dtheta.deta(rec.probs, .link , earg = .earg )
+
+ QQQ <- (1 - cap.probs)^tau
+ dl.dcap <- 1 / cap.probs -
+ y0i / (1 - cap.probs) -
+ tau * ((1 - cap.probs)^(tau - 1)) / (1 - QQQ)
+
+ dl.drec <- yr1i / rec.probs -
+ yr0i / (1 - rec.probs)
+
+
+ deriv.ans <- c(w) * cbind(dl.dcap * dcapprobs.deta,
+ dl.drec * drecprobs.deta)
+ deriv.ans
+ }), list( .link = link, .earg = earg ))),
+
+ weight = eval(substitute(expression({
+
+ wz <- matrix(0, n, M) # Diagonal EIM
+
+
+ if (tau == 2)
+ wz[, iam(2, 2, M = M)] <- (cap.probs / (rec.probs * (1 - rec.probs) *
+ (1 - QQQ))) * drecprobs.deta^2
+ if (tau == 3)
+ wz[, iam(2, 2, M = M)] <- (cap.probs * (3 - cap.probs) / (
+ rec.probs * (1 - rec.probs) *
+ (1 - QQQ))) * drecprobs.deta^2
+
+
+ if (tau >= 4) {
+ # rec.probs = rec.probs)
+ eim.rec.tot <- 0
+ for (ii in 1:nrow(pbd$part1.rec)) {
+ if (pbd$ml..konst.rec[ii, 1] != 0)
+ eim.rec.tot <- eim.rec.tot +
+ pbd$ml..konst.rec[ii, 1] * (( cap.probs)^pbd$part1.rec[ii, 1] *
+ (1-cap.probs)^pbd$part1.rec[ii, 2] *
+ ( rec.probs)^pbd$part1.rec[ii, 3] *
+ (1-rec.probs)^pbd$part1.rec[ii, 4])
+ if (pbd$ml..konst.rec[ii, 2] != 0)
+ eim.rec.tot <- eim.rec.tot +
+ pbd$ml..konst.rec[ii, 2] * (( cap.probs)^pbd$part2.rec[ii, 1] *
+ (1-cap.probs)^pbd$part2.rec[ii, 2] *
+ ( rec.probs)^pbd$part2.rec[ii, 3] *
+ (1-rec.probs)^pbd$part2.rec[ii, 4])
+ if (pbd$ml..konst.rec[ii, 3] != 0)
+ eim.rec.tot <- eim.rec.tot +
+ pbd$ml..konst.rec[ii, 3] * (( cap.probs)^pbd$part3.rec[ii, 1] *
+ (1-cap.probs)^pbd$part3.rec[ii, 2] *
+ ( rec.probs)^pbd$part3.rec[ii, 3] *
+ (1-rec.probs)^pbd$part3.rec[ii, 4])
+ if (pbd$ml..konst.rec[ii, 4] != 0)
+ eim.rec.tot <- eim.rec.tot +
+ pbd$ml..konst.rec[ii, 4] * (( cap.probs)^pbd$part4.rec[ii, 1] *
+ (1-cap.probs)^pbd$part4.rec[ii, 2] *
+ ( rec.probs)^pbd$part4.rec[ii, 3] *
+ (1-rec.probs)^pbd$part4.rec[ii, 4])
+ }
+ eim.rec.tot <- (eim.rec.tot / (1 - QQQ)) * drecprobs.deta^2
+ wz[, iam(2, 2, M = M)] <- eim.rec.tot
+ }
+
+
+
+
+
+ dA.dcapprobs <- -tau * ((1 - QQQ) * (tau-1) * (1 - cap.probs)^(tau-2) +
+ tau * (1 - cap.probs)^(2*tau -2)) / (1 - QQQ)^2
+
+ if (tau == 2)
+ wz[, iam(1, 1, M = M)] <-
+ ((2 - 3 * cap.probs + 2 * cap.probs^2) / ((1 - QQQ) *
+ cap.probs * (1 - cap.probs)) + dA.dcapprobs) *
+ dcapprobs.deta^2
+ if (tau == 3)
+ wz[, iam(1, 1, M = M)] <-
+ ((3 + cap.probs * (-6 + cap.probs * (7 + cap.probs * (-3)))) / (
+ (1 - QQQ) * cap.probs * (1 - cap.probs)) + dA.dcapprobs) *
+ dcapprobs.deta^2
+
+
+ if (tau >= 4) {
+
+ eim.cap.tot <- 0
+ for (ii in 1:nrow(pbd$part1.cap)) {
+ if (pbd$ml..konst.cap[ii, 1] != 0)
+ eim.cap.tot <- eim.cap.tot +
+ pbd$ml..konst.cap[ii, 1] * (( cap.probs)^pbd$part1.cap[ii, 1] *
+ (1-cap.probs)^pbd$part1.cap[ii, 2] *
+ ( rec.probs)^pbd$part1.cap[ii, 3] *
+ (1-rec.probs)^pbd$part1.cap[ii, 4])
+ if (pbd$ml..konst.cap[ii, 2] != 0)
+ eim.cap.tot <- eim.cap.tot +
+ pbd$ml..konst.cap[ii, 2] * (( cap.probs)^pbd$part2.cap[ii, 1] *
+ (1-cap.probs)^pbd$part2.cap[ii, 2] *
+ ( rec.probs)^pbd$part2.cap[ii, 3] *
+ (1-rec.probs)^pbd$part2.cap[ii, 4])
+ if (pbd$ml..konst.cap[ii, 3] != 0)
+ eim.cap.tot <- eim.cap.tot +
+ pbd$ml..konst.cap[ii, 3] * (( cap.probs)^pbd$part3.cap[ii, 1] *
+ (1-cap.probs)^pbd$part3.cap[ii, 2] *
+ ( rec.probs)^pbd$part3.cap[ii, 3] *
+ (1-rec.probs)^pbd$part3.cap[ii, 4])
+ if (pbd$ml..konst.cap[ii, 4] != 0)
+ eim.cap.tot <- eim.cap.tot +
+ pbd$ml..konst.cap[ii, 4] * (( cap.probs)^pbd$part4.cap[ii, 1] *
+ (1-cap.probs)^pbd$part4.cap[ii, 2] *
+ ( rec.probs)^pbd$part4.cap[ii, 3] *
+ (1-rec.probs)^pbd$part4.cap[ii, 4])
+ }
+ eim.cap.tot <- (eim.cap.tot / (1 - QQQ) + dA.dcapprobs) *
+ dcapprobs.deta^2
+ wz[, iam(1, 1, M = M)] <- eim.cap.tot
+ }
+
+
+ wz <- c(w) * wz
wz
- }), list( .labil = labil, .eabil = eabil ))))
+ }), list( .link = link, .earg = earg ))))
}
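
A hedged sketch for the M_b (behavioural-effect) family: two linear predictors, the probability of first capture and the probability of recapture. The simulated animals below are "trap-happy" (recapture more likely than first capture); the names and settings are illustrative assumptions, not part of the package.

    library(VGAM)
    set.seed(123)
    N <- 200; tau <- 4; cap.prob <- 0.25; recap.prob <- 0.5
    Y <- matrix(0, N, tau)
    caught <- rep(FALSE, N)                 # captured at an earlier occasion?
    for (jay in 1:tau) {
      Y[, jay] <- rbinom(N, size = 1, prob = ifelse(caught, recap.prob, cap.prob))
      caught   <- caught | (Y[, jay] == 1)
    }
    colnames(Y) <- paste0("y", 1:tau)
    bdata <- data.frame(Y[rowSums(Y) > 0, ])
    fit.b <- vglm(cbind(y1, y2, y3, y4) ~ 1, posbernoulli.b, data = bdata)
    coef(fit.b, matrix = TRUE)    # cap.prob and recap.prob intercepts (logit scale)
    fit.b@extra$N.hat
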
+posbern.aux <- function(tau) {
+
+ y.all <- matrix(0, 2^tau - 0, tau)
+ for (jlocal in 1:tau)
+ y.all[, jlocal] <- c(rep(0, len = 2^(tau-jlocal)),
+ rep(1, len = 2^(tau-jlocal)))
+ y.all <- y.all[-1, ]
+
+ aux <- aux.posbernoulli(y.all, check.y = FALSE)
+
+
+ nstar <- nrow(y.all)
+ l.power.cap <- matrix(0, nstar, 4)
+ l.konst.cap <- matrix(0, nstar, 4)
+ ml..power.cap <- matrix(0, nstar, 4)
+ ml..konst.cap <- matrix(0, nstar, 4)
+ l.power.rec <- matrix(0, nstar, 4)
+ l.konst.rec <- matrix(0, nstar, 4)
+ ml..power.rec <- matrix(0, nstar, 4)
+ ml..konst.rec <- matrix(0, nstar, 4)
+
+
+
+ l.power.rec[, 3] <- -1
+ l.power.rec[, 4] <- -1
+ for (jlocal in 1:tau) {
+ l.konst.rec[, 3] <-
+ l.konst.rec[, 3] + ifelse(y.all[, jlocal] > 0 & jlocal > aux$cap1, 1, 0)
+ l.konst.rec[, 4] <-
+ l.konst.rec[, 4] - ifelse(y.all[, jlocal] == 0 & jlocal > aux$cap1, 1, 0)
+ }
+
+
+
+ ml..power.rec[, 3] <- -2
+ ml..power.rec[, 4] <- -2
+ ml..konst.rec[, 3] <- l.konst.rec[, 3]
+ ml..konst.rec[, 4] <- -l.konst.rec[, 4]
+
+
+
+ mux.mat <- cbind(1, aux$y0i, aux$yr1i, aux$yr0i)
+ part1.rec <- mux.mat + cbind(ml..power.rec[, 1], 0, 0, 0)
+ part2.rec <- mux.mat + cbind(0, ml..power.rec[, 2], 0, 0)
+ part3.rec <- mux.mat + cbind(0, 0, ml..power.rec[, 3], 0)
+ part4.rec <- mux.mat + cbind(0, 0, 0, ml..power.rec[, 4])
+
+
+
+
+
+
+
+ l.power.cap[, 1] <- 1
+ l.power.cap[, 2] <- -1
+ l.konst.cap[, 1] <- 1
+ l.konst.cap[, 2] <- -aux$y0i
+
+
+
+ ml..power.cap[, 1] <- -2
+ ml..power.cap[, 2] <- -2
+ ml..konst.cap[, 1] <- 1
+ ml..konst.cap[, 2] <- aux$y0i
+
+
+
+ mux.mat <- cbind(1, aux$y0i, aux$yr1i, aux$yr0i)
+ part1.cap <- mux.mat + cbind(ml..power.cap[, 1], 0, 0, 0)
+ part2.cap <- mux.mat + cbind(0, ml..power.cap[, 2], 0, 0)
+ part3.cap <- mux.mat + cbind(0, 0, ml..power.cap[, 3], 0)
+ part4.cap <- mux.mat + cbind(0, 0, 0, ml..power.cap[, 4])
+
+
+
+
+ list( y.all = y.all,
+ part1.cap = part1.cap,
+ part2.cap = part2.cap,
+ part3.cap = part3.cap,
+ part4.cap = part4.cap,
+
+ part1.rec = part1.rec,
+ part2.rec = part2.rec,
+ part3.rec = part3.rec,
+ part4.rec = part4.rec,
+ l.konst.cap = l.konst.cap,
+ l.power.cap = l.power.cap,
+ ml..konst.cap = ml..konst.cap,
+ ml..power.cap = ml..power.cap,
+ l.konst.rec = l.konst.rec,
+ l.power.rec = l.power.rec,
+ ml..konst.rec = ml..konst.rec,
+ ml..power.rec = ml..power.rec)
+}
+
+
+
+
+ posbernoulli.tb <-
+ function(link = "logit",
+ parallel.t = FALSE,
+ parallel.b = FALSE,
+ apply.parint = FALSE,
+ imethod = 1,
+ iprob = NULL,
+ dconst = 0.1,
+ dpower = -2) {
+
+
+
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ max(iprob) >= 1)
+ stop("argument 'iprob' must have values in (0, 1)")
+
+ if (!is.logical(parallel.t) ||
+ length(parallel.t) != 1)
+ stop("argument 'parallel.t' must be a single logical")
+
+ if (!is.logical(parallel.b) ||
+ length(parallel.b) != 1)
+ stop("argument 'parallel.b' must be a single logical")
+
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
+
+
+ new("vglmff",
+ blurb = c("(Multiple) positive-Bernoulli (capture-recapture) model\n",
+ "with temporal and behavioural effects (M_{tb})\n\n",
+ "Links: ",
+ namesof("cap.prob.1", link, earg = earg, tag = FALSE), ", ",
+ namesof("cap.prob.2", link, earg = earg, tag = FALSE), ", ",
+ ", ...,\n",
+ namesof("cap.prob.tau", link, earg = earg, tag = FALSE), ", ",
+ namesof("recap.prob.2", link, earg = earg, tag = FALSE),
+ ", ...,\n",
+ namesof("recap.prob.tau", link, earg = earg, tag = FALSE),
+ "\n"),
+ constraints = eval(substitute(expression({
+
+ tmp8.mat <- cbind(c(1, rep(0, len = 2*(tau-1))),
+ rbind(rep(0, len = tau-1), diag(tau-1), diag(tau-1)))
+ tmp9.mat <- cbind(c(rep(0, len = tau), rep(1, len = tau-1)))
+
+ cmk_tb <- if ( .parallel.t ) matrix(1, M, 1) else tmp8.mat
+
+ cm1_tb <-
+ if ( ( .parallel.t ) && ( .parallel.b )) matrix(1, M, 1) else
+ if ( ( .parallel.t ) && !( .parallel.b )) cbind(1, tmp9.mat) else
+ if (!( .parallel.t ) && ( .parallel.b )) tmp8.mat else
+ if (!( .parallel.t ) && !( .parallel.b )) cbind(tmp8.mat, tmp9.mat)
+
+
+ constraints <- cm.vgam(cmk_tb, x = x,
+ bool = .parallel.t , # Same as .parallel.b
+ constraints = constraints,
+ apply.int = .apply.parint , # FALSE,
+ cm.default = cmk_tb,
+ cm.intercept.default = cm1_tb)
+
+ }), list( .parallel.t = parallel.t,
+ .parallel.b = parallel.b,
+ .apply.parint = apply.parint ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ multipleResponses = TRUE,
+ imethod = .imethod ,
+ dconst = .dconst ,
+ dpower = .dpower ,
+ apply.parint = .apply.parint ,
+ parallel.t = .parallel.t ,
+ parallel.b = .parallel.b )
+ }, list( .parallel.t = parallel.t,
+ .parallel.b = parallel.b,
+ .imethod = imethod,
+ .dconst = dconst,
+ .dpower = dpower,
+ .apply.parint = apply.parint ))),
+
+ initialize = eval(substitute(expression({
+ Musual <- 2 # Not quite true
+
+
+
+ if (ncol(cbind(w)) > 1)
+ stop("variable 'w' should be a vector or one-column matrix")
+ w <- c(w) # Make it a vector
+
+ mustart.orig <- mustart
+ y <- as.matrix(y)
+ extra$tau <- tau <- ncol(y)
+ extra$ncoly <- ncoly <- ncol(y)
+ extra$orig.w <- w
+ extra$ycounts <- y
+ M <- Musual * tau - 1 # recap.prob.1 is unused
+
+
+ if (!(ncoly %in% 2:3))
+ stop("the response currently must be a two- or three-column matrix")
+
+
+
+ mustart <- matrix(c(weighted.mean(y[, 1], w),
+ weighted.mean(y[, 2], w),
+ if (tau == 3) weighted.mean(y[, 3], w) else NULL),
+ n, tau, byrow = TRUE)
+ mustart[mustart == 0] <- 0.05
+ mustart[mustart == 1] <- 0.95
+
+
+
+
+
+ if (!all(y == 0 | y == 1))
+ stop("response must contain 0s and 1s only")
+
+
+ tmp3 <- aux.posbernoulli(y)
+ cap.hist1 <- extra$cap.hist1 <- tmp3$cap.hist1
+ if (tau > 2) {
+ yindex <- 4 * y[, 1] + 2 * y[, 2] + 1 * y[, 3]
+ if (length(table(yindex)) != 2^tau - 1)
+ warning("there should be ", 2^tau - 1, " patterns of 0s and 1s ",
+ "in the response matrix. May crash.")
+
+ }
+
+
+ dn2.cap <- paste("cap.prob.", 1:ncoly, sep = "")
+ dn2.recap <- paste("recap.prob.", 2:ncoly, sep = "")
+
+ predictors.names <- c(
+ namesof(dn2.cap, .link , earg = .earg, short = TRUE),
+ namesof(dn2.recap, .link , earg = .earg, short = TRUE))
+
+ if (length(extra)) extra$w <- w else extra <- list(w = w)
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+
+
+ mu.init <- if (length( .iprob ))
+ matrix( .iprob , n, M, byrow = TRUE) else
+ if (length(mustart.orig))
+ matrix(rep(mustart.orig, length = n * M), n, M) else
+ matrix(rep(mustart, length = n * M), n, M)
+ etastart <- theta2eta(mu.init, .link , earg = .earg ) # n x M
+ } else {
+ mu.init <- matrix(runif(n * M), n, M)
+ etastart <- theta2eta(mu.init, .link , earg = .earg ) # n x M
+ }
+ }
+ mustart <- NULL
+ }), list( .link = link, .earg = earg,
+ .iprob = iprob,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ tau <- extra$ncoly
+ probs <- eta2theta(eta, .link , earg = .earg )
+ prc <- probs[, 1:tau]
+ prr <- cbind(0, probs[, (1+tau):ncol(probs)]) # 1st coln ignored
+
+ probs.numer <- cbind(probs[, 1],
+ ifelse(extra$cap.hist1[, 2] == 1, prr[, 2], prc[, 2]))
+
+ if (tau == 3)
+ probs.numer <- cbind(probs.numer,
+ ifelse(extra$cap.hist1[, 3] == 1, prr[, 3], prc[, 3]))
+
+ logQQQ <- rowSums(log1p(-prc))
+ QQQ <- exp(logQQQ)
+ AAA <- exp(log1p(-QQQ)) # 1 - QQQ
+ probs.numer / AAA
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ extra$w <- NULL # Kill it off
+
+
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- c(dn2.cap, dn2.recap)
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
+
+
+ misc$mv <- TRUE
+ misc$iprob <- .iprob
+
+
+ R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+ R[lower.tri(R)] <- 0
+ tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
+ R = R, w = w,
+ X_vlm = X_vlm_save, Hlist = constraints,
+ extra = extra, model.type = "tb")
+ extra$N.hat <- tmp6$N.hat
+ extra$SE.N.hat <- tmp6$SE.N.hat
+
+
+ misc$parallel.t <- .parallel.t
+ misc$parallel.b <- .parallel.b
+
+
+ misc$dconst <- .dconst
+ misc$dpower <- .dpower
+ misc$working.ridge <- c(rep(adjustment.posbern_tb, length = tau),
+ rep(0, length = tau-1))
+
+ misc$apply.parint <- .apply.parint
+
+ }), list( .link = link, .earg = earg,
+ .apply.parint = apply.parint,
+ .parallel.t = parallel.t,
+ .parallel.b = parallel.b,
+ .dconst = dconst,
+ .dpower = dpower,
+ .iprob = iprob ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+
+ tau <- extra$ncoly
+ ycounts <- y
+ use.orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+
+ probs <- eta2theta(eta, .link , earg = .earg )
+ prc <- probs[, 1:tau]
+ prr <- cbind(0, probs[, (1+tau):ncol(probs)]) # 1st coln ignored
+
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+
+ probs.numer <- cbind(probs[, 1],
+ ifelse(extra$cap.hist1[, 2] == 1, prr[, 2], prc[, 2]))
+ if (tau == 3)
+ probs.numer <- cbind(probs.numer,
+ ifelse(extra$cap.hist1[, 3] == 1, prr[, 3], prc[, 3]))
+
+ sum(use.orig.w *
+ dposbern(x = ycounts, # size = 1, # Bernoulli trials
+ prob = probs.numer, prob0 = prc, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("posbernoulli.tb"),
+ deriv = eval(substitute(expression({
+ tau <- extra$ncoly
+ probs <- eta2theta(eta, .link , earg = .earg )
+ prc <- probs[, 1:tau]
+ prr <- cbind(0, probs[, (1+tau):ncol(probs)]) # 1st coln ignored
+
+ logQQQ <- rowSums(log1p(-prc))
+ QQQ <- exp(logQQQ)
+
+ dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
+ dprc.deta <- dprobs.deta[, 1:tau]
+ dprr.deta <- cbind(0, dprobs.deta[, (1+tau):ncol(probs)]) # 1st coln ignored
+
+ dQ.dprc <- -QQQ / (1 - prc)
+
+
+ d2Q.dprc <- array(0, c(n, tau, tau))
+ for (jay in 1:(tau-1))
+ for (kay in (jay+1):tau)
+ d2Q.dprc[, jay, kay] <-
+ d2Q.dprc[, kay, jay] <- QQQ / ((1 - prc[, jay]) *
+ (1 - prc[, kay]))
+
+ if (tau == 2)
+ dl.dpr <- cbind(y[, 1] / prc[, 1] - (1 - y[, 1]) / (1 - prc[, 1]) +
+ dQ.dprc[, 1] / (1 - QQQ),
+ (1 - y[, 1]) *
+ (y[, 2] / prc[, 2] - (1 - y[, 2]) / (1 - prc[, 2])) +
+ dQ.dprc[, 2] / (1 - QQQ),
+ y[, 1] *
+ (y[, 2] / prr[, 2] - (1 - y[, 2]) / (1 - prr[, 2])))
+
+ if (tau == 3)
+ dl.dpr <- cbind(y[, 1] / prc[, 1] - (1 - y[, 1]) / (1 - prc[, 1]) +
+ dQ.dprc[, 1] / (1 - QQQ),
+
+ (1 - extra$cap.hist1[, 2]) * # (1 - y[, 1]) *
+ (y[, 2] / prc[, 2] - (1 - y[, 2]) / (1 - prc[, 2])) +
+ dQ.dprc[, 2] / (1 - QQQ),
+
+ (1 - extra$cap.hist1[, 3]) * # (1 - y[, 1]) * (1 - y[, 2]) *
+ y[, 3] / prc[, 3] +
+ dQ.dprc[, 3] / (1 - QQQ),
+
+ extra$cap.hist1[, 2] * # y[, 1] *
+ (y[, 2] / prr[, 2] - (1 - y[, 2]) / (1 - prr[, 2])),
+
+ extra$cap.hist1[, 3] *
+ (y[, 3] / prr[, 3] - (1 - y[, 3]) / (1 - prr[, 3]))
+ )
+
+ deriv.ans <- c(w) * dl.dpr * dprobs.deta
+
+ deriv.ans
+ }), list( .link = link, .earg = earg ))),
+
+ weight = eval(substitute(expression({
+ wz <- matrix(0, n, sum(M:(M - (tau - 1))))
+
+ cindex <- iam(NA, NA, M = M, both = TRUE)
+ cindex$row.index <- rep(cindex$row.index, length = ncol(wz))
+ cindex$col.index <- rep(cindex$col.index, length = ncol(wz))
+
+
+ if (tau == 2) {
+ wz[, iam(1, 1, M = M)] <-
+ (1 - prc[, 1] * (1 - prc[, 2])) / (prc[, 1] * (1 - prc[, 1]) *
+ (1 - QQQ)) -
+ ((1 - prc[, 2]) / (1 - QQQ))^2
+ wz[, iam(1, 1, M = M)] <- wz[, iam(1, 1, M = M)] * dprc.deta[, 1]^2
+
+ wz[, iam(2, 2, M = M)] <-
+ (prc[, 1] * (1 - prc[, 1]) / (prc[, 2] * (1 - QQQ)^2)) *
+ dprc.deta[, 2]^2
+
+ wz[, iam(3, 3, M = M)] <-
+ (prc[, 1] / (prr[, 2] * (1 - prr[, 2]) * (1 - QQQ))) *
+ dprr.deta[, 2]^2
+
+ wz[, iam(1, 2, M = M)] <- -dprc.deta[, 1] * dprc.deta[, 2] / (1 - QQQ)^2
+ } else if (tau == 3) {
+
+ wz[, iam(1, 1, M = M)] <-
+ ((1 - prc[, 2]) * prc[, 3] + prc[, 2]) / ((1 - prc[, 1]) * (1 - QQQ)) +
+ 1 / (prc[, 1] * (1 - QQQ)) -
+ (dQ.dprc[, 1] / (1 - QQQ))^2
+
+
+ wz[, iam(2, 2, M = M)] <-
+ (1 - prc[, 1]) * (1 - prc[, 2] * (1 - prc[, 3])) / (
+ prc[, 2] * (1 - prc[, 2]) * (1 - QQQ)) -
+ (dQ.dprc[, 2] / (1 - QQQ))^2
+
+
+ wz[, iam(3, 3, M = M)] <-
+ (1 - prc[, 1]) * (1 - prc[, 2]) / (prc[, 3] * (1 - QQQ)) -
+ (dQ.dprc[, 3] / (1 - QQQ))^2
+
+
+ wz[, iam(4, 4, M = M)] <-
+ prc[, 1] / (prr[, 2] * (1 - prr[, 2]) * (1 - QQQ))
+
+
+ wz[, iam(5, 5, M = M)] <-
+ (prc[, 1] + prc[, 2] * (1 - prc[, 1])) / (
+ prr[, 3] * (1 - prr[, 3]) * (1 - QQQ))
+
+
+ for (jay in 1:(tau-1))
+ for (kay in (jay+1):tau)
+ wz[, iam(jay, kay, M = M)] <-
+ -(d2Q.dprc[, jay, kay] +
+ dQ.dprc[, jay] *
+ dQ.dprc[, kay] / (1 - QQQ)) / (1 - QQQ)
+
+
+ wz <- wz * dprobs.deta[, cindex$row.index] *
+ dprobs.deta[, cindex$col.index]
+
+
+ } else {
+ stop("tau must equal 2 or 3")
+ }
+
+
+ adjustment.posbern_tb <- .dconst * iter^( .dpower )
+
+
+ for (jay in 1:tau)
+ wz[, iam(jay, jay, M = M)] <- wz[, iam(jay, jay, M = M)] +
+ adjustment.posbern_tb
+
+
+
+ c(w) * wz
+ }), list( .link = link, .earg = earg,
+ .dconst = dconst,
+ .dpower = dpower
+ ))))
+}
+
+
+
+
+
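For orientation, here is a minimal usage sketch of the new posbernoulli.tb family; the data frame chdata and its binary capture indicators y1 and y2 are hypothetical, and this version of the family handles tau = 2 or 3 sampling occasions only (see the stop() in the weight slot above):

    library(VGAM)
    # Two-occasion capture-recapture with a behavioural (recapture) effect;
    # the response is the matrix of 0/1 capture histories.
    fit <- vglm(cbind(y1, y2) ~ 1, posbernoulli.tb, data = chdata, trace = TRUE)
    coef(fit, matrix = TRUE)   # capture and recapture probabilities, on the link scale
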
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 1611b4e..d7e1279 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -33,11 +33,11 @@ lms.yjn.control <- function(trace = TRUE, ...)
llambda = "identity",
lmu = "identity",
lsigma = "loge",
- dfmu.init=4,
+ dfmu.init = 4,
dfsigma.init = 2,
ilambda = 1,
- isigma = NULL, expectiles = FALSE)
-{
+ isigma = NULL,
+ tol0 = 0.001, expectiles = FALSE) {
llambda <- as.list(substitute(llambda))
elambda <- link2list(llambda)
llambda <- attr(elambda, "function.name")
@@ -51,6 +51,9 @@ lms.yjn.control <- function(trace = TRUE, ...)
lsigma <- attr(esigma, "function.name")
+ if (!is.Numeric(tol0, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'tol0'")
+
if (!is.Numeric(ilambda))
stop("bad input for argument 'ilambda'")
if (length(isigma) &&
@@ -59,16 +62,18 @@ lms.yjn.control <- function(trace = TRUE, ...)
if (length(expectiles) != 1 || !is.logical(expectiles))
stop("bad input for argument 'expectiles'")
+
+
new("vglmff",
- blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
- " Regression (Box-Cox transformation to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda), ", ",
- namesof("mu", link = lmu, earg = emu), ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
+ " Regression (Box-Cox transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = lmu, earg = emu), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero = zero))),
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -83,23 +88,26 @@ lms.yjn.control <- function(trace = TRUE, ...)
if (!length(etastart)) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = y, w = w, df = .dfmu.init)
- fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
-
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
- sigma.init = if (is.null(.isigma)) {
- myratio = ((y/fv.init)^lambda.init - 1) / lambda.init
- if (is.Numeric( .dfsigma.init )) {
- fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = myratio^2,
- w = w, df = .dfsigma.init)
- sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
- } else
- sqrt(var(myratio))
- } else .isigma
+ Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y, w = w, df = .dfmu.init)
+ fv.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+
+ lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.0
+ sigma.init <- if (is.null(.isigma)) {
+ myratio <- ((y/fv.init)^lambda.init - 1) / lambda.init
+ if (is.Numeric( .dfsigma.init )) {
+ fit600 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = myratio^2,
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
+ } else {
+ sqrt(var(myratio))
+ }
+ } else {
+ .isigma
+ }
- etastart =
+ etastart <-
cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
theta2eta(fv.init, .lmu, earg = .emu),
theta2eta(sigma.init, .lsigma, earg = .esigma))
@@ -110,9 +118,9 @@ lms.yjn.control <- function(trace = TRUE, ...)
.dfsigma.init = dfsigma.init,
.ilambda = ilambda, .isigma = isigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 2] <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ eta[, 3] <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
if ( .expectiles ) {
explot.lms.bcn(percentiles= .percentiles, eta = eta)
} else {
@@ -126,50 +134,58 @@ lms.yjn.control <- function(trace = TRUE, ...)
misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
+ misc$tol0 <- .tol0
misc$percentiles <- .percentiles
misc$true.mu <- FALSE # @fitted is not a true mu
misc$expectiles <- .expectiles
if (control$cdf) {
- post$cdf = cdf.lms.bcn(y,
- eta0 = matrix(c(lambda, mymu, sigma), ncol = 3,
- dimnames = list(dimnames(x)[[1]], NULL)))
+ post$cdf <- cdf.lms.bcn(y,
+ eta0 = matrix(c(lambda, mymu, sigma), ncol = 3,
+ dimnames = list(dimnames(x)[[1]], NULL)))
}
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
- .percentiles = percentiles, .expectiles = expectiles ))),
+ .percentiles = percentiles, .expectiles = expectiles,
+ .tol0 = tol0 ))),
loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- muvec = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- zedd = ((y/muvec)^lambda - 1) / (lambda * sigma)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented") else {
- use.this = (lambda * log(y / muvec) - log(sigma) - log(y) +
- dnorm(zedd, log = TRUE))
- use.this[abs(lambda) < 0.001] =
- (-log(y / muvec) - log(sigma) +
- dnorm(zedd, log = TRUE))[abs(lambda) < 0.001]
- sum(c(w) * use.this)
- }
- }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ muvec <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+
+ zedd <- ((y/muvec)^lambda - 1) / (lambda * sigma)
+ log.dz.dy <- (lambda - 1) * log(y/muvec) - log(muvec * sigma)
+
+ is.eff.0 <- abs(lambda) < .tol0
+ if (any(is.eff.0)) {
+ zedd[is.eff.0] <- log(y[is.eff.0] / muvec[is.eff.0]) / sigma[is.eff.0]
+ log.dz.dy[is.eff.0] <- -log(y[is.eff.0] * sigma[is.eff.0])
+ }
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented") else {
+ sum(c(w) * (dnorm(zedd, log = TRUE) + log.dz.dy))
+ }
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .tol0 = tol0 ))),
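The rewritten loglikelihood slot above is the change-of-variables form of the Box-Cox normal density; writing z for zedd,

\[
z = \frac{(y/\mu)^{\lambda}-1}{\lambda\sigma}, \qquad
\ell = \log\phi(z) + \log\Bigl|\tfrac{dz}{dy}\Bigr|, \qquad
\log\Bigl|\tfrac{dz}{dy}\Bigr| = (\lambda-1)\log(y/\mu) - \log(\mu\sigma),
\]

and when |lambda| < tol0 the limiting forms z = log(y/mu)/sigma and log|dz/dy| = -log(y*sigma) are used instead, which is exactly what the is.eff.0 branch computes.
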
vfamily = c("lms.bcn", "lmscreg"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- zedd = ((y/mymu)^lambda - 1) / (lambda * sigma)
- z2m1 = zedd * zedd - 1
- dl.dlambda = zedd*(zedd - log(y/mymu) / sigma) / lambda -
- z2m1 * log(y/mymu)
- dl.dmu = zedd / (mymu * sigma) + z2m1 * lambda / mymu
- dl.dsigma = z2m1 / sigma
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
+ zedd <- ((y / mymu)^lambda - 1) / (lambda * sigma)
+ z2m1 <- zedd * zedd - 1
- dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
+ dl.dlambda <- zedd * (zedd - log(y/mymu) / sigma) / lambda -
+ z2m1 * log(y/mymu)
+ dl.dmu <- zedd / (mymu * sigma) + z2m1 * lambda / mymu
+ dl.dsigma <- z2m1 / sigma
+
+ dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda)
+ dmu.deta <- dtheta.deta(mymu, .lmu, earg = .emu)
+ dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dmu * dmu.deta,
@@ -177,15 +193,15 @@ lms.yjn.control <- function(trace = TRUE, ...)
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, 6)
- wz[,iam(1, 1, M)] = (7 * sigma^2 / 4) * dlambda.deta^2
- wz[,iam(2, 2, M)] = (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 *
- dmu.deta^2
- wz[,iam(3, 3, M)] = (2 / sigma^2) * dsigma.deta^2
- wz[,iam(1, 2, M)] = (-1 / (2 * mymu)) * dlambda.deta * dmu.deta
- wz[,iam(1, 3, M)] = (lambda * sigma) * dlambda.deta * dsigma.deta
- wz[,iam(2, 3, M)] = (2*lambda/(mymu * sigma)) *
- dmu.deta * dsigma.deta
+ wz <- matrix(as.numeric(NA), n, 6)
+ wz[,iam(1, 1, M)] <- (7 * sigma^2 / 4) * dlambda.deta^2
+ wz[,iam(2, 2, M)] <- (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 *
+ dmu.deta^2
+ wz[,iam(3, 3, M)] <- (2 / sigma^2) * dsigma.deta^2
+ wz[,iam(1, 2, M)] <- (-1 / (2 * mymu)) * dlambda.deta * dmu.deta
+ wz[,iam(1, 3, M)] <- (lambda * sigma) * dlambda.deta * dsigma.deta
+ wz[,iam(2, 3, M)] <- (2*lambda/(mymu * sigma)) *
+ dmu.deta * dsigma.deta
c(w) * wz
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma ))))
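A short fitting sketch for lms.bcn (the data frame mydat, with a positive response y and covariate x, is hypothetical; percentiles are left at their defaults):

    library(VGAM)
    fit <- vgam(y ~ s(x, df = 4), lms.bcn(zero = 1), data = mydat, trace = TRUE)
    head(fitted(fit))     # fitted quantile curves at the default percentiles
    head(fit@post$cdf)    # per-observation CDF values; kept because lmscreg.control()$cdf is TRUE
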
@@ -201,8 +217,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
dfmu.init=4,
dfsigma.init = 2,
ilambda = 1,
- isigma = NULL)
-{
+ isigma = NULL) {
llambda <- as.list(substitute(llambda))
elambda <- link2list(llambda)
llambda <- attr(elambda, "function.name")
@@ -223,13 +238,13 @@ lms.yjn.control <- function(trace = TRUE, ...)
new("vglmff",
blurb = c("LMS Quantile Regression ",
- "(Box-Cox transformation to a Gamma distribution)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda), ", ",
- namesof("mu", link = lmu, earg = emu), ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ "(Box-Cox transformation to a Gamma distribution)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = lmu, earg = emu), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero = zero))),
initialize = eval(substitute(expression({
@@ -262,7 +277,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
sqrt(var(myratio))
} else .isigma
- etastart =
+ etastart <-
cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
theta2eta(fv.init, .lmu, earg = .emu),
theta2eta(sigma.init, .lsigma, earg = .esigma))
@@ -273,22 +288,22 @@ lms.yjn.control <- function(trace = TRUE, ...)
.dfsigma.init = dfsigma.init,
.ilambda = ilambda, .isigma = isigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 2] <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ eta[, 3] <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
qtplot.lms.bcg(percentiles= .percentiles, eta = eta)
}, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma,
.percentiles = percentiles ))),
last = eval(substitute(expression({
- misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$link <- c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
+ misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma)
- misc$percentiles = .percentiles
- misc$true.mu = FALSE # $fitted is not a true mu
+ misc$percentiles <- .percentiles
+ misc$true.mu <- FALSE # $fitted is not a true mu
if (control$cdf) {
- post$cdf = cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
+ post$cdf <- cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
}
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
@@ -296,11 +311,11 @@ lms.yjn.control <- function(trace = TRUE, ...)
.percentiles = percentiles ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- Gee = (y / mu)^lambda
- theta = 1 / (sigma * lambda)^2
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ Gee <- (y / mu)^lambda
+ theta <- 1 / (sigma * lambda)^2
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (log(abs(lambda)) + theta * (log(theta) +
@@ -309,22 +324,22 @@ lms.yjn.control <- function(trace = TRUE, ...)
.elambda = elambda, .emu = emu, .esigma = esigma ))),
vfamily = c("lms.bcg", "lmscreg"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- Gee = (y / mymu)^lambda
- theta = 1 / (sigma * lambda)^2
- dd = digamma(theta)
+ Gee <- (y / mymu)^lambda
+ theta <- 1 / (sigma * lambda)^2
+ dd <- digamma(theta)
- dl.dlambda = (1 + 2 * theta * (dd + Gee -1 -log(theta) -
+ dl.dlambda <- (1 + 2 * theta * (dd + Gee -1 -log(theta) -
0.5 * (Gee + 1) * log(Gee))) / lambda
- dl.dmu = lambda * theta * (Gee-1) / mymu
- dl.dsigma = 2*theta*(dd + Gee - log(theta * Gee)-1) / sigma
+ dl.dmu <- lambda * theta * (Gee-1) / mymu
+ dl.dsigma <- 2*theta*(dd + Gee - log(theta * Gee)-1) / sigma
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ dlambda.deta <- dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta <- dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
cbind(dl.dlambda * dlambda.deta,
dl.dmu * dmu.deta,
@@ -332,27 +347,27 @@ lms.yjn.control <- function(trace = TRUE, ...)
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
- tritheta = trigamma(theta)
- wz = matrix(0, n, 6)
+ tritheta <- trigamma(theta)
+ wz <- matrix(0, n, 6)
if (TRUE) {
- part2 = dd + 2/theta - 2*log(theta)
- wz[,iam(1, 1, M)] = ((1 + theta*(tritheta*(1+4*theta) -
+ part2 <- dd + 2/theta - 2*log(theta)
+ wz[,iam(1, 1, M)] <- ((1 + theta*(tritheta*(1+4*theta) -
4*(1+1/theta) - log(theta)*(2/theta -
log(theta)) + dd*part2)) / lambda^2) *
dlambda.deta^2
} else {
- temp = mean( Gee*(log(Gee))^2 )
- wz[,iam(1, 1, M)] = ((4 * theta * (theta * tritheta-1) - 1 +
+ temp <- mean( Gee*(log(Gee))^2 )
+ wz[,iam(1, 1, M)] <- ((4 * theta * (theta * tritheta-1) - 1 +
theta*temp) / lambda^2) * dlambda.deta^2
}
- wz[,iam(2, 2, M)] = dmu.deta^2 / (mymu * sigma)^2
- wz[,iam(3, 3, M)] = (4 * theta * (theta * tritheta - 1) / sigma^2) *
+ wz[,iam(2, 2, M)] <- dmu.deta^2 / (mymu * sigma)^2
+ wz[,iam(3, 3, M)] <- (4 * theta * (theta * tritheta - 1) / sigma^2) *
dsigma.deta^2
- wz[,iam(1, 2, M)] = (-theta * (dd + 1 / theta - log(theta)) / mymu) *
+ wz[,iam(1, 2, M)] <- (-theta * (dd + 1 / theta - log(theta)) / mymu) *
dlambda.deta * dmu.deta
- wz[,iam(1, 3, M)] = 2 * theta^1.5 * (2 * theta * tritheta - 2 -
+ wz[,iam(1, 3, M)] <- 2 * theta^1.5 * (2 * theta * tritheta - 2 -
1 / theta) * dlambda.deta * dsigma.deta
c(w) * wz
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
@@ -364,9 +379,9 @@ lms.yjn.control <- function(trace = TRUE, ...)
dy.dpsi.yeojohnson <- function(psi, lambda) {
- L = max(length(psi), length(lambda))
- psi = rep(psi, length.out = L);
- lambda = rep(lambda, length.out = L);
+ L <- max(length(psi), length(lambda))
+ psi <- rep(psi, length.out = L);
+ lambda <- rep(lambda, length.out = L);
ifelse(psi > 0, (1 + psi * lambda)^(1/lambda - 1),
(1 - (2-lambda) * psi)^((lambda - 1) / (2-lambda)))
@@ -374,9 +389,9 @@ dy.dpsi.yeojohnson <- function(psi, lambda) {
dyj.dy.yeojohnson <- function(y, lambda) {
- L = max(length(y), length(lambda))
- y = rep(y, length.out = L);
- lambda = rep(lambda, length.out = L);
+ L <- max(length(y), length(lambda))
+ y <- rep(y, length.out = L);
+ lambda <- rep(lambda, length.out = L);
ifelse(y>0, (1 + y)^(lambda - 1), (1 - y)^(1 - lambda))
}
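The two helpers above are the analytic Jacobians of the Yeo-Johnson transform; a quick finite-difference check against yeo.johnson() (defined just below) is one way to sanity-test them. They are not exported, so the VGAM::: access here is an assumption:

    library(VGAM)
    y <- c(-1.5, -0.2, 0.3, 2.0); lambda <- 0.7; h <- 1e-6
    fd <- (yeo.johnson(y + h, lambda) - yeo.johnson(y - h, lambda)) / (2 * h)
    max(abs(fd - VGAM:::dyj.dy.yeojohnson(y, lambda)))   # should be very small, roughly 1e-9 or less
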
@@ -384,69 +399,68 @@ dyj.dy.yeojohnson <- function(y, lambda) {
yeo.johnson <- function(y, lambda, derivative = 0,
epsilon = sqrt(.Machine$double.eps),
- inverse = FALSE)
-{
+ inverse = FALSE) {
if (!is.Numeric(derivative, allowable.length = 1,
integer.valued = TRUE) ||
derivative < 0)
stop("argument 'derivative' must be a non-negative integer")
- ans = y
+ ans <- y
if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE))
stop("argument 'epsilon' must be a single positive number")
- L = max(length(lambda), length(y))
+ L <- max(length(lambda), length(y))
if (length(y) != L)
- y = rep(y, length.out = L)
+ y <- rep(y, length.out = L)
if (length(lambda) != L)
- lambda = rep(lambda, length.out = L) # lambda may be of length 1
+ lambda <- rep(lambda, length.out = L) # lambda may be of length 1
if (inverse) {
if (derivative != 0)
stop("argument 'derivative' must 0 when inverse = TRUE")
if (any(index <- y >= 0 & abs(lambda) > epsilon))
- ans[index] = (y[index]*lambda[index] + 1)^(1/lambda[index]) - 1
+ ans[index] <- (y[index]*lambda[index] + 1)^(1/lambda[index]) - 1
if (any(index <- y >= 0 & abs(lambda) <= epsilon))
- ans[index] = expm1(y[index])
+ ans[index] <- expm1(y[index])
if (any(index <- y < 0 & abs(lambda-2) > epsilon))
- ans[index] = 1- (-(2-lambda[index]) *
+ ans[index] <- 1- (-(2-lambda[index]) *
y[index]+1)^(1/(2-lambda[index]))
if (any(index <- y < 0 & abs(lambda-2) <= epsilon))
- ans[index] = -expm1(-y[index])
+ ans[index] <- -expm1(-y[index])
return(ans)
}
if (derivative == 0) {
if (any(index <- y >= 0 & abs(lambda) > epsilon))
- ans[index] = ((y[index]+1)^(lambda[index]) - 1) / lambda[index]
+ ans[index] <- ((y[index]+1)^(lambda[index]) - 1) / lambda[index]
if (any(index <- y >= 0 & abs(lambda) <= epsilon))
- ans[index] = log1p(y[index])
+ ans[index] <- log1p(y[index])
if (any(index <- y < 0 & abs(lambda-2) > epsilon))
- ans[index] = -((-y[index]+1)^(2-lambda[index]) - 1)/(2 -
+ ans[index] <- -((-y[index]+1)^(2-lambda[index]) - 1)/(2 -
lambda[index])
if (any(index <- y < 0 & abs(lambda-2) <= epsilon))
- ans[index] = -log1p(-y[index])
+ ans[index] <- -log1p(-y[index])
} else {
psi <- Recall(y = y, lambda=lambda, derivative=derivative-1,
epsilon=epsilon, inverse=inverse)
if (any(index <- y >= 0 & abs(lambda) > epsilon))
- ans[index] = ( (y[index]+1)^(lambda[index]) *
+ ans[index] <- ( (y[index]+1)^(lambda[index]) *
(log1p(y[index]))^(derivative) - derivative *
psi[index] ) / lambda[index]
if (any(index <- y >= 0 & abs(lambda) <= epsilon))
- ans[index] = (log1p(y[index]))^(derivative + 1) / (derivative+1)
+ ans[index] <- (log1p(y[index]))^(derivative + 1) / (derivative+1)
if (any(index <- y < 0 & abs(lambda-2) > epsilon))
- ans[index] = -( (-y[index]+1)^(2-lambda[index]) *
+ ans[index] <- -( (-y[index]+1)^(2-lambda[index]) *
(-log1p(-y[index]))^(derivative) - derivative *
psi[index] ) / (2-lambda[index])
if (any(index <- y < 0 & abs(lambda-2) <= epsilon))
- ans[index] = (-log1p(-y[index]))^(derivative + 1) / (derivative+1)
+ ans[index] <- (-log1p(-y[index]))^(derivative + 1) / (derivative+1)
}
ans
}
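As a quick illustration of the cleaned-up yeo.johnson(), the forward transform and its inverse = TRUE branch undo each other:

    library(VGAM)
    lambda <- 0.5
    y <- c(-3, -0.5, 0, 1, 4)
    psi <- yeo.johnson(y, lambda)                            # forward transform
    all.equal(y, yeo.johnson(psi, lambda, inverse = TRUE))   # TRUE, up to rounding
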
dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
- derivative = 0, smallno=1.0e-8) {
+ derivative = 0, smallno = 1.0e-8) {
if (!is.Numeric(derivative, allowable.length = 1,
integer.valued = TRUE) ||
@@ -455,33 +469,34 @@ dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
if (!is.Numeric(smallno, allowable.length = 1, positive = TRUE))
stop("argument 'smallno' must be a single positive number")
- L = max(length(psi), length(lambda), length(mymu), length(sigma))
- if (length(psi) != L) psi = rep(psi, length.out = L)
- if (length(lambda) != L) lambda = rep(lambda, length.out = L)
- if (length(mymu) != L) mymu = rep(mymu, length.out = L)
- if (length(sigma) != L) sigma = rep(sigma, length.out = L)
-
- answer = matrix(as.numeric(NA), L, derivative+1)
- CC = psi >= 0
- BB = ifelse(CC, lambda, -2+lambda)
- AA = psi * BB
- temp8 = if (derivative > 0) {
- answer[,1:derivative] =
- Recall(psi=psi, lambda=lambda, mymu=mymu, sigma=sigma,
- derivative=derivative-1, smallno=smallno)
+ L <- max(length(psi), length(lambda), length(mymu), length(sigma))
+ if (length(psi) != L) psi <- rep(psi, length.out = L)
+ if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
+ if (length(mymu) != L) mymu <- rep(mymu, length.out = L)
+ if (length(sigma) != L) sigma <- rep(sigma, length.out = L)
+
+ answer <- matrix(as.numeric(NA), L, derivative+1)
+ CC <- psi >= 0
+ BB <- ifelse(CC, lambda, -2+lambda)
+ AA <- psi * BB
+ temp8 <- if (derivative > 0) {
+ answer[,1:derivative] <-
+ Recall(psi = psi, lambda = lambda, mymu = mymu, sigma = sigma,
+ derivative = derivative-1, smallno = smallno)
answer[,derivative] * derivative
} else {
0
}
- answer[,1+derivative] = ((AA+1) * (log1p(AA)/BB)^derivative - temp8) / BB
+ answer[,1+derivative] <- ((AA+1) * (log1p(AA)/BB)^derivative - temp8) / BB
- pos = (CC & abs(lambda) <= smallno) | (!CC & abs(lambda-2) <= smallno)
+ pos <- (CC & abs(lambda) <= smallno) | (!CC & abs(lambda-2) <= smallno)
if (any(pos))
answer[pos,1+derivative] =
(answer[pos, 1]^(1+derivative))/(derivative+1)
answer
}
+
gh.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
@@ -490,7 +505,7 @@ gh.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
sqrt(2) * z * derivmat[, 3] / sigma) / sqrt(pi)
} else {
# Long-winded way
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(1 / sqrt(pi)) *
(dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 1)[, 2]^2 +
@@ -505,7 +520,7 @@ gh.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
(-derivmat[, 2]) / (sqrt(pi) * sigma^2)
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(1 / sqrt(pi)) * (- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 1)[, 2]) / sigma^2
}
@@ -516,7 +531,7 @@ gh.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
sqrt(8 / pi) * (-derivmat[, 2]) * z / sigma^2
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(1 / sqrt(pi)) *
(-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 1)[, 2]) *
@@ -531,8 +546,8 @@ glag.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (derivmat[, 2]^2 + sqrt(2) * sigma * z * derivmat[, 3])
} else {
- psi = mymu + sqrt(2) * sigma * z
- discontinuity = -mymu / (sqrt(2) * sigma)
+ psi <- mymu + sqrt(2) * sigma * z
+ discontinuity <- -mymu / (sqrt(2) * sigma)
(1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
(1 / sqrt(pi)) *
(dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]^2 +
@@ -543,11 +558,11 @@ glag.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
}
glag.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
- discontinuity = -mymu / (sqrt(2) * sigma)
+ discontinuity <- -mymu / (sqrt(2) * sigma)
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2])
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
(1 / sqrt(pi)) *
(- dpsi.dlambda.yjn(psi, lambda, mymu,
@@ -559,8 +574,8 @@ glag.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2]) * sqrt(8) * z
} else {
- psi = mymu + sqrt(2) * sigma * z
- discontinuity = -mymu / (sqrt(2) * sigma)
+ psi <- mymu + sqrt(2) * sigma * z
+ discontinuity <- -mymu / (sqrt(2) * sigma)
(1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
(1 / sqrt(pi)) *
(-2 * dpsi.dlambda.yjn(psi, lambda, mymu,
@@ -578,7 +593,7 @@ gleg.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (derivmat[, 2]^2 + sqrt(2) * sigma * z * derivmat[, 3])
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(exp(-z^2) / sqrt(pi)) *
(dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]^2 +
(psi - mymu) *
@@ -587,22 +602,24 @@ gleg.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
}
}
+
gleg.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (- derivmat[, 2])
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(exp(-z^2) / sqrt(pi)) *
(- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 1)[, 2]) / sigma^2
}
}
+
gleg.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2]) * sqrt(8) * z
} else {
- psi = mymu + sqrt(2) * sigma * z
+ psi <- mymu + sqrt(2) * sigma * z
(exp(-z^2) / sqrt(pi)) *
(-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]) *
(psi - mymu) / sigma^3
@@ -611,8 +628,7 @@ gleg.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
-lms.yjn2.control <- function(save.weight = TRUE, ...)
-{
+lms.yjn2.control <- function(save.weight = TRUE, ...) {
list(save.weight=save.weight)
}
@@ -626,8 +642,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
ilambda=1.0,
isigma = NULL,
yoffset = NULL,
- nsimEIM = 250)
-{
+ nsimEIM = 250) {
llambda <- as.list(substitute(llambda))
elambda <- link2list(llambda)
@@ -651,15 +666,15 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("LMS Quantile Regression (Yeo-Johnson transformation",
- " to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", ",
- namesof("mu", link = lmu, earg = emu),
- ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ " to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda),
+ ", ",
+ namesof("mu", link = lmu, earg = emu),
+ ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero = zero))),
initialize = eval(substitute(expression({
@@ -672,26 +687,26 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
namesof("mu", .lmu, earg = .emu, short= TRUE),
namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
- y.save = y
- yoff = if (is.Numeric( .yoffset)) .yoffset else -median(y)
- extra$yoffset = yoff
- y = y + yoff
+ y.save <- y
+ yoff <- if (is.Numeric( .yoffset)) .yoffset else -median(y)
+ extra$yoffset <- yoff
+ y <- y + yoff
if (!length(etastart)) {
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.
+ lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.
- y.tx = yeo.johnson(y, lambda.init)
+ y.tx <- yeo.johnson(y, lambda.init)
fv.init =
if (smoothok <-
(length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
- fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y=y.tx, w = w, df = .dfmu.init)
+ fit700 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y.tx, w = w, df = .dfmu.init)
c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
rep(weighted.mean(y, w), length.out = n)
}
- sigma.init = if (!is.Numeric(.isigma)) {
+ sigma.init <- if (!is.Numeric(.isigma)) {
if (is.Numeric( .dfsigma.init) && smoothok) {
fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
y = (y.tx - fv.init)^2,
@@ -704,10 +719,10 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
} else
.isigma
- etastart = matrix(0, n, 3)
- etastart[, 1] = theta2eta(lambda.init, .llambda, earg = .elambda)
- etastart[, 2] = theta2eta(fv.init, .lmu, earg = .emu)
- etastart[, 3] = theta2eta(sigma.init, .lsigma, earg = .esigma)
+ etastart <- matrix(0, n, 3)
+ etastart[, 1] <- theta2eta(lambda.init, .llambda, earg = .elambda)
+ etastart[, 2] <- theta2eta(fv.init, .lmu, earg = .emu)
+ etastart[, 3] <- theta2eta(sigma.init, .lsigma, earg = .esigma)
}
}), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
@@ -718,8 +733,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.yoffset=yoffset,
.isigma = isigma))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 3] <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
qtplot.lms.yjn(percentiles = .percentiles, eta = eta,
yoffset = extra$yoff)
}, list(.percentiles = percentiles,
@@ -727,20 +742,20 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.llambda = llambda,
.lsigma = lsigma))),
last = eval(substitute(expression({
- misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
+ misc$link <- c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$percentiles = .percentiles
+ misc$expected <- TRUE
+ misc$nsimEIM <- .nsimEIM
+ misc$percentiles <- .percentiles
- misc$true.mu = FALSE # $fitted is not a true mu
- misc[["yoffset"]] = extra$yoffset
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ misc[["yoffset"]] <- extra$yoffset
- y = y.save # Restore back the value; to be attached to object
+ y <- y.save # Restore back the value; to be attached to object
if (control$cdf) {
- post$cdf = cdf.lms.yjn(y + misc$yoffset,
+ post$cdf <- cdf.lms.yjn(y + misc$yoffset,
eta0=matrix(c(lambda,mymu,sigma),
ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
}
@@ -750,10 +765,10 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi = yeo.johnson(y, lambda)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ psi <- yeo.johnson(y, lambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
@@ -763,20 +778,20 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.lsigma = lsigma ))),
vfamily = c("lms.yjn2", "lmscreg"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
-
- psi = yeo.johnson(y, lambda)
- d1 = yeo.johnson(y, lambda, deriv = 1)
- AA = (psi - mymu) / sigma
- dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
- dl.dmu = AA / sigma
- dl.dsigma = (AA^2 -1) / sigma
- dthetas.detas = cbind(dlambda.deta, dmu.deta, dsigma.deta)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu <- eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ dlambda.deta <- dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta <- dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
+ psi <- yeo.johnson(y, lambda)
+ d1 <- yeo.johnson(y, lambda, deriv = 1)
+ AA <- (psi - mymu) / sigma
+ dl.dlambda <- -AA * d1 /sigma + sign(y) * log1p(abs(y))
+ dl.dmu <- AA / sigma
+ dl.dsigma <- (AA^2 -1) / sigma
+ dthetas.detas <- cbind(dlambda.deta, dmu.deta, dsigma.deta)
c(w) * cbind(dl.dlambda, dl.dmu, dl.dsigma) * dthetas.detas
}), list( .elambda = elambda, .emu = emu, .esigma = esigma,
.llambda = llambda, .lmu = lmu,
@@ -784,29 +799,29 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- psi = rnorm(n, mymu, sigma)
- ysim = yeo.johnson(y=psi, lam=lambda, inv = TRUE)
- d1 = yeo.johnson(ysim, lambda, deriv = 1)
- AA = (psi - mymu) / sigma
- dl.dlambda = -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
- dl.dmu = AA / sigma
- dl.dsigma = (AA^2 -1) / sigma
+ psi <- rnorm(n, mymu, sigma)
+ ysim <- yeo.johnson(y=psi, lam=lambda, inv = TRUE)
+ d1 <- yeo.johnson(ysim, lambda, deriv = 1)
+ AA <- (psi - mymu) / sigma
+ dl.dlambda <- -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
+ dl.dmu <- AA / sigma
+ dl.dsigma <- (AA^2 -1) / sigma
rm(ysim)
- temp3 = cbind(dl.dlambda, dl.dmu, dl.dsigma)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.dlambda, dl.dmu, dl.dsigma)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
if (intercept.only)
- run.varcov = matrix(colMeans(run.varcov),
+ run.varcov <- matrix(colMeans(run.varcov),
nr=n, nc=ncol(run.varcov), byrow = TRUE)
- wz = run.varcov * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- dimnames(wz) = list(rownames(wz), NULL) # Remove the colnames
+ wz <- run.varcov * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ dimnames(wz) <- list(rownames(wz), NULL) # Remove the colnames
c(w) * wz
}), list(.lsigma = lsigma,
.esigma = esigma, .elambda = elambda,
@@ -825,8 +840,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
isigma = NULL,
rule = c(10, 5),
yoffset = NULL,
- diagW = FALSE, iters.diagW=6)
-{
+ diagW = FALSE, iters.diagW = 6) {
@@ -841,19 +855,19 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
- rule = rule[1] # Number of points (common) for all the quadrature schemes
+ rule <- rule[1] # Number of points (common) for all the quadrature schemes
if (rule != 5 && rule != 10)
stop("only rule=5 or 10 is supported")
new("vglmff",
blurb = c("LMS Quantile Regression ",
- "(Yeo-Johnson transformation to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", mu, ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ "(Yeo-Johnson transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda),
+ ", mu, ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero = zero))),
initialize = eval(substitute(expression({
@@ -867,26 +881,26 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
"mu",
namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
- y.save = y
- yoff = if (is.Numeric( .yoffset )) .yoffset else -median(y)
- extra$yoffset = yoff
- y = y + yoff
+ y.save <- y
+ yoff <- if (is.Numeric( .yoffset )) .yoffset else -median(y)
+ extra$yoffset <- yoff
+ y <- y + yoff
if (!length(etastart)) {
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
+ lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.0
- y.tx = yeo.johnson(y, lambda.init)
+ y.tx <- yeo.johnson(y, lambda.init)
if (smoothok <-
(length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
- fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ fit700 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
y = y.tx, w = w, df = .dfmu.init)
- fv.init = c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
+ fv.init <- c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
- fv.init = rep(weighted.mean(y, w), length.out = n)
+ fv.init <- rep(weighted.mean(y, w), length.out = n)
}
- sigma.init = if (!is.Numeric( .isigma )) {
+ sigma.init <- if (!is.Numeric( .isigma )) {
if (is.Numeric( .dfsigma.init) &&
smoothok) {
fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
@@ -900,7 +914,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
} else
.isigma
- etastart =
+ etastart <-
cbind(theta2eta(lambda.init,.llambda, earg = .elambda),
fv.init,
theta2eta(sigma.init, .lsigma, earg = .esigma))
@@ -915,8 +929,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.yoffset=yoffset,
.isigma = isigma))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 3] <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
qtplot.lms.yjn(percentiles = .percentiles,
eta = eta, yoffset = extra$yoff)
}, list(.percentiles = percentiles,
@@ -925,17 +939,17 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.llambda = llambda,
.lsigma = lsigma))),
last = eval(substitute(expression({
- misc$link = c(lambda = .llambda, mu = "identity",
+ misc$link <- c(lambda = .llambda, mu = "identity",
sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = list(theta = NULL),
+ misc$earg <- list(lambda = .elambda, mu = list(theta = NULL),
sigma = .esigma)
- misc$percentiles = .percentiles
- misc$true.mu = FALSE # $fitted is not a true mu
- misc[["yoffset"]] = extra$yoff
+ misc$percentiles <- .percentiles
+ misc$true.mu <- FALSE # $fitted is not a true mu
+ misc[["yoffset"]] <- extra$yoff
- y = y.save # Restore back the value; to be attached to object
+ y <- y.save # Restore back the value; to be attached to object
if (control$cdf) {
post$cdf =
@@ -950,10 +964,10 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.lsigma = lsigma))),
loglikelihood = eval(substitute(
function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu = eta[, 2]
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi = yeo.johnson(y, lambda)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu <- eta[, 2]
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ psi <- yeo.johnson(y, lambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
@@ -962,19 +976,19 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
.lsigma = lsigma, .llambda = llambda))),
vfamily = c("lms.yjn", "lmscreg"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta[, 2]
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu <- eta[, 2]
+ sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi = yeo.johnson(y, lambda)
- d1 = yeo.johnson(y, lambda, deriv = 1)
- AA = (psi - mymu) / sigma
+ psi <- yeo.johnson(y, lambda)
+ d1 <- yeo.johnson(y, lambda, deriv = 1)
+ AA <- (psi - mymu) / sigma
- dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
- dl.dmu = AA / sigma
- dl.dsigma = (AA^2 -1) / sigma
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ dl.dlambda <- -AA * d1 /sigma + sign(y) * log1p(abs(y))
+ dl.dmu <- AA / sigma
+ dl.dsigma <- (AA^2 -1) / sigma
+ dlambda.deta <- dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
cbind(dl.dlambda * dlambda.deta,
dl.dmu,
@@ -982,11 +996,11 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
}), list( .esigma = esigma, .elambda = elambda,
.lsigma = lsigma, .llambda = llambda ))),
weight = eval(substitute(expression({
- wz = matrix(0, n, 6)
+ wz <- matrix(0, n, 6)
- wz[,iam(2, 2, M)] = 1 / sigma^2
- wz[,iam(3, 3, M)] = 2 * wz[,iam(2, 2, M)] # 2 / sigma^2
+ wz[,iam(2, 2, M)] <- 1 / sigma^2
+ wz[,iam(3, 3, M)] <- 2 * wz[,iam(2, 2, M)] # 2 / sigma^2
if (.rule == 10) {
@@ -1051,23 +1065,23 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
discontinuity = -mymu/(sqrt(2)*sigma)
- LL = pmin(discontinuity, 0)
- UU = pmax(discontinuity, 0)
+ LL <- pmin(discontinuity, 0)
+ UU <- pmax(discontinuity, 0)
if (FALSE) {
- AA = (UU-LL)/2
+ AA <- (UU-LL)/2
for(kk in 1:length(gleg.wts)) {
- temp1 = AA * gleg.wts[kk]
- abscissae = (UU+LL)/2 + AA * gleg.abs[kk]
- psi = mymu + sqrt(2) * sigma * abscissae
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+ temp1 <- AA * gleg.wts[kk]
+ abscissae <- (UU+LL)/2 + AA * gleg.abs[kk]
+ psi <- mymu + sqrt(2) * sigma * abscissae
+ temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 2)
- temp9 = cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
+ temp9 <- cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
- wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + temp1 *
+ wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp1 *
gleg.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + temp1 *
+ wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp1 *
gleg.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + temp1 *
+ wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp1 *
gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
}
} else {
@@ -1077,54 +1091,54 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
as.integer(length(gleg.abs)), as.double(lambda),
as.double(mymu), as.double(sigma), answer=double(3*n),
eps=as.double(1.0e-5))$ans
- dim(temp9) = c(3,n)
- wz[,iam(1, 1, M)] = temp9[1,]
- wz[,iam(1, 2, M)] = temp9[2,]
- wz[,iam(1, 3, M)] = temp9[3,]
+ dim(temp9) <- c(3,n)
+ wz[,iam(1, 1, M)] <- temp9[1,]
+ wz[,iam(1, 2, M)] <- temp9[2,]
+ wz[,iam(1, 3, M)] <- temp9[3,]
}
for(kk in 1:length(sgh.wts)) {
- abscissae = sign(-discontinuity) * sgh.abs[kk]
- psi = mymu + sqrt(2) * sigma * abscissae # abscissae = z
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+ abscissae <- sign(-discontinuity) * sgh.abs[kk]
+ psi <- mymu + sqrt(2) * sigma * abscissae # abscissae = z
+ temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
derivative = 2)
- wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + sgh.wts[kk] *
+ wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + sgh.wts[kk] *
gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + sgh.wts[kk] *
+ wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + sgh.wts[kk] *
gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + sgh.wts[kk] *
+ wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + sgh.wts[kk] *
gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
}
- temp1 = exp(-discontinuity^2)
+ temp1 <- exp(-discontinuity^2)
for(kk in 1:length(glag.wts)) {
- abscissae = sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
- psi = mymu + sqrt(2) * sigma * abscissae
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
- temp9 = cbind(temp9,
+ abscissae <- sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
+ psi <- mymu + sqrt(2) * sigma * abscissae
+ temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
+ temp9 <- cbind(temp9,
1 / (2 * sqrt((abscissae-discontinuity^2)^2 +
discontinuity^2) *
sqrt(pi) * sigma^2))
- temp7 = temp1 * glag.wts[kk]
- wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + temp7 *
+ temp7 <- temp1 * glag.wts[kk]
+ wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp7 *
glag.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + temp7 *
+ wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp7 *
glag.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + temp7 *
+ wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp7 *
glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
}
- wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] * dlambda.deta^2
- wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] * dlambda.deta
- wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] * dsigma.deta * dlambda.deta
+ wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] * dlambda.deta^2
+ wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] * dlambda.deta
+ wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] * dsigma.deta * dlambda.deta
if ( .diagW && iter <= .iters.diagW) {
- wz[,iam(1, 2, M)] = wz[,iam(1, 3, M)] = 0
+ wz[,iam(1, 2, M)] <- wz[,iam(1, 3, M)] <- 0
}
- wz[,iam(2, 3, M)] = wz[,iam(2, 3, M)] * dsigma.deta
- wz[,iam(3, 3, M)] = wz[,iam(3, 3, M)] * dsigma.deta^2
+ wz[,iam(2, 3, M)] <- wz[,iam(2, 3, M)] * dsigma.deta
+ wz[,iam(3, 3, M)] <- wz[,iam(3, 3, M)] * dsigma.deta^2
c(w) * wz
}), list(.lsigma = lsigma,
@@ -1137,14 +1151,13 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
-lmscreg.control <- function(cdf = TRUE, at.arg = NULL, x0 = NULL, ...)
-{
+lmscreg.control <- function(cdf = TRUE, at.arg = NULL, x0 = NULL, ...) {
- if (!is.logical(cdf)) {
- warning("'cdf' is not logical; using TRUE instead")
- cdf = TRUE
- }
- list(cdf =cdf, at.arg=at.arg, x0=x0)
+ if (!is.logical(cdf)) {
+ warning("'cdf' is not logical; using TRUE instead")
+ cdf <- TRUE
+ }
+ list(cdf = cdf, at.arg = at.arg, x0 = x0)
}
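lmscreg.control() simply packages the three options consulted by the lmscreg families (the last slots above test control$cdf), so for example:

    lmscreg.control(cdf = FALSE)
    # $cdf
    # [1] FALSE
    # $at.arg
    # NULL
    # $x0
    # NULL
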
@@ -1162,19 +1175,19 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
M <- length(extra$w.aml)
- if (M > 1) y = matrix(y, extra$n, extra$M)
+ if (M > 1) y <- matrix(y, extra$n, extra$M)
- devi = cbind((y - mu)^2)
+ devi <- cbind((y - mu)^2)
if (residuals) {
stop("not sure here")
- wz = VGAM.weights.function(w = w, M = extra$M, n = extra$n)
+ wz <- VGAM.weights.function(w = w, M = extra$M, n = extra$n)
return((y - mu) * sqrt(wz) * matrix(extra$w.aml,extra$n,extra$M))
} else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ all.deviances <- numeric(M)
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
for(ii in 1:M)
- all.deviances[ii] = sum(c(w) * devi[, ii] *
- Wr1(myresid[, ii], w=extra$w.aml[ii]))
+ all.deviances[ii] <- sum(c(w) * devi[, ii] *
+ Wr1(myresid[, ii], w=extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
all.deviances else sum(all.deviances)
@@ -1185,8 +1198,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
amlnormal <- function(w.aml = 1, parallel = FALSE,
lexpectile = "identity",
iexpectile = NULL,
- imethod = 1, digw = 4)
-{
+ imethod = 1, digw = 4) {
if (!is.Numeric(w.aml, positive = TRUE))
@@ -1211,14 +1223,14 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
"Links: ",
namesof("expectile", link = lexpectile, earg = eexpectile)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
}), list( .parallel = parallel ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlnormal.deviance(mu = mu, y = y, w = w, residuals = residuals,
eta = eta, extra = extra)
},
initialize = eval(substitute(expression({
- extra$w.aml = .w.aml
+ extra$w.aml <- .w.aml
temp5 <-
w.y.check(w = w, y = y,
@@ -1231,9 +1243,9 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
- extra$y.names = y.names =
+ extra$M <- M <- length(extra$w.aml) # Recycle if necessary
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
predictors.names <- c(namesof(
@@ -1241,16 +1253,16 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
earg = .eexpectile, tag = FALSE))
if (!length(etastart)) {
- mean.init = if ( .imethod == 1)
+ mean.init <- if ( .imethod == 1)
rep(median(y), length = n) else
if ( .imethod == 2)
rep(weighted.mean(y, w), length = n) else {
- junk = lm.wfit(x = x, y = y, w = w)
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
junk$fitted
}
if (length( .iexpectile))
- mean.init = matrix( .iexpectile, n, M, byrow = TRUE)
- etastart =
+ mean.init <- matrix( .iexpectile, n, M, byrow = TRUE)
+ etastart <-
matrix(theta2eta(mean.init, .lexpectile,
earg = .eexpectile), n, M)
}
@@ -1258,56 +1270,56 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
.iexpectile = iexpectile,
.imethod = imethod, .digw = digw, .w.aml = w.aml ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- ans = eta = as.matrix(eta)
+ ans <- eta <- as.matrix(eta)
for(ii in 1:ncol(eta))
- ans[, ii] = eta2theta(eta[, ii], .lexpectile, earg = .eexpectile)
- dimnames(ans) = list(dimnames(eta)[[1]], extra$y.names)
+ ans[, ii] <- eta2theta(eta[, ii], .lexpectile, earg = .eexpectile)
+ dimnames(ans) <- list(dimnames(eta)[[1]], extra$y.names)
ans
}, list( .lexpectile = lexpectile, .eexpectile = eexpectile ))),
last = eval(substitute(expression({
- misc$link = rep(.lexpectile, length = M)
- names(misc$link) = extra$y.names
+ misc$link <- rep(.lexpectile, length = M)
+ names(misc$link) <- extra$y.names
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
for (ilocal in 1:M)
misc$earg[[ilocal]] <- list(theta = NULL)
- names(misc$earg) = names(misc$link)
+ names(misc$earg) <- names(misc$link)
- misc$parallel = .parallel
- misc$expected = TRUE
- extra$percentile = numeric(M)
+ misc$parallel <- .parallel
+ misc$expected <- TRUE
+ extra$percentile <- numeric(M)
misc$multipleResponses <- TRUE
for(ii in 1:M) {
- use.w = if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
- extra$percentile[ii] = 100 *
+ use.w <- if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
+ extra$percentile[ii] <- 100 *
weighted.mean(myresid[, ii] <= 0, use.w)
}
- names(extra$percentile) = names(misc$link)
+ names(extra$percentile) <- names(misc$link)
- extra$individual = TRUE
+ extra$individual <- TRUE
if (!(M > 1 && ncol(cbind(w)) == M)) {
- extra$deviance = amlnormal.deviance(mu = mu, y = y, w = w,
+ extra$deviance <- amlnormal.deviance(mu = mu, y = y, w = w,
residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
+ names(extra$deviance) <- extra$y.names
}
}), list( .lexpectile = lexpectile,
.eexpectile = eexpectile, .parallel = parallel ))),
vfamily = c("amlnormal"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .lexpectile, earg = .eexpectile)
- dexpectile.deta = dtheta.deta(mymu, .lexpectile, earg = .eexpectile)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ mymu <- eta2theta(eta, .lexpectile, earg = .eexpectile)
+ dexpectile.deta <- dtheta.deta(mymu, .lexpectile, earg = .eexpectile)
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
+ wor1 <- Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
byrow = TRUE))
c(w) * myresid * wor1 * dexpectile.deta
}), list( .lexpectile = lexpectile,
.eexpectile = eexpectile ))),
weight = eval(substitute(expression({
- wz = c(w) * wor1 * dexpectile.deta^2
+ wz <- c(w) * wor1 * dexpectile.deta^2
wz
}), list( .lexpectile = lexpectile,
.eexpectile = eexpectile ))))
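A fitting sketch for amlnormal() with several asymmetry weights (the data frame mydat, with response y and covariate x, is hypothetical):

    library(VGAM)
    fit <- vglm(y ~ x, amlnormal(w.aml = c(0.1, 1, 10)), data = mydat, trace = TRUE)
    fit@extra$percentile     # approximate percentile attained by each fitted expectile
    coef(fit, matrix = TRUE)
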
@@ -1323,23 +1335,23 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
- extra = NULL) {
+ extra = NULL) {
M <- length(extra$w.aml)
- if (M > 1) y = matrix(y,extra$n,extra$M)
+ if (M > 1) y <- matrix(y,extra$n,extra$M)
nz <- y > 0
- devi = cbind(-(y - mu))
- devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ devi <- cbind(-(y - mu))
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
if (residuals) {
stop("not sure here")
return(sign(y - mu) * sqrt(2 * abs(devi) * w) *
matrix(extra$w,extra$n,extra$M))
} else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- for(ii in 1:M) all.deviances[ii] = 2 * sum(c(w) * devi[, ii] *
+ all.deviances <- numeric(M)
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
+ for(ii in 1:M) all.deviances[ii] <- 2 * sum(c(w) * devi[, ii] *
Wr1(myresid[, ii], w=extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
@@ -1348,8 +1360,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
amlpoisson <- function(w.aml = 1, parallel = FALSE, imethod = 1,
- digw = 4, link = "loge")
-{
+ digw = 4, link = "loge") {
if (!is.Numeric(w.aml, positive = TRUE))
stop("'w.aml' must be a vector of positive values")
@@ -1360,18 +1371,18 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
new("vglmff",
- blurb = c("Poisson expectile regression by",
- " asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg = earg)),
+ blurb = c("Poisson expectile regression by",
+ " asymmetric maximum likelihood estimation\n\n",
+ "Link: ", namesof("expectile", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
}), list( .parallel = parallel ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlpoisson.deviance(mu = mu, y = y, w = w, residuals = residuals,
eta = eta, extra = extra)
},
initialize = eval(substitute(expression({
- extra$w.aml = .w.aml
+ extra$w.aml <- .w.aml
temp5 <-
w.y.check(w = w, y = y,
@@ -1383,75 +1394,76 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
- extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
- extra$individual = FALSE
- predictors.names <-
- c(namesof(paste("expectile(",y.names,")", sep = ""),
- .link , earg = .earg , tag = FALSE))
+ extra$M <- M <- length(extra$w.aml) # Recycle if necessary
+ extra$n <- n
+ extra$y.names <- y.names <-
+ paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
+ extra$individual <- FALSE
+ predictors.names <-
+ c(namesof(paste("expectile(",y.names,")", sep = ""),
+ .link , earg = .earg , tag = FALSE))
- if (!length(etastart)) {
- mean.init = if ( .imethod == 2)
- rep(median(y), length = n) else
- if ( .imethod == 1)
- rep(weighted.mean(y, w), length = n) else {
- junk = lm.wfit(x = x, y = y, w = w)
- abs(junk$fitted)
- }
- etastart =
- matrix(theta2eta(mean.init, .link , earg = .earg ), n, M)
- }
- }), list( .link = link, .earg = earg, .imethod = imethod,
- .digw = digw, .w.aml = w.aml ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu.ans = eta = as.matrix(eta)
- for(ii in 1:ncol(eta))
- mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
- dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
- mu.ans
+ if (!length(etastart)) {
+ mean.init <- if ( .imethod == 2)
+ rep(median(y), length = n) else
+ if ( .imethod == 1)
+ rep(weighted.mean(y, w), length = n) else {
+ junk = lm.wfit(x = x, y = c(y), w = c(w))
+ abs(junk$fitted)
+ }
+ etastart <-
+ matrix(theta2eta(mean.init, .link , earg = .earg ), n, M)
+ }
+ }), list( .link = link, .earg = earg, .imethod = imethod,
+ .digw = digw, .w.aml = w.aml ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu.ans <- eta <- as.matrix(eta)
+ for(ii in 1:ncol(eta))
+ mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
+ dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
+ mu.ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$multipleResponses <- TRUE
- misc$expected = TRUE
- misc$parallel = .parallel
+ misc$expected <- TRUE
+ misc$parallel <- .parallel
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
+ misc$link <- rep(.link , length = M)
+ names(misc$link) <- extra$y.names
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
for (ilocal in 1:M)
misc$earg[[ilocal]] <- list(theta = NULL)
- names(misc$earg) = names(misc$link)
+ names(misc$earg) <- names(misc$link)
- extra$percentile = numeric(M)
+ extra$percentile <- numeric(M)
for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
-
- extra$individual = TRUE
- extra$deviance = amlpoisson.deviance(mu = mu, y = y, w = w,
- residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
+ extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) <- names(misc$link)
+
+ extra$individual <- TRUE
+ extra$deviance <- amlpoisson.deviance(mu = mu, y = y, w = w,
+ residuals = FALSE,
+ eta = eta, extra = extra)
+ names(extra$deviance) <- extra$y.names
}), list( .link = link, .earg = earg, .parallel = parallel ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link , earg = .earg )
+ theta2eta(mu, link = .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
vfamily = c("amlpoisson"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg )
- dexpectile.deta = dtheta.deta(mymu, .link , earg = .earg )
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
- byrow = TRUE))
+ mymu <- eta2theta(eta, .link , earg = .earg )
+ dexpectile.deta <- dtheta.deta(mymu, .link , earg = .earg )
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
+ wor1 <- Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ byrow = TRUE))
c(w) * myresid * wor1 * (dexpectile.deta / mymu)
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- use.mu = mymu
- use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
- wz = c(w) * wor1 * use.mu * (dexpectile.deta / mymu)^2
+ use.mu <- mymu
+ use.mu[use.mu < .Machine$double.eps^(3/4)] <- .Machine$double.eps^(3/4)
+ wz <- c(w) * wor1 * use.mu * (dexpectile.deta / mymu)^2
wz
}), list( .link = link, .earg = earg ))))
}
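
A minimal usage sketch of the amlpoisson() family refactored above, assuming the
standard vglm() fitting interface; the simulated data and the chosen w.aml values
are purely illustrative.

    # Sketch only: expectile Poisson regression at several asymmetry weights.
    library(VGAM)
    set.seed(1)
    adata <- data.frame(x2 = runif(100))
    adata$y <- rpois(100, lambda = exp(1 + 2 * adata$x2))
    fit <- vglm(y ~ x2, amlpoisson(w.aml = c(0.1, 1, 10)), data = adata)
    coef(fit, matrix = TRUE)    # one expectile curve per w.aml value
    fit@extra$percentile        # percentile implied by each asymmetry weight
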
@@ -1461,12 +1473,12 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
- eta, extra = NULL) {
+ eta, extra = NULL) {
M <- length(extra$w.aml)
- if (M > 1) y = matrix(y,extra$n,extra$M)
+ if (M > 1) y <- matrix(y,extra$n,extra$M)
devy <- y
@@ -1490,9 +1502,9 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
stop("not sure here")
return(sign(y - mu) * sqrt(abs(devi) * w))
} else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - matrix(mu,extra$n,extra$M)
- for(ii in 1:M) all.deviances[ii] = sum(c(w) * devi[, ii] *
+ all.deviances <- numeric(M)
+ myresid <- matrix(y,extra$n,extra$M) - matrix(mu,extra$n,extra$M)
+ for(ii in 1:M) all.deviances[ii] <- sum(c(w) * devi[, ii] *
Wr1(myresid[, ii], w=extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
@@ -1501,8 +1513,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
amlbinomial <- function(w.aml = 1, parallel = FALSE, digw = 4,
- link = "logit")
-{
+ link = "logit") {
if (!is.Numeric(w.aml, positive = TRUE))
stop("'w.aml' must be a vector of positive values")
@@ -1514,11 +1525,11 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
new("vglmff",
- blurb = c("Logistic expectile regression by ",
- "asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg = earg)),
+ blurb = c("Logistic expectile regression by ",
+ "asymmetric maximum likelihood estimation\n\n",
+ "Link: ", namesof("expectile", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
}), list( .parallel = parallel ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlbinomial.deviance(mu = mu, y = y, w = w, residuals = residuals,
@@ -1528,97 +1539,97 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
{
- NCOL = function (x)
+ NCOL <- function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
- nn = rep(1, n)
+ if (is.factor(y)) y <- y != levels(y)[1]
+ nn <- rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
+ mustart <- (0.5 + w * y) / (1 + w)
+ no.successes <- w * y
if (any(abs(no.successes - round(no.successes)) > 0.001))
stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
if (any(abs(y - round(y)) > 0.001))
stop("Count data must be integer-valued")
- nn = y[, 1] + y[, 2]
- y = ifelse(nn > 0, y[, 1]/nn, 0)
- w = w * nn
+ nn <- y[, 1] + y[, 2]
+ y <- ifelse(nn > 0, y[, 1]/nn, 0)
+ w <- w * nn
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + nn * y) / (1 + nn)
+ mustart <- (0.5 + nn * y) / (1 + nn)
} else
stop("Response not of the right form")
}
- extra$w.aml = .w.aml
+ extra$w.aml <- .w.aml
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
- extra$y.names = y.names =
+ extra$M <- M <- length(extra$w.aml) # Recycle if necessary
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
- extra$individual = FALSE
+ extra$individual <- FALSE
predictors.names <-
c(namesof(paste("expectile(", y.names, ")", sep = ""),
.link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
- etastart = matrix(theta2eta(mustart, .link , earg = .earg ), n, M)
- mustart = NULL
+ etastart <- matrix(theta2eta(mustart, .link , earg = .earg ), n, M)
+ mustart <- NULL
}
}), list( .link = link, .earg = earg,
.digw = digw, .w.aml = w.aml ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu.ans = eta = as.matrix(eta)
+ mu.ans <- eta <- as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
- dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
+ mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
+ dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
mu.ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
+ misc$link <- rep(.link , length = M)
+ names(misc$link) <- extra$y.names
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
for (ilocal in 1:M)
misc$earg[[ilocal]] <- list(theta = NULL)
- names(misc$earg) = names(misc$link)
+ names(misc$earg) <- names(misc$link)
- misc$parallel = .parallel
- misc$expected = TRUE
+ misc$parallel <- .parallel
+ misc$expected <- TRUE
- extra$percentile = numeric(M)
+ extra$percentile <- numeric(M)
for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
+ extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) <- names(misc$link)
- extra$individual = TRUE
- extra$deviance = amlbinomial.deviance(mu = mu, y = y, w = w,
+ extra$individual <- TRUE
+ extra$deviance <- amlbinomial.deviance(mu = mu, y = y, w = w,
residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
+ names(extra$deviance) <- extra$y.names
}), list( .link = link, .earg = earg, .parallel = parallel ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
theta2eta(mu, link = .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
vfamily = c("amlbinomial"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg )
- use.mu = mymu
+ mymu <- eta2theta(eta, .link , earg = .earg )
+ use.mu <- mymu
use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
- dexpectile.deta = dtheta.deta(use.mu, .link , earg = .earg )
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ dexpectile.deta <- dtheta.deta(use.mu, .link , earg = .earg )
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
+ wor1 <- Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
byrow = TRUE))
c(w) * myresid * wor1 * (dexpectile.deta / (use.mu * (1-use.mu)))
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- wz = c(w) * wor1 * (dexpectile.deta^2 / (use.mu * (1 - use.mu)))
+ wz <- c(w) * wor1 * (dexpectile.deta^2 / (use.mu * (1 - use.mu)))
wz
}), list( .link = link, .earg = earg))))
}
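
A minimal sketch of amlbinomial() using the two-column (successes, failures)
response form handled in the initialize slot above; the data and the vglm()
call are illustrative assumptions.

    # Sketch only: logistic expectile regression with a two-column response.
    library(VGAM)
    set.seed(2)
    bdata <- data.frame(x2 = runif(50), size = 20)
    bdata$succ <- rbinom(50, size = bdata$size, prob = 0.4)
    fit2 <- vglm(cbind(succ, size - succ) ~ x2,
                 amlbinomial(w.aml = c(0.2, 1, 5)), data = bdata)
    fit2@extra$percentile       # empirical percentile for each w.aml
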
@@ -1637,17 +1648,17 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
M <- length(extra$w.aml)
- if (M > 1) y = matrix(y,extra$n,extra$M)
+ if (M > 1) y <- matrix(y,extra$n,extra$M)
- devy = cbind(-log(y) - 1)
- devi = cbind(-log(mu) - y / mu)
+ devy <- cbind(-log(y) - 1)
+ devi <- cbind(-log(mu) - y / mu)
if (residuals) {
stop("not sure here")
return(sign(y - mu) * sqrt(2 * abs(devi) * w) *
matrix(extra$w,extra$n,extra$M))
} else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ all.deviances <- numeric(M)
+ myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
for(ii in 1:M) all.deviances[ii] = 2 * sum(c(w) *
(devy[, ii] - devi[, ii]) *
Wr1(myresid[, ii], w=extra$w.aml[ii]))
@@ -1660,8 +1671,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
amlexponential <- function(w.aml = 1, parallel = FALSE, imethod = 1,
- digw = 4, link = "loge")
-{
+ digw = 4, link = "loge") {
if (!is.Numeric(w.aml, positive = TRUE))
stop("'w.aml' must be a vector of positive values")
if (!is.Numeric(imethod, allowable.length = 1,
@@ -1675,18 +1685,18 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
link <- attr(earg, "function.name")
- y.names = paste("w.aml = ", round(w.aml, digits = digw), sep = "")
+ y.names <- paste("w.aml = ", round(w.aml, digits = digw), sep = "")
predictors.names <- c(namesof(
paste("expectile(", y.names,")", sep = ""), link, earg = earg))
predictors.names <- paste(predictors.names, collapse = ", ")
new("vglmff",
- blurb = c("Exponential expectile regression by",
- " asymmetric maximum likelihood estimation\n\n",
- "Link: ", predictors.names),
+ blurb = c("Exponential expectile regression by",
+ " asymmetric maximum likelihood estimation\n\n",
+ "Link: ", predictors.names),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
}), list( .parallel = parallel ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlexponential.deviance(mu = mu, y = y, w = w,
@@ -1694,7 +1704,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
eta = eta, extra = extra)
},
initialize = eval(substitute(expression({
- extra$w.aml = .w.aml
+ extra$w.aml <- .w.aml
temp5 <-
@@ -1708,9 +1718,9 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
- extra$y.names = y.names =
+ extra$M <- M <- length(extra$w.aml) # Recycle if necessary
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
extra$individual = FALSE
@@ -1720,67 +1730,67 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
.link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
- mean.init = if ( .imethod == 1)
+ mean.init <- if ( .imethod == 1)
rep(median(y), length = n) else
if ( .imethod == 2)
rep(weighted.mean(y, w), length = n) else {
1 / (y + 1)
}
- etastart = matrix(theta2eta(mean.init, .link , earg = .earg ),
+ etastart <- matrix(theta2eta(mean.init, .link , earg = .earg ),
n, M)
}
}), list( .link = link, .earg = earg, .imethod = imethod,
.digw = digw, .w.aml = w.aml ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu.ans = eta = as.matrix(eta)
+ mu.ans <- eta <- as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
- dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
+ mu.ans[, ii] <- eta2theta(eta[, ii], .link , earg = .earg )
+ dimnames(mu.ans) <- list(dimnames(eta)[[1]], extra$y.names)
mu.ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$multipleResponses <- TRUE
- misc$expected = TRUE
- misc$parallel = .parallel
+ misc$expected <- TRUE
+ misc$parallel <- .parallel
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
+ misc$link <- rep(.link , length = M)
+ names(misc$link) <- extra$y.names
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
for (ilocal in 1:M)
misc$earg[[ilocal]] <- list(theta = NULL)
- names(misc$earg) = names(misc$link)
+ names(misc$earg) <- names(misc$link)
- extra$percentile = numeric(M)
+ extra$percentile <- numeric(M)
for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
+ extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) <- names(misc$link)
- extra$individual = TRUE
+ extra$individual <- TRUE
extra$deviance =
amlexponential.deviance(mu = mu, y = y, w = w,
residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
+ names(extra$deviance) <- extra$y.names
}), list( .link = link, .earg = earg, .parallel = parallel ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
theta2eta(mu, link = .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
vfamily = c("amlexponential"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg )
- bigy = matrix(y,extra$n,extra$M)
- dl.dmu = (bigy - mymu) / mymu^2
+ mymu <- eta2theta(eta, .link , earg = .earg )
+ bigy <- matrix(y,extra$n,extra$M)
+ dl.dmu <- (bigy - mymu) / mymu^2
- dmu.deta = dtheta.deta(mymu, .link , earg = .earg )
- myresid = bigy - cbind(mymu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ dmu.deta <- dtheta.deta(mymu, .link , earg = .earg )
+ myresid <- bigy - cbind(mymu)
+ wor1 <- Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
byrow = TRUE))
c(w) * wor1 * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- ned2l.dmu2 = 1 / mymu^2
- wz = c(w) * wor1 * ned2l.dmu2 * dmu.deta^2
+ ned2l.dmu2 <- 1 / mymu^2
+ wz <- c(w) * wor1 * ned2l.dmu2 * dmu.deta^2
wz
}), list( .link = link, .earg = earg ))))
}
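
A minimal sketch of amlexponential() in the same spirit; the data-generating
step and the vglm() call are illustrative assumptions.

    # Sketch only: exponential expectile regression.
    library(VGAM)
    set.seed(3)
    edata <- data.frame(x2 = runif(80))
    edata$y <- rexp(80, rate = exp(-(1 + edata$x2)))
    fit3 <- vglm(y ~ x2, amlexponential(w.aml = c(0.5, 1, 2)), data = edata)
    fit3@extra$percentile       # percentiles implied by each asymmetry weight
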
@@ -1805,19 +1815,19 @@ dalap <- function(x, location = 0, scale = 1, tau = 0.5,
- NN = max(length(x), length(location), length(scale), length(kappa))
- location = rep(location, length.out = NN);
- scale = rep(scale, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
+ NN <- max(length(x), length(location), length(scale), length(kappa))
+ location <- rep(location, length.out = NN);
+ scale <- rep(scale, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ x <- rep(x, length.out = NN)
+ tau <- rep(tau, length.out = NN)
- logconst = 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
- exponent = -(sqrt(2) / scale) * abs(x - location) *
+ logconst <- 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
+ exponent <- -(sqrt(2) / scale) * abs(x - location) *
ifelse(x >= location, kappa, 1/kappa)
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logconst[!indexTF] = NaN
+ indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logconst[!indexTF] <- NaN
if (log.arg) logconst + exponent else exp(logconst + exponent)
}
@@ -1825,69 +1835,69 @@ dalap <- function(x, location = 0, scale = 1, tau = 0.5,
ralap <- function(n, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n)
- tau = rep(tau, length.out = use.n);
- kappa = rep(kappa, length.out = use.n);
- ans = location + scale *
+ location <- rep(location, length.out = use.n);
+ scale <- rep(scale, length.out = use.n)
+ tau <- rep(tau, length.out = use.n);
+ kappa <- rep(kappa, length.out = use.n);
+ ans <- location + scale *
log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
+ indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] <- NaN
ans
}
palap <- function(q, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location), length(scale), length(kappa))
- location = rep(location, length.out = NN);
- scale = rep(scale, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- exponent = -(sqrt(2) / scale) * abs(q - location) *
+ NN <- max(length(q), length(location), length(scale), length(kappa))
+ location <- rep(location, length.out = NN);
+ scale <- rep(scale, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ q <- rep(q, length.out = NN)
+ tau <- rep(tau, length.out = NN);
+
+ exponent <- -(sqrt(2) / scale) * abs(q - location) *
ifelse(q >= location, kappa, 1/kappa)
- temp5 = exp(exponent) / (1 + kappa^2)
- ans = 1 - temp5
- index1 = (q < location)
- ans[index1] = (kappa[index1])^2 * temp5[index1]
+ temp5 <- exp(exponent) / (1 + kappa^2)
+ ans <- 1 - temp5
+ index1 <- (q < location)
+ ans[index1] <- (kappa[index1])^2 * temp5[index1]
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
+ indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] <- NaN
ans
}
qalap <- function(p, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau / (1 - tau))) {
- NN = max(length(p), length(location), length(scale), length(kappa))
- location = rep(location, length.out = NN);
- scale = rep(scale, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- p = rep(p, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- ans = p
- temp5 = kappa^2 / (1 + kappa^2)
- index1 = (p <= temp5)
- exponent = p[index1] / temp5[index1]
- ans[index1] = location[index1] + (scale[index1] * kappa[index1]) *
+ NN <- max(length(p), length(location), length(scale), length(kappa))
+ location <- rep(location, length.out = NN);
+ scale <- rep(scale, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ p <- rep(p, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ ans <- p
+ temp5 <- kappa^2 / (1 + kappa^2)
+ index1 <- (p <= temp5)
+ exponent <- p[index1] / temp5[index1]
+ ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
log(exponent) / sqrt(2)
- ans[!index1] = location[!index1] - (scale[!index1] / kappa[!index1]) *
+ ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
(log1p((kappa[!index1])^2) +
log1p(-p[!index1])) / sqrt(2)
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) &
+ indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) &
(p >= 0) & (p <= 1)
- ans[!indexTF] = NaN
- ans[p == 0 & indexTF] = -Inf
- ans[p == 1 & indexTF] = Inf
+ ans[!indexTF] <- NaN
+ ans[p == 0 & indexTF] <- -Inf
+ ans[p == 1 & indexTF] <- Inf
ans
}
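
Quick numerical sanity checks on the asymmetric Laplace d/p/q/r functions
defined above; the parameter values below are illustrative only.

    pvec <- c(0.1, 0.25, 0.5, 0.9)
    qq <- qalap(pvec, location = 1, scale = 2, kappa = 0.8)
    palap(qq, location = 1, scale = 2, kappa = 0.8)       # recovers pvec
    integrate(dalap, lower = -Inf, upper = qq[2],         # density integrates
              location = 1, scale = 2, kappa = 0.8)$value # to approx 0.25
    set.seed(4)
    mean(ralap(1e4, location = 1, scale = 2, kappa = 0.8) <= qq[3]) # approx 0.5
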
@@ -1898,18 +1908,18 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- location.ald = rep(location.ald, length.out = use.n);
- scale.ald = rep(scale.ald, length.out = use.n)
- tau = rep(tau, length.out = use.n);
- kappa = rep(kappa, length.out = use.n);
- ans = exp(location.ald) *
+ location.ald <- rep(location.ald, length.out = use.n);
+ scale.ald <- rep(scale.ald, length.out = use.n);
+ tau <- rep(tau, length.out = use.n);
+ kappa <- rep(kappa, length.out = use.n);
+ ans <- exp(location.ald) *
(runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] <- NaN
ans
}
@@ -1922,79 +1932,79 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- NN = max(length(x), length(location.ald),
+ NN <- max(length(x), length(location.ald),
length(scale.ald), length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
- exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) *
+ location <- rep(location.ald, length.out = NN);
+ scale <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ x <- rep(x, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- exp(location.ald)
+ exponent <- ifelse(x >= Delta, -(Alpha+1), (Beta-1)) *
(log(x) - location.ald)
- logdensity = -location.ald + log(Alpha) + log(Beta) -
+ logdensity <- -location.ald + log(Alpha) + log(Beta) -
log(Alpha + Beta) + exponent
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] <- NaN
+ logdensity[x < 0 & indexTF] <- -Inf
if (log.arg) logdensity else exp(logdensity)
}
qloglap <- function(p, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(p), length(location.ald), length(scale.ald),
- length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- p = rep(p, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
-
- temp9 = Alpha + Beta
- ans = Delta * (p * temp9 / Alpha)^(1/Beta)
- index1 = (p > Alpha / temp9)
- ans[index1] = (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
- ans[p == 0] = 0
- ans[p == 1] = Inf
-
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
+ NN <- max(length(p), length(location.ald), length(scale.ald),
+ length(kappa))
+ location <- rep(location.ald, length.out = NN);
+ scale <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ p <- rep(p, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- exp(location.ald)
+
+ temp9 <- Alpha + Beta
+ ans <- Delta * (p * temp9 / Alpha)^(1/Beta)
+ index1 <- (p > Alpha / temp9)
+ ans[index1] <- (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
+ ans[p == 0] <- 0
+ ans[p == 1] <- Inf
+
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
(p >= 0) & (p <= 1) # &
- ans[!indexTF] = NaN
+ ans[!indexTF] <- NaN
ans
}
ploglap <- function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
-
- temp9 = Alpha + Beta
- ans = (Alpha / temp9) * (q / Delta)^(Beta)
- ans[q <= 0] = 0
- index1 = (q >= Delta)
- ans[index1] = (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
-
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ NN <- max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location <- rep(location.ald, length.out = NN);
+ scale <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ q <- rep(q, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- exp(location.ald)
+
+ temp9 <- Alpha + Beta
+ ans <- (Alpha / temp9) * (q / Delta)^(Beta)
+ ans[q <= 0] <- 0
+ index1 <- (q >= Delta)
+ ans[index1] <- (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
+
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] <- NaN
ans
}
@@ -2017,39 +2027,39 @@ dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- NN = max(length(x), length(location.ald),
+ NN <- max(length(x), length(location.ald),
length(scale.ald), length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
+ location <- rep(location.ald, length.out = NN);
+ scale <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ x <- rep(x, length.out = NN)
+ tau <- rep(tau, length.out = NN)
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = logit(location.ald, inverse = TRUE) # earg = earg
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- logit(location.ald, inverse = TRUE) # earg = earg
- exponent = ifelse(x >= Delta, -Alpha, Beta) *
+ exponent <- ifelse(x >= Delta, -Alpha, Beta) *
(logit(x) - # earg = earg
location.ald)
- logdensity = log(Alpha) + log(Beta) - log(Alpha + Beta) -
+ logdensity <- log(Alpha) + log(Beta) - log(Alpha + Beta) -
log(x) - log1p(-x) + exponent
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] <- NaN
+ logdensity[x < 0 & indexTF] <- -Inf
+ logdensity[x > 1 & indexTF] <- -Inf
if (log.arg) logdensity else exp(logdensity)
}
qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- qqq = qalap(p = p, location = location.ald, scale = scale.ald,
+ qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
tau = tau, kappa = kappa)
- ans = logit(qqq, inverse = TRUE) # earg = earg
- ans[(p < 0) | (p > 1)] = NaN
- ans[p == 0] = 0
- ans[p == 1] = 1
+ ans <- logit(qqq, inverse = TRUE) # earg = earg
+ ans[(p < 0) | (p > 1)] <- NaN
+ ans[p == 0] <- 0
+ ans[p == 1] <- 1
ans
}
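
As the definition above shows, qlogitlap() is simply the inverse-logit
transform of qalap(); a quick check with illustrative parameter values.

    pvec <- c(0.05, 0.5, 0.95)
    all.equal(qlogitlap(pvec, location.ald = 0.3, scale.ald = 1, kappa = 1.2),
              logit(qalap(pvec, location = 0.3, scale = 1, kappa = 1.2),
                    inverse = TRUE))   # TRUE
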
@@ -2057,21 +2067,21 @@ qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
plogitlap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald),
+ NN <- max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN); q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- indexTF = (q > 0) & (q < 1)
- qqq = logit(q[indexTF]) # earg = earg
- ans = q
- ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
+ location.ald <- rep(location.ald, length.out = NN);
+ scale.ald <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN); q <- rep(q, length.out = NN)
+ tau <- rep(tau, length.out = NN);
+
+ indexTF <- (q > 0) & (q < 1)
+ qqq <- logit(q[indexTF]) # earg = earg
+ ans <- q
+ ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
scale = scale.ald[indexTF],
tau = tau[indexTF], kappa = kappa[indexTF])
- ans[q >= 1] = 1
- ans[q <= 0] = 0
+ ans[q >= 1] <- 1
+ ans[q <= 0] <- 0
ans
}
@@ -2099,45 +2109,45 @@ dprobitlap <-
- NN = max(length(x), length(location.ald), length(scale.ald),
+ NN <- max(length(x), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- logdensity = x * NaN
- index1 = (x > 0) & (x < 1)
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ location.ald <- rep(location.ald, length.out = NN);
+ scale.ald <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN); x <- rep(x, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ logdensity <- x * NaN
+ index1 <- (x > 0) & (x < 1)
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
if (meth2) {
- dx.dy = x
- use.x = probit(x[index1]) # earg = earg
+ dx.dy <- x
+ use.x <- probit(x[index1]) # earg = earg
logdensity[index1] =
dalap(x = use.x, location = location.ald[index1],
scale = scale.ald[index1], tau = tau[index1],
kappa = kappa[index1], log = TRUE)
} else {
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = pnorm(location.ald)
- use.x = qnorm(x) # qnorm(x[index1])
- log.dy.dw = dnorm(use.x, log = TRUE)
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- pnorm(location.ald)
+ use.x <- qnorm(x) # qnorm(x[index1])
+ log.dy.dw <- dnorm(use.x, log = TRUE)
- exponent = ifelse(x >= Delta, -Alpha, Beta) *
+ exponent <- ifelse(x >= Delta, -Alpha, Beta) *
(use.x - location.ald) - log.dy.dw
- logdensity[index1] = (log(Alpha) + log(Beta) -
+ logdensity[index1] <- (log(Alpha) + log(Beta) -
log(Alpha + Beta) + exponent)[index1]
}
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
+ logdensity[!indexTF] <- NaN
+ logdensity[x < 0 & indexTF] <- -Inf
+ logdensity[x > 1 & indexTF] <- -Inf
if (meth2) {
- dx.dy[index1] = probit(x[index1], # earg = earg,
+ dx.dy[index1] <- probit(x[index1], # earg = earg,
inverse = FALSE, deriv = 1)
- dx.dy[!index1] = 0
- dx.dy[!indexTF] = NaN
+ dx.dy[!index1] <- 0
+ dx.dy[!indexTF] <- NaN
if (log.arg) logdensity - log(abs(dx.dy)) else
exp(logdensity) / abs(dx.dy)
} else {
@@ -2148,12 +2158,12 @@ dprobitlap <-
qprobitlap <- function(p, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- qqq = qalap(p = p, location = location.ald, scale = scale.ald,
+ qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
tau = tau, kappa = kappa)
- ans = probit(qqq, inverse = TRUE) # , earg = earg
+ ans <- probit(qqq, inverse = TRUE) # , earg = earg
ans[(p < 0) | (p > 1)] = NaN
- ans[p == 0] = 0
- ans[p == 1] = 1
+ ans[p == 0] <- 0
+ ans[p == 1] <- 1
ans
}
@@ -2161,22 +2171,22 @@ qprobitlap <- function(p, location.ald = 0, scale.ald = 1,
pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald),
+ NN <- max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- indexTF = (q > 0) & (q < 1)
- qqq = probit(q[indexTF]) # earg = earg
- ans = q
- ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
+ location.ald <- rep(location.ald, length.out = NN);
+ scale.ald <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ q <- rep(q, length.out = NN)
+ tau <- rep(tau, length.out = NN);
+
+ indexTF <- (q > 0) & (q < 1)
+ qqq <- probit(q[indexTF]) # earg = earg
+ ans <- q
+ ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
scale = scale.ald[indexTF],
tau = tau[indexTF], kappa = kappa[indexTF])
- ans[q >= 1] = 1
- ans[q <= 0] = 0
+ ans[q >= 1] <- 1
+ ans[q <= 0] <- 0
ans
}
@@ -2200,45 +2210,45 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- NN = max(length(x), length(location.ald), length(scale.ald),
+ NN <- max(length(x), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- logdensity = x * NaN
- index1 = (x > 0) & (x < 1)
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ location.ald <- rep(location.ald, length.out = NN);
+ scale.ald <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ x <- rep(x, length.out = NN)
+ tau <- rep(tau, length.out = NN)
+
+ logdensity <- x * NaN
+ index1 <- (x > 0) & (x < 1)
+ indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
if (meth2) {
- dx.dy = x
- use.w = cloglog(x[index1]) # earg = earg
- logdensity[index1] =
+ dx.dy <- x
+ use.w <- cloglog(x[index1]) # earg = earg
+ logdensity[index1] <-
dalap(x = use.w, location = location.ald[index1],
scale = scale.ald[index1],
tau = tau[index1],
kappa = kappa[index1], log = TRUE)
} else {
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = cloglog(location.ald, inverse = TRUE)
+ Alpha <- sqrt(2) * kappa / scale.ald
+ Beta <- sqrt(2) / (scale.ald * kappa)
+ Delta <- cloglog(location.ald, inverse = TRUE)
- exponent = ifelse(x >= Delta, -(Alpha+1), Beta-1) * log(-log1p(-x)) +
+ exponent <- ifelse(x >= Delta, -(Alpha+1), Beta-1) * log(-log1p(-x)) +
ifelse(x >= Delta, Alpha, -Beta) * location.ald
- logdensity[index1] = (log(Alpha) + log(Beta) -
+ logdensity[index1] <- (log(Alpha) + log(Beta) -
log(Alpha + Beta) - log1p(-x) + exponent)[index1]
}
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
+ logdensity[!indexTF] <- NaN
+ logdensity[x < 0 & indexTF] <- -Inf
+ logdensity[x > 1 & indexTF] <- -Inf
if (meth2) {
- dx.dy[index1] = cloglog(x[index1], # earg = earg,
+ dx.dy[index1] <- cloglog(x[index1], # earg = earg,
inverse = FALSE, deriv = 1)
- dx.dy[!index1] = 0
- dx.dy[!indexTF] = NaN
+ dx.dy[!index1] <- 0
+ dx.dy[!indexTF] <- NaN
if (log.arg) logdensity - log(abs(dx.dy)) else
exp(logdensity) / abs(dx.dy)
} else {
@@ -2250,12 +2260,12 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
qclogloglap <- function(p, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- qqq = qalap(p = p, location = location.ald, scale = scale.ald,
+ qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
tau = tau, kappa = kappa)
- ans = cloglog(qqq, inverse = TRUE) # , earg = earg
- ans[(p < 0) | (p > 1)] = NaN
- ans[p == 0] = 0
- ans[p == 1] = 1
+ ans <- cloglog(qqq, inverse = TRUE) # , earg = earg
+ ans[(p < 0) | (p > 1)] <- NaN
+ ans[p == 0] <- 0
+ ans[p == 1] <- 1
ans
}
@@ -2263,22 +2273,22 @@ qclogloglap <- function(p, location.ald = 0, scale.ald = 1,
pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald),
+ NN <- max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- indexTF = (q > 0) & (q < 1)
- qqq = cloglog(q[indexTF]) # earg = earg
- ans = q
- ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
+ location.ald <- rep(location.ald, length.out = NN);
+ scale.ald <- rep(scale.ald, length.out = NN)
+ kappa <- rep(kappa, length.out = NN);
+ q <- rep(q, length.out = NN)
+ tau <- rep(tau, length.out = NN);
+
+ indexTF <- (q > 0) & (q < 1)
+ qqq <- cloglog(q[indexTF]) # earg = earg
+ ans <- q
+ ans[indexTF] <- palap(q = qqq, location = location.ald[indexTF],
scale = scale.ald[indexTF],
tau = tau[indexTF], kappa = kappa[indexTF])
- ans[q >= 1] = 1
- ans[q <= 0] = 0
+ ans[q >= 1] <- 1
+ ans[q <= 0] <- 0
ans
}
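
Likewise, pclogloglap() maps q through cloglog() and delegates to palap(),
exactly as coded above; a quick check with illustrative parameter values.

    qvec <- c(0.2, 0.5, 0.8)
    all.equal(pclogloglap(qvec, location.ald = 0, scale.ald = 1, kappa = 0.7),
              palap(cloglog(qvec), location = 0, scale = 1, kappa = 0.7)) # TRUE
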
@@ -2291,8 +2301,7 @@ pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
-alaplace2.control <- function(maxit = 100, ...)
-{
+alaplace2.control <- function(maxit = 100, ...) {
list(maxit = maxit)
}
@@ -2352,44 +2361,44 @@ alaplace2.control <- function(maxit = 100, ...)
stop("argument 'eq.scale' must be a single logical")
if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
stop("argument 'parallelLocation' must be a single logical")
- fittedMean = FALSE
+ fittedMean <- FALSE
if (!is.logical(fittedMean) || length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
new("vglmff",
blurb = c("Two-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: ",
- "location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
- "Quantiles: location", "\n",
- "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: ",
+ "location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
+ "Quantiles: location", "\n",
+ "Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
- orig.constraints = constraints
+ orig.constraints <- constraints
- .PARALLEL = .parallelLocation
+ .PARALLEL <- .parallelLocation
- onemat = matrix(1, Mdiv2, 1)
- locatHmat1 = kronecker(if ( .intparloc ) onemat else
+ onemat <- matrix(1, Mdiv2, 1)
+ locatHmat1 <- kronecker(if ( .intparloc ) onemat else
diag(Mdiv2), rbind(1, 0))
- scaleHmat1 = kronecker(if ( .eq.scale ) onemat else
+ scaleHmat1 <- kronecker(if ( .eq.scale ) onemat else
diag(Mdiv2), rbind(0, 1))
- locatHmatk = kronecker(if ( .PARALLEL ) onemat else
+ locatHmatk <- kronecker(if ( .PARALLEL ) onemat else
diag(Mdiv2), rbind(1, 0))
- scaleHmatk = scaleHmat1
+ scaleHmatk <- scaleHmat1
- constraints = cm.vgam(cbind(locatHmatk, scaleHmatk),
- x, .PARALLEL, constraints,
- intercept = FALSE)
+ constraints <- cm.vgam(cbind(locatHmatk, scaleHmatk),
+ x, .PARALLEL , constraints,
+ apply.int = FALSE)
if (names(constraints)[1] == "(Intercept)") {
constraints[["(Intercept)"]] = cbind(locatHmat1, scaleHmat1)
@@ -2399,7 +2408,7 @@ alaplace2.control <- function(maxit = 100, ...)
dotzero <- .zero
Musual <- 2
eval(negzero.expression)
- constraints = cm.zero.vgam(constraints, x, z_Index, M)
+ constraints <- cm.zero.vgam(constraints, x, z_Index, M)
@@ -2409,7 +2418,7 @@ alaplace2.control <- function(maxit = 100, ...)
warning("the inputted 'constraints' argument does not match with ",
"the 'zero', 'parallel', 'eq.scale' arguments. ",
"Using the inputted 'constraints'.")
- constraints = orig.constraints
+ constraints <- orig.constraints
}
}
@@ -2442,24 +2451,24 @@ alaplace2.control <- function(maxit = 100, ...)
- extra$kappa = .kappa
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ extra$kappa <- .kappa
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
- extra$Mdiv2 = Mdiv2 = max(ncoly, length( .kappa ))
- extra$M = M = Musual * Mdiv2
- extra$n = n
+ extra$Mdiv2 <- Mdiv2 <- max(ncoly, length( .kappa ))
+ extra$M <- M <- Musual * Mdiv2
+ extra$n <- n
- extra$tau.names = tau.names =
+ extra$tau.names <- tau.names <-
paste("(tau = ", round(extra$tau, digits = .digt), ")", sep = "")
- extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
+ extra$Y.names <- Y.names <- if (ncoly > 1) dimnames(y)[[2]] else "y"
if (is.null(Y.names) || any(Y.names == ""))
- extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
- extra$y.names = y.names =
+ extra$Y.names <- Y.names <- paste("y", 1:ncoly, sep = "")
+ extra$y.names <- y.names <-
if (ncoly > 1) paste(Y.names, tau.names, sep = "") else tau.names
- extra$individual = FALSE
+ extra$individual <- FALSE
mynames1 <- paste("location", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
@@ -2478,23 +2487,23 @@ alaplace2.control <- function(maxit = 100, ...)
for(jay in 1:Mdiv2) {
y.use <- if (ncoly > 1) y[, jay] else y
if ( .imethod == 1) {
- locat.init[, jay] = weighted.mean(y.use, w[, jay])
- scale.init[, jay] = sqrt(var(y.use) / 2)
+ locat.init[, jay] <- weighted.mean(y.use, w[, jay])
+ scale.init[, jay] <- sqrt(var(y.use) / 2)
} else if ( .imethod == 2) {
- locat.init[, jay] = median(y.use)
- scale.init[, jay] = sqrt(sum(c(w[, jay]) *
+ locat.init[, jay] <- median(y.use)
+ scale.init[, jay] <- sqrt(sum(c(w[, jay]) *
abs(y - median(y.use))) / (sum(w[, jay]) * 2))
} else if ( .imethod == 3) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
y = y.use, w = w[, jay],
df = .dfmu.init )
- locat.init[, jay] = predict(Fit5, x = x[, min(ncol(x), 2)])$y
- scale.init[, jay] =
+ locat.init[, jay] <- predict(Fit5, x = x[, min(ncol(x), 2)])$y
+ scale.init[, jay] <-
sqrt(sum(c(w[, jay]) *
abs(y.use - median(y.use))) / (sum(w[, jay]) * 2))
} else {
- use.this = weighted.mean(y.use, w[, jay])
- locat.init[, jay] = (1 - .sinit) * y.use + .sinit * use.this
+ use.this <- weighted.mean(y.use, w[, jay])
+ locat.init[, jay] <- (1 - .sinit) * y.use + .sinit * use.this
scale.init[, jay] =
sqrt(sum(c(w[, jay]) *
abs(y.use - median(y.use ))) / (sum(w[, jay]) * 2))
@@ -2504,16 +2513,16 @@ alaplace2.control <- function(maxit = 100, ...)
if (length( .ilocat )) {
- locat.init = matrix( .ilocat , n, Mdiv2, byrow = TRUE)
+ locat.init <- matrix( .ilocat , n, Mdiv2, byrow = TRUE)
}
if (length( .iscale )) {
- scale.init = matrix( .iscale , n, Mdiv2, byrow = TRUE)
+ scale.init <- matrix( .iscale , n, Mdiv2, byrow = TRUE)
}
- etastart =
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
- etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .imethod = imethod,
.dfmu.init = dfmu.init,
@@ -2522,14 +2531,14 @@ alaplace2.control <- function(maxit = 100, ...)
.llocat = llocat, .lscale = lscale, .kappa = kappa,
.ilocat = ilocat, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Mdiv2 = extra$Mdiv2
- locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
+ Mdiv2 <- extra$Mdiv2
+ locat <- eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
.llocat , earg = .elocat )
- dimnames(locat) = list(dimnames(eta)[[1]], extra$y.names)
+ dimnames(locat) <- list(dimnames(eta)[[1]], extra$y.names)
myans <- if ( .fittedMean ) {
- kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2,
+ kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2,
byrow = TRUE)
- Scale = eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
+ Scale <- eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
.lscale , earg = .escale )
locat + Scale * (1/kappamat - kappamat)
} else {
@@ -2544,37 +2553,37 @@ alaplace2.control <- function(maxit = 100, ...)
last = eval(substitute(expression({
Musual <- extra$Musual
- tmp34 = c(rep( .llocat , length = Mdiv2),
+ tmp34 <- c(rep( .llocat , length = Mdiv2),
rep( .lscale , length = Mdiv2))
- names(tmp34) = c(mynames1, mynames2)
- tmp34 = tmp34[interleave.VGAM(M, M = Musual)]
- misc$link = tmp34 # Already named
+ names(tmp34) <- c(mynames1, mynames2)
+ tmp34 <- tmp34[interleave.VGAM(M, M = Musual)]
+ misc$link <- tmp34 # Already named
- misc$earg = vector("list", M)
+ misc$earg <- vector("list", M)
misc$Musual <- Musual
for(ii in 1:Mdiv2) {
- misc$earg[[Musual * ii - 1]] = .elocat
- misc$earg[[Musual * ii ]] = .escale
+ misc$earg[[Musual * ii - 1]] <- .elocat
+ misc$earg[[Musual * ii ]] <- .escale
}
- names(misc$earg) = names(misc$link)
+ names(misc$earg) <- names(misc$link)
misc$multipleResponses <- TRUE
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$intparloc = .intparloc
+ misc$expected <- TRUE
+ extra$kappa <- misc$kappa <- .kappa
+ extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu <- .fittedMean # @fitted is not a true mu?
+ misc$intparloc <- .intparloc
- extra$percentile = numeric(Mdiv2) # length(misc$kappa)
- locat = as.matrix(locat)
+ extra$percentile <- numeric(Mdiv2) # length(misc$kappa)
+ locat <- as.matrix(locat)
for(ii in 1:Mdiv2) {
y.use <- if (ncoly > 1) y[, ii] else y
- extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii],
+ extra$percentile[ii] <- 100 * weighted.mean(y.use <= locat[, ii],
w[, ii])
}
# if (ncoly > 1) names(misc$link) else zz:
- names(extra$percentile) = y.names
+ names(extra$percentile) <- y.names
}), list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
.fittedMean = fittedMean,
@@ -2583,13 +2592,13 @@ alaplace2.control <- function(maxit = 100, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 2
- Mdiv2 = extra$Mdiv2
- ymat = matrix(y, extra$n, extra$Mdiv2)
- kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
+ Mdiv2 <- extra$Mdiv2
+ ymat <- matrix(y, extra$n, extra$Mdiv2)
+ kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
- locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
+ locat <- eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
.llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
+ Scale <- eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
.lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
@@ -2604,23 +2613,23 @@ alaplace2.control <- function(maxit = 100, ...)
vfamily = c("alaplace2"),
deriv = eval(substitute(expression({
Musual <- 2
- Mdiv2 = extra$Mdiv2
- ymat = matrix(y, n, Mdiv2)
+ Mdiv2 <- extra$Mdiv2
+ ymat <- matrix(y, n, Mdiv2)
- locat = eta2theta(eta[, Musual * (1:(Mdiv2)) - 1, drop = FALSE],
+ locat <- eta2theta(eta[, Musual * (1:(Mdiv2)) - 1, drop = FALSE],
.llocat , earg = .elocat )
- Scale = eta2theta(eta[, Musual * (1:(Mdiv2)) , drop = FALSE],
+ Scale <- eta2theta(eta[, Musual * (1:(Mdiv2)) , drop = FALSE],
.lscale , earg = .escale )
- kappamat = matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
- zedd = abs(ymat - locat) / Scale
- dl.dlocat = sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
+ kappamat <- matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
+ zedd <- abs(ymat - locat) / Scale
+ dl.dlocat <- sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
sign(ymat - locat) / Scale
- dl.dscale = sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
+ dl.dscale <- sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
zedd / Scale - 1 / Scale
- dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
@@ -2632,8 +2641,8 @@ alaplace2.control <- function(maxit = 100, ...)
weight = eval(substitute(expression({
wz <- matrix(as.numeric(NA), n, M)
- d2l.dlocat2 = 2 / Scale^2
- d2l.dscale2 = 1 / Scale^2
+ d2l.dlocat2 <- 2 / Scale^2
+ d2l.dscale2 <- 1 / Scale^2
wz[, Musual*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
wz[, Musual*(1:Mdiv2) ] <- d2l.dscale2 * dscale.deta^2
@@ -2654,8 +2663,7 @@ alaplace2.control <- function(maxit = 100, ...)
-alaplace1.control <- function(maxit = 100, ...)
-{
+alaplace1.control <- function(maxit = 100, ...) {
list(maxit = maxit)
}
@@ -2709,7 +2717,7 @@ alaplace1.control <- function(maxit = 100, ...)
- fittedMean = FALSE
+ fittedMean <- FALSE
if (!is.logical(fittedMean) || length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
@@ -2728,17 +2736,17 @@ alaplace1.control <- function(maxit = 100, ...)
"Variance: scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
- orig.constraints = constraints
+ orig.constraints <- constraints
- onemat = matrix(1, M, 1)
- locatHmat1 = if ( .intparloc ) onemat else diag(M)
- locatHmatk = if ( .parallelLocation ) onemat else diag(M)
+ onemat <- matrix(1, M, 1)
+ locatHmat1 <- if ( .intparloc ) onemat else diag(M)
+ locatHmatk <- if ( .parallelLocation ) onemat else diag(M)
- constraints = cm.vgam(locatHmatk, x, .parallelLocation, constraints,
- intercept = FALSE)
+ constraints <- cm.vgam(locatHmatk, x, .parallelLocation, constraints,
+ apply.int = FALSE)
if (names(constraints)[1] == "(Intercept)") {
constraints[["(Intercept)"]] = locatHmat1
@@ -2752,7 +2760,7 @@ alaplace1.control <- function(maxit = 100, ...)
warning("the inputted 'constraints' argument does not match with ",
"the 'parallel', 'eq.scale' arguments. ",
"Using the inputted 'constraints'.")
- constraints = orig.constraints
+ constraints <- orig.constraints
}
}
@@ -2785,30 +2793,30 @@ alaplace1.control <- function(maxit = 100, ...)
stop("response must be a vector if 'kappa' or 'Scale.arg' ",
"has a length greater than one")
- extra$kappa = .kappa
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ extra$kappa <- .kappa
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
- extra$M = M = max(length( .Scale.arg ),
+ extra$M <- M <- max(length( .Scale.arg ),
ncoly,
length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length = M)
- extra$kappa = rep( .kappa, length = M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- extra$n = n
+ extra$Scale <- rep( .Scale.arg, length = M)
+ extra$kappa <- rep( .kappa, length = M)
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
+ extra$n <- n
- extra$tau.names = tau.names =
+ extra$tau.names <- tau.names <-
paste("(tau = ", round(extra$tau, digits = .digt), ")", sep = "")
- extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
+ extra$Y.names <- Y.names <- if (ncoly > 1) dimnames(y)[[2]] else "y"
if (is.null(Y.names) || any(Y.names == ""))
- extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
- extra$y.names = y.names =
+ extra$Y.names <- Y.names <- paste("y", 1:ncoly, sep = "")
+ extra$y.names <- y.names <-
if (ncoly > 1) paste(Y.names, tau.names, sep = "") else tau.names
- extra$individual = FALSE
+ extra$individual <- FALSE
mynames1 <- paste("location", if (M > 1) 1:M else "", sep = "")
predictors.names <-
@@ -2821,25 +2829,25 @@ alaplace1.control <- function(maxit = 100, ...)
for(jay in 1:M) {
y.use <- if (ncoly > 1) y[, jay] else y
if ( .imethod == 1) {
- locat.init[, jay] = weighted.mean(y.use, w)
+ locat.init[, jay] <- weighted.mean(y.use, w)
} else if ( .imethod == 2) {
- locat.init[, jay] = median(y.use)
+ locat.init[, jay] <- median(y.use)
} else if ( .imethod == 3) {
Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
y = y.use, w = w, df = .dfmu.init)
- locat.init[, jay] = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ locat.init[, jay] <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
} else {
- use.this = weighted.mean(y.use, w)
- locat.init[, jay] = (1- .sinit) * y.use + .sinit * use.this
+ use.this <- weighted.mean(y.use, w)
+ locat.init[, jay] <- (1- .sinit) * y.use + .sinit * use.this
}
if (length( .ilocat )) {
- locat.init = matrix( .ilocat , n, M, byrow = TRUE)
+ locat.init <- matrix( .ilocat , n, M, byrow = TRUE)
}
- if ( .llocat == "loge") locat.init = abs(locat.init)
- etastart =
+ if ( .llocat == "loge") locat.init <- abs(locat.init)
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
}
}
@@ -2851,14 +2859,14 @@ alaplace1.control <- function(maxit = 100, ...)
.ilocat = ilocat ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
if ( .fittedMean ) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- locat = eta2theta(eta, .llocat , earg = .elocat )
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
locat + Scale * (1/kappamat - kappamat)
} else {
- locat = eta2theta(eta, .llocat , earg = .elocat )
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
if (length(locat) > extra$n)
- dimnames(locat) = list(dimnames(eta)[[1]], extra$y.names)
+ dimnames(locat) <- list(dimnames(eta)[[1]], extra$y.names)
locat
}
}, list( .elocat = elocat, .llocat = llocat,
@@ -2869,42 +2877,42 @@ alaplace1.control <- function(maxit = 100, ...)
misc$Musual <- Musual
misc$multipleResponses <- TRUE
- tmp34 = c(rep( .llocat , length = M))
- names(tmp34) = mynames1
- misc$link = tmp34 # Already named
+ tmp34 <- c(rep( .llocat , length = M))
+ names(tmp34) <- mynames1
+ misc$link <- tmp34 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:M) {
- misc$earg[[ii]] = .elocat
+ misc$earg[[ii]] <- .elocat
}
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$expected <- TRUE
+ extra$kappa <- misc$kappa <- .kappa
+ extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu <- .fittedMean # @fitted is not a true mu?
- extra$percentile = numeric(M)
- locat = as.matrix(locat)
+ extra$percentile <- numeric(M)
+ locat <- as.matrix(locat)
for(ii in 1:M) {
y.use <- if (ncoly > 1) y[, ii] else y
extra$percentile[ii] =
100 * weighted.mean(y.use <= locat[, ii], w)
}
- names(extra$percentile) = y.names
+ names(extra$percentile) <- y.names
- extra$Scale.arg = .Scale.arg
+ extra$Scale.arg <- .Scale.arg
}), list( .elocat = elocat,
.llocat = llocat,
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.kappa = kappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ymat = matrix(y, extra$n, extra$M)
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- locat = eta2theta(eta, .llocat , earg = .elocat )
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat <- matrix(y, extra$n, extra$M)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
@@ -2917,25 +2925,25 @@ alaplace1.control <- function(maxit = 100, ...)
.Scale.arg = Scale.arg, .kappa = kappa ))),
vfamily = c("alaplace1"),
deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat <- matrix(y, n, M)
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- locat = eta2theta(eta, .llocat , earg = .elocat )
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
- zedd = abs(ymat-locat) / Scale
+ kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
+ zedd <- abs(ymat-locat) / Scale
- dl.dlocat = ifelse(ymat >= locat, kappamat, 1/kappamat) *
+ dl.dlocat <- ifelse(ymat >= locat, kappamat, 1/kappamat) *
sqrt(2) * sign(ymat - locat) / Scale
- dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+ dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
c(w) * cbind(dl.dlocat * dlocat.deta)
}), list( .Scale.arg = Scale.arg, .elocat = elocat,
.llocat = llocat, .kappa = kappa ))),
weight = eval(substitute(expression({
- d2l.dlocat2 = 2 / Scale^2
- wz = cbind(d2l.dlocat2 * dlocat.deta^2)
+ d2l.dlocat2 <- 2 / Scale^2
+ wz <- cbind(d2l.dlocat2 * dlocat.deta^2)
c(w) * wz
}), list( .Scale.arg = Scale.arg,
@@ -2950,8 +2958,7 @@ alaplace1.control <- function(maxit = 100, ...)
-alaplace3.control <- function(maxit = 100, ...)
-{
+alaplace3.control <- function(maxit = 100, ...) {
list(maxit = maxit)
}
@@ -2991,16 +2998,16 @@ alaplace3.control <- function(maxit = 100, ...)
new("vglmff",
blurb = c("Three-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("kappa", lkappa, earg = ekappa),
- "\n", "\n",
- "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
- "\n",
- "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("kappa", lkappa, earg = ekappa),
+ "\n", "\n",
+ "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
+ "\n",
+ "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -3016,23 +3023,23 @@ alaplace3.control <- function(maxit = 100, ...)
namesof("kappa", .lkappa , earg = .ekappa, tag = FALSE))
if (!length(etastart)) {
- kappa.init = if (length( .ikappa ))
+ kappa.init <- if (length( .ikappa ))
rep( .ikappa, length.out = n) else
rep( 1.0, length.out = n)
if ( .imethod == 1) {
- locat.init = median(y)
- scale.init = sqrt(var(y) / 2)
+ locat.init <- median(y)
+ scale.init <- sqrt(var(y) / 2)
} else {
- locat.init = y
- scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+ locat.init <- y
+ scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init = if (length( .ilocat))
+ locat.init <- if (length( .ilocat))
rep( .ilocat, length.out = n) else
rep(locat.init, length.out = n)
- scale.init = if (length( .iscale))
+ scale.init <- if (length( .iscale))
rep( .iscale, length.out = n) else
rep(scale.init, length.out = n)
- etastart =
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ),
theta2eta(kappa.init, .lkappa, earg = .ekappa))
@@ -3042,21 +3049,21 @@ alaplace3.control <- function(maxit = 100, ...)
.llocat = llocat, .lscale = lscale, .lkappa = lkappa,
.ilocat = ilocat, .iscale = iscale, .ikappa = ikappa ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa <- eta2theta(eta[, 3], .lkappa, earg = .ekappa)
locat + Scale * (1/kappa - kappa) / sqrt(2)
}, list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
.ekappa = ekappa, .lkappa = lkappa ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat ,
- scale = .lscale ,
- kappa = .lkappa )
+ misc$link <- c(location = .llocat ,
+ scale = .lscale ,
+ kappa = .lkappa )
- misc$earg = list(location = .elocat,
- scale = .escale,
- kappa = .ekappa )
+ misc$earg <- list(location = .elocat,
+ scale = .escale,
+ kappa = .ekappa )
misc$expected = TRUE
}), list( .elocat = elocat, .llocat = llocat,
@@ -3064,9 +3071,9 @@ alaplace3.control <- function(maxit = 100, ...)
.ekappa = ekappa, .lkappa = lkappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- kappa = eta2theta(eta[, 3], .lkappa , earg = .ekappa ) # a matrix
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa ) # a matrix
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -3078,22 +3085,22 @@ alaplace3.control <- function(maxit = 100, ...)
.ekappa = ekappa, .lkappa = lkappa ))),
vfamily = c("alaplace3"),
deriv = eval(substitute(expression({
- locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa <- eta2theta(eta[, 3], .lkappa, earg = .ekappa)
- zedd = abs(y - locat) / Scale
- dl.dlocat = sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
+ zedd <- abs(y - locat) / Scale
+ dl.dlocat <- sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
sign(y-locat) / Scale
- dl.dscale = sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
+ dl.dscale <- sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
zedd / Scale - 1 / Scale
- dl.dkappa = 1 / kappa - 2 * kappa / (1+kappa^2) -
+ dl.dkappa <- 1 / kappa - 2 * kappa / (1+kappa^2) -
(sqrt(2) / Scale) *
ifelse(y > locat, 1, -1/kappa^2) * abs(y-locat)
- dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dkappa.deta = dtheta.deta(kappa, .lkappa, earg = .ekappa)
+ dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dkappa.deta <- dtheta.deta(kappa, .lkappa, earg = .ekappa)
c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta,
@@ -3102,17 +3109,17 @@ alaplace3.control <- function(maxit = 100, ...)
.elocat = elocat, .llocat = llocat,
.ekappa = ekappa, .lkappa = lkappa ))),
weight = eval(substitute(expression({
- d2l.dlocat2 = 2 / Scale^2
- d2l.dscale2 = 1 / Scale^2
- d2l.dkappa2 = 1 / kappa^2 + 4 / (1+kappa^2)^2
- d2l.dkappadloc = -sqrt(8) / ((1+kappa^2) * Scale)
- d2l.dkappadscale = -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
- wz = matrix(0, nrow = n, dimm(M))
- wz[,iam(1, 1, M)] = d2l.dlocat2 * dlocat.deta^2
- wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
- wz[,iam(3, 3, M)] = d2l.dkappa2 * dkappa.deta^2
- wz[,iam(1, 3, M)] = d2l.dkappadloc * dkappa.deta * dlocat.deta
- wz[,iam(2, 3, M)] = d2l.dkappadscale * dkappa.deta * dscale.deta
+ d2l.dlocat2 <- 2 / Scale^2
+ d2l.dscale2 <- 1 / Scale^2
+ d2l.dkappa2 <- 1 / kappa^2 + 4 / (1+kappa^2)^2
+ d2l.dkappadloc <- -sqrt(8) / ((1+kappa^2) * Scale)
+ d2l.dkappadscale <- -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
+ wz <- matrix(0, nrow = n, dimm(M))
+ wz[,iam(1, 1, M)] <- d2l.dlocat2 * dlocat.deta^2
+ wz[,iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
+ wz[,iam(3, 3, M)] <- d2l.dkappa2 * dkappa.deta^2
+ wz[,iam(1, 3, M)] <- d2l.dkappadloc * dkappa.deta * dlocat.deta
+ wz[,iam(2, 3, M)] <- d2l.dkappadscale * dkappa.deta * dscale.deta
c(w) * wz
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))))
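
The blurb reindented in this hunk quotes the mean, location + scale*(1/kappa - kappa)/sqrt(2), and the variance, Scale^2*(1 + kappa^4)/(2*kappa^2), of the three-parameter asymmetric Laplace. A minimal base-R sketch can check both formulas by simulation; the exponential-difference representation of the ALD used below is an assumption stated here, not anything in the patch.

## Sketch only: check the alaplace3 blurb formulas by simulation.
## Assumes Y = locat + Scale/sqrt(2) * (W1/kappa - kappa*W2), W1, W2 ~ Exp(1).
set.seed(1)
locat <- 2; Scale <- 1.5; kappa <- 0.8
w1 <- rexp(1e6); w2 <- rexp(1e6)
y  <- locat + Scale / sqrt(2) * (w1 / kappa - kappa * w2)
c(mean(y), locat + Scale * (1/kappa - kappa) / sqrt(2))  # mean formula
c(var(y),  Scale^2 * (1 + kappa^4) / (2 * kappa^2))      # variance formula
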
@@ -3131,7 +3138,7 @@ dlaplace <- function(x, location = 0, scale = 1, log = FALSE) {
- logdensity = (-abs(x-location)/scale) - log(2*scale)
+ logdensity <- (-abs(x-location)/scale) - log(2*scale)
if (log.arg) logdensity else exp(logdensity)
}
@@ -3139,11 +3146,11 @@ dlaplace <- function(x, location = 0, scale = 1, log = FALSE) {
plaplace <- function(q, location = 0, scale = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
- zedd = (q-location) / scale
- L = max(length(q), length(location), length(scale))
- q = rep(q, length.out = L);
- location = rep(location, length.out = L);
- scale = rep(scale, length.out = L)
+ zedd <- (q-location) / scale
+ L <- max(length(q), length(location), length(scale))
+ q <- rep(q, length.out = L);
+ location <- rep(location, length.out = L);
+ scale <- rep(scale, length.out = L)
ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
}
@@ -3152,10 +3159,10 @@ plaplace <- function(q, location = 0, scale = 1) {
qlaplace <- function(p, location = 0, scale = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
- L = max(length(p), length(location), length(scale))
- p = rep(p, length.out = L);
- location = rep(location, length.out = L);
- scale = rep(scale, length.out = L)
+ L <- max(length(p), length(location), length(scale))
+ p <- rep(p, length.out = L);
+ location <- rep(location, length.out = L);
+ scale <- rep(scale, length.out = L)
location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
}
@@ -3167,9 +3174,9 @@ rlaplace <- function(n, location = 0, scale = 1) {
stop("bad input for argument 'n'")
if (!is.Numeric(scale, positive = TRUE))
stop("'scale' must be positive")
- location = rep(location, length.out = n);
- scale = rep(scale, length.out = n)
- r = runif(n)
+ location <- rep(location, length.out = n);
+ scale <- rep(scale, length.out = n)
+ r <- runif(n)
location - sign(r-0.5) * scale * log(2 * ifelse(r < 0.5, r, 1-r))
}
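
A quick round-trip sketch for the plaplace()/qlaplace() helpers reformatted above; only the argument names shown in these hunks (location, scale) are assumed.

## Sketch: plaplace() should invert qlaplace() for the helpers above.
set.seed(2)
p <- runif(5)
q <- qlaplace(p, location = 1, scale = 2)
plaplace(q, location = 1, scale = 2)   # recovers p (up to rounding)
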
@@ -3205,14 +3212,14 @@ rlaplace <- function(n, location = 0, scale = 1) {
new("vglmff",
blurb = c("Two-parameter Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: location", "\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location", "\n",
"Variance: 2*scale^2"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -3230,22 +3237,22 @@ rlaplace <- function(n, location = 0, scale = 1) {
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init = median(y)
- scale.init = sqrt(var(y) / 2)
+ locat.init <- median(y)
+ scale.init <- sqrt(var(y) / 2)
} else if ( .imethod == 2) {
- locat.init = weighted.mean(y, w)
- scale.init = sqrt(var(y) / 2)
+ locat.init <- weighted.mean(y, w)
+ scale.init <- sqrt(var(y) / 2)
} else {
- locat.init = median(y)
- scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+ locat.init <- median(y)
+ scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init = if (length( .ilocat))
+ locat.init <- if (length( .ilocat))
rep( .ilocat, length.out = n) else
rep(locat.init, length.out = n)
- scale.init = if (length( .iscale))
+ scale.init <- if (length( .iscale))
rep( .iscale, length.out = n) else
rep(scale.init, length.out = n)
- etastart =
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
}
@@ -3257,16 +3264,18 @@ rlaplace <- function(n, location = 0, scale = 1) {
eta2theta(eta[, 1], .llocat , earg = .elocat )
}, list( .elocat = elocat, .llocat = llocat ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat , scale = .lscale )
- misc$earg = list(location = .elocat , scale = .escale )
- misc$expected = TRUE
- misc$RegCondOK = FALSE # Save this for later
+ misc$link <- c(location = .llocat , scale = .lscale )
+
+ misc$earg <- list(location = .elocat , scale = .escale )
+
+ misc$expected <- TRUE
+ misc$RegCondOK <- FALSE # Save this for later
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -3277,25 +3286,25 @@ rlaplace <- function(n, location = 0, scale = 1) {
.elocat = elocat, .llocat = llocat ))),
vfamily = c("laplace"),
deriv = eval(substitute(expression({
- Locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- zedd = abs(y-Locat) / Scale
- dl.dLocat = sign(y - Locat) / Scale
- dl.dscale = zedd / Scale - 1 / Scale
+ zedd <- abs(y-Locat) / Scale
+ dl.dLocat <- sign(y - Locat) / Scale
+ dl.dscale <- zedd / Scale - 1 / Scale
- dLocat.deta = dtheta.deta(Locat, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dLocat.deta <- dtheta.deta(Locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dLocat * dLocat.deta,
dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))),
weight = eval(substitute(expression({
- d2l.dLocat2 = d2l.dscale2 = 1 / Scale^2
- wz = matrix(0, nrow = n, ncol=M) # diagonal
- wz[,iam(1, 1, M)] = d2l.dLocat2 * dLocat.deta^2
- wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ d2l.dLocat2 <- d2l.dscale2 <- 1 / Scale^2
+ wz <- matrix(0, nrow = n, ncol = M) # diagonal
+ wz[,iam(1, 1, M)] <- d2l.dLocat2 * dLocat.deta^2
+ wz[,iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))))
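
A hypothetical usage sketch for the two-parameter laplace() family above; the data frame ldat and the intercept-only formula are illustrative only and not taken from the package.

## Hypothetical usage sketch for the laplace() family above.
library(VGAM)
set.seed(3)
ldat <- data.frame(y = rlaplace(200, location = 1, scale = 2))
fit  <- vglm(y ~ 1, laplace, data = ldat)
coef(fit, matrix = TRUE)   # intercepts for location and scale on their link scales
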
@@ -3303,8 +3312,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
-fff.control <- function(save.weight = TRUE, ...)
-{
+fff.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -3331,7 +3339,7 @@ fff.control <- function(save.weight = TRUE, ...)
nsimEIM <= 10)
stop("argument 'nsimEIM' should be an integer greater than 10")
- ncp = 0
+ ncp <- 0
if (any(ncp != 0))
warning("not sure about ncp != 0 wrt dl/dtheta")
@@ -3339,16 +3347,16 @@ fff.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("F-distribution\n\n",
- "Links: ",
- namesof("df1", link, earg = earg), ", ",
- namesof("df2", link, earg = earg),
- "\n", "\n",
- "Mean: df2/(df2-2) provided df2>2 and ncp = 0", "\n",
- "Variance: ",
- "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
- "provided df2>4 and ncp = 0"),
+ "Links: ",
+ namesof("df1", link, earg = earg), ", ",
+ namesof("df2", link, earg = earg),
+ "\n", "\n",
+ "Mean: df2/(df2-2) provided df2>2 and ncp = 0", "\n",
+ "Variance: ",
+ "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
+ "provided df2>4 and ncp = 0"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -3358,52 +3366,54 @@ fff.control <- function(save.weight = TRUE, ...)
- predictors.names <- c(namesof("df1", .link , earg = .earg , tag = FALSE),
- namesof("df2", .link , earg = .earg , tag = FALSE))
+ predictors.names <-
+ c(namesof("df1", .link , earg = .earg , tag = FALSE),
+ namesof("df2", .link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
if ( .imethod == 1) {
- df2.init = b = 2*mean(y) / (mean(y)-1)
- df1.init = 2*b^2*(b-2)/(var(y)*(b-2)^2 * (b-4) - 2*b^2)
- if (df2.init < 4) df2.init = 5
- if (df1.init < 2) df1.init = 3
+ df2.init <- b <- 2*mean(y) / (mean(y)-1)
+ df1.init <- 2*b^2*(b-2)/(var(y)*(b-2)^2 * (b-4) - 2*b^2)
+ if (df2.init < 4) df2.init <- 5
+ if (df1.init < 2) df1.init <- 3
} else {
- df2.init = b = 2*median(y) / (median(y)-1)
- summy = summary(y)
- var.est = summy[5] - summy[2]
- df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
+ df2.init <- b <- 2*median(y) / (median(y)-1)
+ summy <- summary(y)
+ var.est <- summy[5] - summy[2]
+ df1.init <- 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
}
- df1.init = if (length( .idf1))
+ df1.init <- if (length( .idf1))
rep( .idf1, length.out = n) else
rep(df1.init, length.out = n)
- df2.init = if (length( .idf2))
+ df2.init <- if (length( .idf2))
rep( .idf2, length.out = n) else
rep(1, length.out = n)
- etastart = cbind(theta2eta(df1.init, .link , earg = .earg ),
+ etastart <- cbind(theta2eta(df1.init, .link , earg = .earg ),
theta2eta(df2.init, .link , earg = .earg ))
}
}), list( .imethod = imethod, .idf1 = idf1, .earg = earg,
.idf2 = idf2, .link = link ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- df2 = eta2theta(eta[, 2], .link , earg = .earg )
- ans = df2 * NA
- ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
+ df2 <- eta2theta(eta[, 2], .link , earg = .earg )
+ ans <- df2 * NA
+ ans[df2>2] <- df2[df2>2] / (df2[df2>2]-2)
ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(df1 = .link , df2 = .link )
- misc$earg = list(df1 = .earg , df2 = .earg )
+ misc$link <- c(df1 = .link , df2 = .link )
+
+ misc$earg <- list(df1 = .earg , df2 = .earg )
- misc$nsimEIM = .nsimEIM
- misc$ncp = .ncp
+ misc$nsimEIM <- .nsimEIM
+ misc$ncp <- .ncp
}), list( .link = link, .earg = earg,
.ncp = ncp,
.nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- df1 = eta2theta(eta[, 1], .link , earg = .earg )
- df2 = eta2theta(eta[, 2], .link , earg = .earg )
+ df1 <- eta2theta(eta[, 1], .link , earg = .earg )
+ df2 <- eta2theta(eta[, 2], .link , earg = .earg )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -3413,44 +3423,44 @@ fff.control <- function(save.weight = TRUE, ...)
}, list( .link = link, .earg = earg, .ncp=ncp ))),
vfamily = c("fff"),
deriv = eval(substitute(expression({
- df1 = eta2theta(eta[, 1], .link , earg = .earg )
- df2 = eta2theta(eta[, 2], .link , earg = .earg )
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ df1 <- eta2theta(eta[, 1], .link , earg = .earg )
+ df2 <- eta2theta(eta[, 2], .link , earg = .earg )
+ dl.ddf1 <- 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
0.5*log(y) - 0.5*digamma(0.5*df1) -
0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
0.5*log1p(df1*y/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
0.5*digamma(0.5*df2) -
0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
0.5*log1p(df1*y/df2)
- ddf1.deta = dtheta.deta(df1, .link , earg = .earg )
- ddf2.deta = dtheta.deta(df2, .link , earg = .earg )
- dthetas.detas = cbind(ddf1.deta, ddf2.deta)
+ ddf1.deta <- dtheta.deta(df1, .link , earg = .earg )
+ ddf2.deta <- dtheta.deta(df2, .link , earg = .earg )
+ dthetas.detas <- cbind(ddf1.deta, ddf2.deta)
c(w) * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- ysim = rf(n = n, df1=df1, df2=df2)
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ ysim <- rf(n = n, df1=df1, df2=df2)
+ dl.ddf1 <- 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
0.5*log(ysim) - 0.5*digamma(0.5*df1) -
0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
0.5*log1p(df1*ysim/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
0.5*digamma(0.5*df2) -
0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
0.5*log1p(df1*ysim/df2)
rm(ysim)
- temp3 = cbind(dl.ddf1, dl.ddf2)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.ddf1, dl.ddf2)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = c(w) * wz * dthetas.detas[, ind1$row] *
+ wz <- c(w) * wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
wz
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
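
The weight code above approximates the expected information by simulation: for each of the nsimEIM draws it forms the outer product of the score vector and folds it into run.varcov with a running-average update. A tiny base-R sketch showing that this update is just the sample mean:

## Sketch: run <- ((ii-1)*run + x[ii]) / ii is a running mean, so after
## the loop run.varcov holds the average of the simulated score products.
set.seed(4)
x   <- rnorm(10)
run <- 0
for (ii in seq_along(x)) run <- ((ii - 1) * run + x[ii]) / ii
c(run, mean(x))   # identical up to floating point
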
@@ -3464,8 +3474,8 @@ fff.control <- function(save.weight = TRUE, ...)
lprob = "logit",
iprob = NULL) {
- inputN = is.Numeric(N, positive = TRUE)
- inputD = is.Numeric(D, positive = TRUE)
+ inputN <- is.Numeric(N, positive = TRUE)
+ inputD <- is.Numeric(D, positive = TRUE)
if (inputD && inputN)
stop("only one of 'N' and 'D' is to be inputted")
if (!inputD && !inputN)
@@ -3480,43 +3490,43 @@ fff.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Hypergeometric distribution\n\n",
- "Link: ",
- namesof("prob", lprob, earg = earg), "\n",
- "Mean: D/N\n"),
+ "Link: ",
+ namesof("prob", lprob, earg = earg), "\n",
+ "Mean: D/N\n"),
initialize = eval(substitute(expression({
- NCOL = function (x)
+ NCOL <- function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
- nn = rep(1, length.out = n)
+ if (is.factor(y)) y <- y != levels(y)[1]
+ nn <- rep(1, length.out = n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
+ mustart <- (0.5 + w * y) / (1 + w)
+ no.successes <- w * y
if (any(abs(no.successes - round(no.successes)) > 0.001))
stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
if (any(abs(y - round(y)) > 0.001))
stop("Count data must be integer-valued")
- nn = y[, 1] + y[, 2]
- y = ifelse(nn > 0, y[, 1]/nn, 0)
- w = w * nn
- mustart = (0.5 + nn * y) / (1 + nn)
- mustart[mustart >= 1] = 0.95
+ nn <- y[, 1] + y[, 2]
+ y <- ifelse(nn > 0, y[, 1]/nn, 0)
+ w <- w * nn
+ mustart <- (0.5 + nn * y) / (1 + nn)
+ mustart[mustart >= 1] <- 0.95
} else
stop("Response not of the right form")
predictors.names <-
namesof("prob", .lprob , earg = .earg , tag = FALSE)
- extra$Nvector = .N
- extra$Dvector = .D
- extra$Nunknown = length(extra$Nvector) == 0
+ extra$Nvector <- .N
+ extra$Dvector <- .D
+ extra$Nunknown <- length(extra$Nvector) == 0
if (!length(etastart)) {
- init.prob = if (length( .iprob))
+ init.prob <- if (length( .iprob))
rep( .iprob, length.out = n) else
mustart
- etastart = matrix(init.prob, n, ncol(cbind(y )))
+ etastart <- matrix(init.prob, n, ncol(cbind(y )))
}
}), list( .lprob = lprob, .earg = earg, .N = N, .D = D,
@@ -3526,7 +3536,9 @@ fff.control <- function(save.weight = TRUE, ...)
}, list( .lprob = lprob, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- c("prob" = .lprob)
+
misc$earg <- list("prob" = .earg )
+
misc$Dvector <- .D
misc$Nvector <- .N
}), list( .N = N, .D = D, .lprob = lprob, .earg = earg ))),
@@ -3535,15 +3547,15 @@ fff.control <- function(save.weight = TRUE, ...)
}, list( .lprob = lprob, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- N = extra$Nvector
- Dvec = extra$Dvector
- prob = mu
- yvec = w * y
+ N <- extra$Nvector
+ Dvec <- extra$Dvector
+ prob <- mu
+ yvec <- w * y
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
if (extra$Nunknown) {
- tmp12 = Dvec * (1-prob) / prob
+ tmp12 <- Dvec * (1-prob) / prob
sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
@@ -3560,19 +3572,19 @@ fff.control <- function(save.weight = TRUE, ...)
}, list( .lprob = lprob, .earg = earg ))),
vfamily = c("hyperg"),
deriv = eval(substitute(expression({
- prob = mu # equivalently, eta2theta(eta, .lprob, earg = .earg )
- dprob.deta = dtheta.deta(prob, .lprob, earg = .earg )
- Dvec = extra$Dvector
- Nvec = extra$Nvector
- yvec = w * y
+ prob <- mu # equivalently, eta2theta(eta, .lprob, earg = .earg )
+ dprob.deta <- dtheta.deta(prob, .lprob, earg = .earg )
+ Dvec <- extra$Dvector
+ Nvec <- extra$Nvector
+ yvec <- w * y
if (extra$Nunknown) {
- tmp72 = -Dvec / prob^2
- tmp12 = Dvec * (1-prob) / prob
- dl.dprob = tmp72 * (digamma(1 + tmp12) +
+ tmp72 <- -Dvec / prob^2
+ tmp12 <- Dvec * (1-prob) / prob
+ dl.dprob <- tmp72 * (digamma(1 + tmp12) +
digamma(1 + Dvec/prob -w) -
digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
} else {
- dl.dprob = Nvec * (digamma(1+Nvec*prob) -
+ dl.dprob <- Nvec * (digamma(1+Nvec*prob) -
digamma(1+Nvec*(1-prob)) -
digamma(1+Nvec*prob-yvec) +
digamma(1+Nvec*(1-prob)-w+yvec))
@@ -3581,9 +3593,9 @@ fff.control <- function(save.weight = TRUE, ...)
}), list( .lprob = lprob, .earg = earg ))),
weight = eval(substitute(expression({
if (extra$Nunknown) {
- tmp722 = tmp72^2
- tmp13 = 2*Dvec / prob^3
- d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
+ tmp722 <- tmp72^2
+ tmp13 <- 2*Dvec / prob^3
+ d2l.dprob2 <- tmp722 * (trigamma(1 + tmp12) +
trigamma(1 + Dvec/prob - w) -
trigamma(1 + tmp12 - w + yvec) -
trigamma(1 + Dvec/prob)) +
@@ -3592,16 +3604,16 @@ fff.control <- function(save.weight = TRUE, ...)
digamma(1 + tmp12 - w + yvec) -
digamma(1 + Dvec/prob))
} else {
- d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
+ d2l.dprob2 <- Nvec^2 * (trigamma(1+Nvec*prob) +
trigamma(1+Nvec*(1-prob)) -
trigamma(1+Nvec*prob-yvec) -
trigamma(1+Nvec*(1-prob)-w+yvec))
}
- d2prob.deta2 = d2theta.deta2(prob, .lprob, earg = .earg )
+ d2prob.deta2 <- d2theta.deta2(prob, .lprob, earg = .earg )
- wz = -(dprob.deta^2) * d2l.dprob2
- wz = c(w) * wz
- wz[wz < .Machine$double.eps] = .Machine$double.eps
+ wz <- -(dprob.deta^2) * d2l.dprob2
+ wz <- c(w) * wz
+ wz[wz < .Machine$double.eps] <- .Machine$double.eps
wz
}), list( .lprob = lprob, .earg = earg ))))
}
@@ -3615,15 +3627,15 @@ dbenini <- function(x, shape, y0, log = FALSE) {
- N = max(length(x), length(shape), length(y0))
- x = rep(x, length.out = N);
- shape = rep(shape, length.out = N);
- y0 = rep(y0, length.out = N);
+ N <- max(length(x), length(shape), length(y0))
+ x <- rep(x, length.out = N);
+ shape <- rep(shape, length.out = N);
+ y0 <- rep(y0, length.out = N);
- logdensity = rep(log(0), length.out = N)
- xok = (x > y0)
- tempxok = log(x[xok]/y0[xok])
- logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
+ logdensity <- rep(log(0), length.out = N)
+ xok <- (x > y0)
+ tempxok <- log(x[xok]/y0[xok])
+ logdensity[xok] <- log(2*shape[xok]) - shape[xok] * tempxok^2 +
log(tempxok) - log(x[xok])
if (log.arg) logdensity else exp(logdensity)
}
@@ -3636,14 +3648,14 @@ pbenini <- function(q, shape, y0) {
stop("bad input for argument 'shape'")
if (!is.Numeric(y0, positive = TRUE))
stop("bad input for argument 'y0'")
- N = max(length(q), length(shape), length(y0))
- q = rep(q, length.out = N);
- shape = rep(shape, length.out = N);
- y0 = rep(y0, length.out = N);
-
- ans = y0 * 0
- ok = q > y0
- ans[ok] = -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
+ N <- max(length(q), length(shape), length(y0))
+ q <- rep(q, length.out = N);
+ shape <- rep(shape, length.out = N);
+ y0 <- rep(y0, length.out = N);
+
+ ans <- y0 * 0
+ ok <- q > y0
+ ans[ok] <- -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
ans
}
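
A small consistency sketch for the dbenini()/pbenini() pair above (only the argument names shown in these hunks are assumed): the CDF should match numerical integration of the density from y0.

## Sketch: pbenini() should agree with integrating dbenini() from y0.
c(pbenini(3, shape = 1.2, y0 = 1),
  integrate(dbenini, lower = 1, upper = 3, shape = 1.2, y0 = 1)$value)
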
@@ -3739,8 +3751,8 @@ rbenini <- function(n, shape, y0) {
if (!length(etastart)) {
- probs.y = (1:3) / 4
- qofy = quantile(rep(y, times = w), probs = probs.y)
+ probs.y <- (1:3) / 4
+ qofy <- quantile(rep(y, times = w), probs = probs.y)
if ( .imethod == 1) {
shape.init <- mean(-log1p(-probs.y) / (log(qofy))^2)
} else {
@@ -3755,7 +3767,7 @@ rbenini <- function(n, shape, y0) {
.lshape = lshape, .eshape = eshape,
.y0 = y0 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .lshape , earg = .eshape )
+ shape <- eta2theta(eta, .lshape , earg = .eshape )
qbenini(p = 0.5, shape, y0 = extra$y0)
@@ -3782,8 +3794,8 @@ rbenini <- function(n, shape, y0) {
.eshape = eshape, .y0 = y0 ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .lshape , earg = .eshape )
- y0 = extra$y0
+ shape <- eta2theta(eta, .lshape , earg = .eshape )
+ y0 <- extra$y0
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -3792,18 +3804,18 @@ rbenini <- function(n, shape, y0) {
}, list( .lshape = lshape, .eshape = eshape ))),
vfamily = c("benini"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta, .lshape , earg = .eshape )
+ shape <- eta2theta(eta, .lshape , earg = .eshape )
- y0 = extra$y0
- dl.dshape = 1/shape - (log(y/y0))^2
+ y0 <- extra$y0
+ dl.dshape <- 1/shape - (log(y/y0))^2
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * dl.dshape * dshape.deta
}), list( .lshape = lshape, .eshape = eshape ))),
weight = eval(substitute(expression({
- ned2l.dshape2 = 1 / shape^2
- wz = ned2l.dshape2 * dshape.deta^2
+ ned2l.dshape2 <- 1 / shape^2
+ wz <- ned2l.dshape2 * dshape.deta^2
c(w) * wz
}), list( .lshape = lshape, .eshape = eshape ))))
}
@@ -3858,7 +3870,7 @@ ppolono <- function(q, meanlog = 0, sdlog = 1,
rpolono <- function(n, meanlog = 0, sdlog = 1) {
- lambda = rlnorm(n = n, meanlog = meanlog, sdlog = sdlog)
+ lambda <- rlnorm(n = n, meanlog = meanlog, sdlog = sdlog)
rpois(n = n, lambda = lambda)
}
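
rpolono() above draws a Poisson count whose mean is itself lognormal; by iterated expectation the marginal mean is exp(meanlog + sdlog^2/2), which a short simulation sketch (base R plus the function above) can verify.

## Sketch: E[Y] = E[lambda] = exp(meanlog + sdlog^2 / 2) for rpolono().
set.seed(5)
y <- rpolono(2e5, meanlog = 0.5, sdlog = 0.7)
c(mean(y), exp(0.5 + 0.7^2 / 2))   # should be close
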
@@ -3878,24 +3890,24 @@ dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
rm(log)
- N = max(length(x), length(theta), length(lower), length(upper))
- x = rep(x, length.out = N);
- lower = rep(lower, length.out = N);
- upper = rep(upper, length.out = N);
- theta = rep(theta, length.out = N)
+ N <- max(length(x), length(theta), length(lower), length(upper))
+ x <- rep(x, length.out = N);
+ lower <- rep(lower, length.out = N);
+ upper <- rep(upper, length.out = N);
+ theta <- rep(theta, length.out = N)
- denom1 = ((upper-lower)*(theta-lower))
- denom2 = ((upper-lower)*(upper-theta))
- logdensity = rep(log(0), length.out = N)
- xok.neg = (lower < x) & (x <= theta)
- xok.pos = (theta <= x) & (x < upper)
+ denom1 <- ((upper-lower)*(theta-lower))
+ denom2 <- ((upper-lower)*(upper-theta))
+ logdensity <- rep(log(0), length.out = N)
+ xok.neg <- (lower < x) & (x <= theta)
+ xok.pos <- (theta <= x) & (x < upper)
logdensity[xok.neg] =
log(2 * (x[xok.neg] - lower[xok.neg]) / denom1[xok.neg])
logdensity[xok.pos] =
log(2 * (upper[xok.pos] - x[xok.pos]) / denom2[xok.pos])
- logdensity[lower >= upper] = NaN
- logdensity[lower > theta] = NaN
- logdensity[upper < theta] = NaN
+ logdensity[lower >= upper] <- NaN
+ logdensity[lower > theta] <- NaN
+ logdensity[upper < theta] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
@@ -3912,12 +3924,12 @@ rtriangle <- function(n, theta, lower = 0, upper = 1) {
if (!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
- N = n
- lower = rep(lower, length.out = N);
- upper = rep(upper, length.out = N);
- theta = rep(theta, length.out = N)
- t1 = sqrt(runif(n))
- t2 = sqrt(runif(n))
+ N <- n
+ lower <- rep(lower, length.out = N);
+ upper <- rep(upper, length.out = N);
+ theta <- rep(theta, length.out = N)
+ t1 <- sqrt(runif(n))
+ t2 <- sqrt(runif(n))
ifelse(runif(n) < (theta - lower) / (upper - lower),
lower + (theta - lower) * t1,
upper - (upper - theta) * t2)
@@ -3936,31 +3948,31 @@ qtriangle <- function(p, theta, lower = 0, upper = 1) {
if (!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
- N = max(length(p), length(theta), length(lower), length(upper))
- p = rep(p, length.out = N);
- lower = rep(lower, length.out = N);
- upper = rep(upper, length.out = N);
- theta = rep(theta, length.out = N)
+ N <- max(length(p), length(theta), length(lower), length(upper))
+ p <- rep(p, length.out = N);
+ lower <- rep(lower, length.out = N);
+ upper <- rep(upper, length.out = N);
+ theta <- rep(theta, length.out = N)
- bad = (p < 0) | (p > 1)
+ bad <- (p < 0) | (p > 1)
if (any(bad))
stop("bad input for argument 'p'")
- Neg = (p <= (theta - lower)/(upper - lower))
- ans = as.numeric(NA) * p
- temp1 = p * (upper - lower) * (theta - lower)
- ans[ Neg] = lower[ Neg] + sqrt(temp1[ Neg])
+ Neg <- (p <= (theta - lower)/(upper - lower))
+ ans <- as.numeric(NA) * p
+ temp1 <- p * (upper - lower) * (theta - lower)
+ ans[ Neg] <- lower[ Neg] + sqrt(temp1[ Neg])
- Pos = (p >= (theta - lower)/(upper - lower))
+ Pos <- (p >= (theta - lower)/(upper - lower))
if (any(Pos)) {
- pstar = (p - (theta - lower)/(upper - lower)) / (1 -
+ pstar <- (p - (theta - lower)/(upper - lower)) / (1 -
(theta - lower) / (upper - lower))
- qstar = cbind(1 - sqrt(1-pstar), 1 + sqrt(1-pstar))
- qstar = qstar[Pos,, drop = FALSE]
- qstar = ifelse(qstar[, 1] >= 0 & qstar[, 1] <= 1,
+ qstar <- cbind(1 - sqrt(1-pstar), 1 + sqrt(1-pstar))
+ qstar <- qstar[Pos,, drop = FALSE]
+ qstar <- ifelse(qstar[, 1] >= 0 & qstar[, 1] <= 1,
qstar[, 1],
qstar[, 2])
- ans[Pos] = theta[Pos] + qstar * (upper - theta)[Pos]
+ ans[Pos] <- theta[Pos] + qstar * (upper - theta)[Pos]
}
ans
}
@@ -3978,22 +3990,22 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
if (!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
- N = max(length(q), length(theta), length(lower), length(upper))
- q = rep(q, length.out = N);
- lower = rep(lower, length.out = N);
- upper = rep(upper, length.out = N);
- theta = rep(theta, length.out = N)
- ans = q * 0
+ N <- max(length(q), length(theta), length(lower), length(upper))
+ q <- rep(q, length.out = N);
+ lower <- rep(lower, length.out = N);
+ upper <- rep(upper, length.out = N);
+ theta <- rep(theta, length.out = N)
+ ans <- q * 0
- qstar = (q - lower)^2 / ((upper-lower) * (theta-lower))
- Neg = (lower <= q & q <= theta)
- ans[Neg] = (qstar)[Neg]
+ qstar <- (q - lower)^2 / ((upper-lower) * (theta-lower))
+ Neg <- (lower <= q & q <= theta)
+ ans[Neg] <- (qstar)[Neg]
- Pos = (theta <= q & q <= upper)
- qstar = (q - theta) / (upper-theta)
- ans[Pos] = ((theta-lower)/(upper-lower))[Pos] +
+ Pos <- (theta <= q & q <= upper)
+ qstar <- (q - theta) / (upper-theta)
+ ans[Pos] <- ((theta-lower)/(upper-lower))[Pos] +
(qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos]
- ans[q >= upper] = 1
+ ans[q >= upper] <- 1
ans
}
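
The qtriangle()/ptriangle() helpers above should be mutual inverses whenever lower < theta < upper; a quick check using the argument names from these hunks:

## Sketch: ptriangle() should invert qtriangle().
p <- c(0.1, 0.25, 0.5, 0.9)
q <- qtriangle(p, theta = 0.3, lower = 0, upper = 2)
ptriangle(q, theta = 0.3, lower = 0, upper = 2)   # recovers p
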
@@ -4001,8 +4013,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
triangle <- function(lower = 0, upper = 1,
link = elogit(min = lower, max = upper),
- itheta = NULL)
-{
+ itheta = NULL) {
if (!is.Numeric(lower))
stop("bad input for argument 'lower'")
if (!is.Numeric(upper))
@@ -4020,10 +4031,9 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
new("vglmff",
- blurb = c(
- "Triangle distribution\n\n",
- "Link: ",
- namesof("theta", link, earg = earg)),
+ blurb = c("Triangle distribution\n\n",
+ "Link: ",
+ namesof("theta", link, earg = earg)),
infos = eval(substitute(function(...) {
list(Musual = 1,
link = .link )
@@ -4037,8 +4047,8 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
- extra$lower = rep( .lower, length.out = n)
- extra$upper = rep( .upper, length.out = n)
+ extra$lower <- rep( .lower, length.out = n)
+ extra$upper <- rep( .upper, length.out = n)
if (any(y <= extra$lower | y >= extra$upper))
stop("some y values in [lower,upper] detected")
@@ -4048,35 +4058,36 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
if (!length(etastart)) {
- Theta.init = if (length( .itheta )) .itheta else {
+ Theta.init <- if (length( .itheta )) .itheta else {
weighted.mean(y, w)
}
- Theta.init = rep(Theta.init, length = n)
- etastart = theta2eta(Theta.init, .link , earg = .earg )
+ Theta.init <- rep(Theta.init, length = n)
+ etastart <- theta2eta(Theta.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .itheta=itheta,
.upper = upper, .lower = lower ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Theta = eta2theta(eta, .link , earg = .earg )
- lower = extra$lower
- upper = extra$upper
- mu = ((Theta^3 / 3 - lower * Theta^2 / 2 +
+ Theta <- eta2theta(eta, .link , earg = .earg )
+ lower <- extra$lower
+ upper <- extra$upper
+ mu <- ((Theta^3 / 3 - lower * Theta^2 / 2 +
lower^3 / 6) / (Theta - lower) +
((Theta^3 / 3 - upper * Theta^2 / 2 +
upper^3 / 6) / (upper - Theta))) * 2 / (upper-lower)
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(theta = .link )
- misc$earg = list(theta = .earg )
+ misc$link <- c(theta = .link )
- misc$expected = TRUE
+ misc$earg <- list(theta = .earg )
+
+ misc$expected <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Theta = eta2theta(eta, .link , earg = .earg )
- lower = extra$lower
- upper = extra$upper
+ Theta <- eta2theta(eta, .link , earg = .earg )
+ lower <- extra$lower
+ upper <- extra$upper
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -4086,24 +4097,24 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
}, list( .link = link, .earg = earg ))),
vfamily = c("triangle"),
deriv = eval(substitute(expression({
- Theta = eta2theta(eta, .link , earg = .earg )
+ Theta <- eta2theta(eta, .link , earg = .earg )
- dTheta.deta = dtheta.deta(Theta, .link , earg = .earg )
+ dTheta.deta <- dtheta.deta(Theta, .link , earg = .earg )
- pos = y > Theta
- neg = y < Theta
- lower = extra$lower
- upper = extra$upper
+ pos <- y > Theta
+ neg <- y < Theta
+ lower <- extra$lower
+ upper <- extra$upper
- dl.dTheta = 0 * y
- dl.dTheta[neg] = -1 / (Theta[neg]-lower[neg])
- dl.dTheta[pos] = 1 / (upper[pos]-Theta[pos])
+ dl.dTheta <- 0 * y
+ dl.dTheta[neg] <- -1 / (Theta[neg]-lower[neg])
+ dl.dTheta[pos] <- 1 / (upper[pos]-Theta[pos])
w * dl.dTheta * dTheta.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2l.dTheta2 = 1 / ((Theta - lower) * (upper - Theta))
- wz = d2l.dTheta2 * dTheta.deta^2
+ d2l.dTheta2 <- 1 / ((Theta - lower) * (upper - Theta))
+ wz <- d2l.dTheta2 * dTheta.deta^2
c(w) * wz
}), list( .link = link, .earg = earg ))))
}
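
A hypothetical usage sketch for the triangle() family above; the data frame tdat is illustrative only, and Coef() is used to back-transform the intercept-only elogit fit.

## Hypothetical usage sketch for the triangle() family above.
library(VGAM)
set.seed(7)
tdat <- data.frame(y = rtriangle(500, theta = 0.4, lower = 0, upper = 1))
fit  <- vglm(y ~ 1, triangle(lower = 0, upper = 1), data = tdat)
Coef(fit)   # estimate of theta on its original (0, 1) scale
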
@@ -4115,14 +4126,13 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
adjust0.loglaplace1 <- function(ymat, y, w, rep0) {
- rangey0 = range(y[y > 0])
- ymat[ymat <= 0] = min(rangey0[1] / 2, rep0)
+ rangey0 <- range(y[y > 0])
+ ymat[ymat <= 0] <- min(rangey0[1] / 2, rep0)
ymat
}
-loglaplace1.control <- function(maxit = 300, ...)
-{
+loglaplace1.control <- function(maxit = 300, ...) {
list(maxit = maxit)
}
@@ -4186,38 +4196,38 @@ loglaplace1.control <- function(maxit = 300, ...)
length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
+ fittedMean <- FALSE
if (!is.logical(fittedMean) || length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
- mystring0 = namesof("location", llocat, earg = elocat)
- mychars = substring(mystring0, first = 1:nchar(mystring0),
+ mystring0 <- namesof("location", llocat, earg = elocat)
+ mychars <- substring(mystring0, first = 1:nchar(mystring0),
last = 1:nchar(mystring0))
- mychars[nchar(mystring0)] = ", inverse = TRUE)"
- mystring1 = paste(mychars, collapse = "")
+ mychars[nchar(mystring0)] <- ", inverse = TRUE)"
+ mystring1 <- paste(mychars, collapse = "")
new("vglmff",
blurb = c("One-parameter ",
- if (llocat == "loge") "log-Laplace" else
- c(llocat, "-Laplace"),
- " distribution\n\n",
- "Links: ", mystring0, "\n", "\n",
+ if (llocat == "loge") "log-Laplace" else
+ c(llocat, "-Laplace"),
+ " distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
"Quantiles: ", mystring1),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
- constraints, intercept = FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, apply.int = FALSE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallelLocation = parallelLocation,
.Scale.arg = Scale.arg, .zero = zero ))),
initialize = eval(substitute(expression({
- extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length = M)
- extra$kappa = rep( .kappa, length = M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale <- rep( .Scale.arg, length = M)
+ extra$kappa <- rep( .kappa, length = M)
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
temp5 <-
@@ -4232,10 +4242,10 @@ loglaplace1.control <- function(maxit = 300, ...)
- extra$n = n
- extra$y.names = y.names =
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("tau = ", round(extra$tau, digits = .digt), sep = "")
- extra$individual = FALSE
+ extra$individual <- FALSE
predictors.names <-
@@ -4258,26 +4268,26 @@ loglaplace1.control <- function(maxit = 300, ...)
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init = quantile(rep(y, w), probs= extra$tau) + 1/16
+ locat.init <- quantile(rep(y, w), probs= extra$tau) + 1/16
} else if ( .imethod == 2) {
- locat.init = weighted.mean(y, w)
+ locat.init <- weighted.mean(y, w)
} else if ( .imethod == 3) {
- locat.init = median(y)
+ locat.init <- median(y)
} else if ( .imethod == 4) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
+ Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
df = .dfmu.init)
- locat.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ locat.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
} else {
- use.this = weighted.mean(y, w)
- locat.init = (1- .sinit)*y + .sinit * use.this
+ use.this <- weighted.mean(y, w)
+ locat.init <- (1- .sinit)*y + .sinit * use.this
}
- locat.init = if (length( .ilocat))
+ locat.init <- if (length( .ilocat))
rep( .ilocat, length.out = M) else
rep(locat.init, length.out = M)
- locat.init = matrix(locat.init, n, M, byrow = TRUE)
+ locat.init <- matrix(locat.init, n, M, byrow = TRUE)
if ( .llocat == "loge")
- locat.init = abs(locat.init)
- etastart =
+ locat.init <- abs(locat.init)
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
}
}), list( .imethod = imethod,
@@ -4290,12 +4300,12 @@ loglaplace1.control <- function(maxit = 300, ...)
locat.y = eta2theta(eta, .llocat , earg = .elocat )
if ( .fittedMean ) {
stop("Yet to do: handle 'fittedMean = TRUE'")
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
locat.y + Scale * (1/kappamat - kappamat)
} else {
if (length(locat.y) > extra$n)
- dimnames(locat.y) = list(dimnames(eta)[[1]], extra$y.names)
+ dimnames(locat.y) <- list(dimnames(eta)[[1]], extra$y.names)
locat.y
}
locat.y[locat.y < .minquantile] = .minquantile
@@ -4306,41 +4316,43 @@ loglaplace1.control <- function(maxit = 300, ...)
.fittedMean = fittedMean, .Scale.arg = Scale.arg,
.kappa = kappa ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat)
- misc$earg = list(location = .elocat )
- misc$expected = TRUE
+ misc$link <- c(location = .llocat)
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- extra$Scale.arg = .Scale.arg
+ misc$earg <- list(location = .elocat )
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$rep0 = .rep0
- misc$minquantile = .minquantile
- misc$maxquantile = .maxquantile
+ misc$expected <- TRUE
+
+ extra$kappa <- misc$kappa <- .kappa
+ extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg <- .Scale.arg
- extra$percentile = numeric(length(misc$kappa))
- locat.y = as.matrix(locat.y)
+ misc$true.mu <- .fittedMean # @fitted is not a true mu?
+ misc$rep0 <- .rep0
+ misc$minquantile <- .minquantile
+ misc$maxquantile <- .maxquantile
+
+ extra$percentile <- numeric(length(misc$kappa))
+ locat.y <- as.matrix(locat.y)
for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 * weighted.mean(y <= locat.y[, ii], w)
+ extra$percentile[ii] <- 100 * weighted.mean(y <= locat.y[, ii], w)
}), list( .elocat = elocat, .llocat = llocat,
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.minquantile = minquantile, .maxquantile = maxquantile,
.rep0 = rep0, .kappa = kappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- ymat = matrix(y, extra$n, extra$M)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat <- matrix(y, extra$n, extra$M)
if ( .llocat == "loge")
- ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
- w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logoff()
+ ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
+ w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logoff()
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ALDans = sum(c(w) * dalap(x = c(w.mat), locat = c(eta),
+ ALDans <- sum(c(w) * dalap(x = c(w.mat), locat = c(eta),
scale = c(Scale.w), kappa = c(kappamat),
log = TRUE))
ALDans
@@ -4350,20 +4362,20 @@ loglaplace1.control <- function(maxit = 300, ...)
.Scale.arg = Scale.arg, .kappa = kappa ))),
vfamily = c("loglaplace1"),
deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- locat.w = eta
- locat.y = eta2theta(locat.w, .llocat , earg = .elocat )
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
-
- ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
- w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
- zedd = abs(w.mat-locat.w) / Scale.w
- dl.dlocat = ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ ymat <- matrix(y, n, M)
+ Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.w <- eta
+ locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
+ kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
+
+ ymat <- adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
+ w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ zedd <- abs(w.mat-locat.w) / Scale.w
+ dl.dlocat <- ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
sqrt(2) * sign(w.mat-locat.w) / Scale.w
- dlocat.deta = dtheta.deta(locat.w,
+ dlocat.deta <- dtheta.deta(locat.w,
.llocat.identity ,
earg = .elocat.identity )
c(w) * cbind(dl.dlocat * dlocat.deta)
@@ -4374,8 +4386,8 @@ loglaplace1.control <- function(maxit = 300, ...)
.kappa = kappa ))),
weight = eval(substitute(expression({
- ned2l.dlocat2 = 2 / Scale.w^2
- wz = cbind(ned2l.dlocat2 * dlocat.deta^2)
+ ned2l.dlocat2 <- 2 / Scale.w^2
+ wz <- cbind(ned2l.dlocat2 * dlocat.deta^2)
c(w) * wz
}), list( .Scale.arg = Scale.arg,
.elocat = elocat, .llocat = llocat,
@@ -4387,8 +4399,7 @@ loglaplace1.control <- function(maxit = 300, ...)
-loglaplace2.control <- function(save.weight = TRUE, ...)
-{
+loglaplace2.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
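
Both log-Laplace families above store tau = kappa^2 / (1 + kappa^2) in extra$tau, so the quantile level tau and the asymmetry parameter kappa determine each other; a one-liner sketch of the inversion kappa = sqrt(tau / (1 - tau)):

## Sketch: kappa and the quantile level tau determine each other.
tau   <- c(0.1, 0.25, 0.5, 0.9)
kappa <- sqrt(tau / (1 - tau))
kappa^2 / (1 + kappa^2)   # recovers tau
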
@@ -4451,7 +4462,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
if (!is.logical(parallelLocation) ||
length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
+ fittedMean <- FALSE
if (!is.logical(fittedMean) || length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
@@ -4461,45 +4472,45 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Two-parameter log-Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: zz location + scale * ",
- "(1/kappa - kappa) / sqrt(2)", "\n",
- "Quantiles: location", "\n",
- "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: zz location + scale * ",
+ "(1/kappa - kappa) / sqrt(2)", "\n",
+ "Quantiles: location", "\n",
+ "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
- .ZERO = .zero
- if (is.character( .ZERO)) .ZERO = eval(parse(text = .ZERO))
- .PARALLEL = .parallelLocation
- parelHmat = if (is.logical( .PARALLEL ) && .PARALLEL )
+ .ZERO <- .zero
+ if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
+ .PARALLEL <- .parallelLocation
+ parelHmat <- if (is.logical( .PARALLEL ) && .PARALLEL )
matrix(1, M/2, 1) else diag(M/2)
- scaleHmat = if (is.logical( .eq.scale ) && .eq.scale )
+ scaleHmat <- if (is.logical( .eq.scale ) && .eq.scale )
matrix(1, M/2, 1) else diag(M/2)
- mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ mycmatrix <- cbind(rbind( parelHmat, 0*parelHmat),
rbind(0*scaleHmat, scaleHmat))
- constraints = cm.vgam(mycmatrix, x, .PARALLEL, constraints,
- int = FALSE)
- constraints = cm.zero.vgam(constraints, x, .ZERO, M)
+ constraints <- cm.vgam(mycmatrix, x, .PARALLEL, constraints,
+ apply.int = FALSE)
+ constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
- parelHmat = diag(M/2)
- mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ parelHmat <- diag(M/2)
+ mycmatrix <- cbind(rbind( parelHmat, 0*parelHmat),
rbind(0*scaleHmat, scaleHmat))
- constraints[["(Intercept)"]] = mycmatrix
+ constraints[["(Intercept)"]] <- mycmatrix
}
if (is.logical( .eq.scale) && .eq.scale &&
names(constraints)[1] == "(Intercept)") {
- temp3 = constraints[["(Intercept)"]]
- temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
+ temp3 <- constraints[["(Intercept)"]]
+ temp3 <- cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
constraints[["(Intercept)"]] = temp3
}
}), list( .eq.scale = eq.scale, .parallelLocation = parallelLocation,
.zero = zero ))),
initialize = eval(substitute(expression({
- extra$kappa = .kappa
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ extra$kappa <- .kappa
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -4515,9 +4526,9 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
- extra$M = M = 2 * length(extra$kappa)
- extra$n = n
- extra$y.names = y.names =
+ extra$M <- M <- 2 * length(extra$kappa)
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
@@ -4533,30 +4544,30 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init.y = weighted.mean(y, w)
- scale.init = sqrt(var(y) / 2)
+ locat.init.y <- weighted.mean(y, w)
+ scale.init <- sqrt(var(y) / 2)
} else if ( .imethod == 2) {
- locat.init.y = median(y)
- scale.init = sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
+ locat.init.y <- median(y)
+ scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
} else if ( .imethod == 3) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
+ Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
df = .dfmu.init)
- locat.init.y = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
- scale.init = sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
+ locat.init.y <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
} else {
- use.this = weighted.mean(y, w)
- locat.init.y = (1- .sinit)*y + .sinit * use.this
- scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+ use.this <- weighted.mean(y, w)
+ locat.init.y <- (1- .sinit)*y + .sinit * use.this
+ scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init.y = if (length( .ilocat ))
+ locat.init.y <- if (length( .ilocat ))
rep( .ilocat , length.out = n) else
rep(locat.init.y, length.out = n)
- locat.init.y = matrix(locat.init.y, n, M/2)
- scale.init = if (length( .iscale))
+ locat.init.y <- matrix(locat.init.y, n, M/2)
+ scale.init <- if (length( .iscale))
rep( .iscale, length.out = n) else
rep(scale.init, length.out = n)
- scale.init = matrix(scale.init, n, M/2)
- etastart =
+ scale.init <- matrix(scale.init, n, M/2)
+ etastart <-
cbind(theta2eta(locat.init.y, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
}
@@ -4567,12 +4578,12 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
.elocat = elocat, .escale = escale,
.ilocat = ilocat, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- locat.y = eta2theta(eta[,1:(extra$M/2), drop = FALSE],
+ locat.y <- eta2theta(eta[,1:(extra$M/2), drop = FALSE],
.llocat , earg = .elocat )
if ( .fittedMean ) {
- kappamat = matrix(extra$kappa, extra$n, extra$M/2,
+ kappamat <- matrix(extra$kappa, extra$n, extra$M/2,
byrow = TRUE)
- Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M],
+ Scale.y <- eta2theta(eta[,(1+extra$M/2):extra$M],
.lscale , earg = .escale )
locat.y + Scale.y * (1/kappamat - kappamat)
} else {
@@ -4584,19 +4595,20 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
.fittedMean = fittedMean,
.kappa = kappa ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat , scale = .lscale )
- misc$earg = list(location = .elocat , scale = .escale )
+ misc$link <- c(location = .llocat , scale = .lscale )
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$nsimEIM = .nsimEIM
- misc$rep0 = .rep0
- extra$percentile = numeric(length(misc$kappa))
- locat = as.matrix(locat.y)
+ misc$earg <- list(location = .elocat , scale = .escale )
+
+ misc$expected <- TRUE
+ extra$kappa <- misc$kappa <- .kappa
+ extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu <- .fittedMean # @fitted is not a true mu?
+ misc$nsimEIM <- .nsimEIM
+ misc$rep0 <- .rep0
+ extra$percentile <- numeric(length(misc$kappa))
+ locat <- as.matrix(locat.y)
for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 *
+ extra$percentile[ii] <- 100 *
weighted.mean(y <= locat.y[, ii], w)
}), list( .elocat = elocat, .llocat = llocat,
.escale = escale, .lscale = lscale,
@@ -4605,12 +4617,12 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
.kappa = kappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
- Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
+ kappamat <- matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
+ Scale.w <- eta2theta(eta[,(1+extra$M/2):extra$M],
.lscale , earg = .escale )
- ymat = matrix(y, extra$n, extra$M/2)
- ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
- ell.mat = matrix(c(dloglaplace(x = c(ymat),
+ ymat <- matrix(y, extra$n, extra$M/2)
+ ymat[ymat <= 0] <- min(min(y[y > 0]), .rep0) # Adjust for 0s
+ ell.mat <- matrix(c(dloglaplace(x = c(ymat),
locat.ald = c(eta[,1:(extra$M/2)]),
scale.ald = c(Scale.w),
kappa = c(kappamat), log = TRUE)),
@@ -4625,62 +4637,62 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
.rep0 = rep0, .kappa = kappa ))),
vfamily = c("loglaplace2"),
deriv = eval(substitute(expression({
- ymat = matrix(y, n, M/2)
- Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
+ ymat <- matrix(y, n, M/2)
+ Scale.w <- eta2theta(eta[,(1+extra$M/2):extra$M],
.lscale , earg = .escale )
- locat.w = eta[,1:(extra$M/2), drop = FALSE]
- locat.y = eta2theta(locat.w, .llocat , earg = .elocat )
- kappamat = matrix(extra$kappa, n, M/2, byrow = TRUE)
- w.mat = ymat
- w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
- w.mat= theta2eta(w.mat, .llocat , earg = .elocat ) # w.mat=log(w.mat)
- zedd = abs(w.mat-locat.w) / Scale.w
- dl.dlocat = sqrt(2) *
+ locat.w <- eta[,1:(extra$M/2), drop = FALSE]
+ locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
+ kappamat <- matrix(extra$kappa, n, M/2, byrow = TRUE)
+ w.mat <- ymat
+ w.mat[w.mat <= 0] <- min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
+ w.mat <- theta2eta(w.mat, .llocat , earg = .elocat ) # w.mat=log(w.mat)
+ zedd <- abs(w.mat-locat.w) / Scale.w
+ dl.dlocat <- sqrt(2) *
ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
sign(w.mat-locat.w) / Scale.w
- dl.dscale = sqrt(2) *
+ dl.dscale <- sqrt(2) *
ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
zedd / Scale.w - 1 / Scale.w
- dlocat.deta = dtheta.deta(locat.w, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(Scale.w, .lscale , earg = .escale )
+ dlocat.deta <- dtheta.deta(locat.w, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale.w, .lscale , earg = .escale )
c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat,
.rep0 = rep0, .kappa = kappa ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dlocat.deta, dscale.deta)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dthetas.detas <- cbind(dlocat.deta, dscale.deta)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- wsim = matrix(rloglap(n*M/2, loc = c(locat.w),
+ wsim <- matrix(rloglap(n*M/2, loc = c(locat.w),
sca = c(Scale.w),
kappa = c(kappamat)), n, M/2)
- zedd = abs(wsim-locat.w) / Scale.w
- dl.dlocat = sqrt(2) *
+ zedd <- abs(wsim-locat.w) / Scale.w
+ dl.dlocat <- sqrt(2) *
ifelse(wsim >= locat.w, kappamat, 1/kappamat) *
sign(wsim-locat.w) / Scale.w
- dl.dscale = sqrt(2) *
+ dl.dscale <- sqrt(2) *
ifelse(wsim >= locat.w, kappamat, 1/kappamat) *
zedd / Scale.w - 1 / Scale.w
rm(wsim)
- temp3 = cbind(dl.dlocat, dl.dscale) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- cbind(dl.dlocat, dl.dscale) # n x M matrix
+ run.varcov <- ((ii-1) * run.varcov +
temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz = c(w) * matrix(wz, n, dimm(M))
+ wz <- wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz <- c(w) * matrix(wz, n, dimm(M))
wz
} else {
- d2l.dlocat2 = 2 / (Scale.w * locat.w)^2
- d2l.dscale2 = 1 / Scale.w^2
- wz = cbind(d2l.dlocat2 * dlocat.deta^2,
+ d2l.dlocat2 <- 2 / (Scale.w * locat.w)^2
+ d2l.dscale2 <- 1 / Scale.w^2
+ wz <- cbind(d2l.dlocat2 * dlocat.deta^2,
d2l.dscale2 * dscale.deta^2)
c(w) * wz
}
@@ -4694,16 +4706,15 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
-logitlaplace1.control <- function(maxit = 300, ...)
-{
+logitlaplace1.control <- function(maxit = 300, ...) {
list(maxit = maxit)
}
adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
- rangey01 = range(y[(y > 0) & (y < 1)])
- ymat[ymat <= 0] = min(rangey01[1] / 2, rep01 / w[y <= 0])
- ymat[ymat >= 1] = max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
+ rangey01 <- range(y[(y > 0) & (y < 1)])
+ ymat[ymat <= 0] <- min(rangey01[1] / 2, rep01 / w[y <= 0])
+ ymat[ymat >= 1] <- max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
ymat
}
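
adjust01.logitlaplace1() above nudges responses that are exactly 0 or 1 strictly inside (0, 1) so that the subsequent logit transform of the response stays finite. A small sketch of what it returns for a toy input (the helper may be internal to the installed package, so VGAM::: may be needed):

## Sketch: exact 0s and 1s are moved off the boundary before logit().
y    <- c(0, 0.2, 0.7, 1)
ymat <- matrix(y, ncol = 1)
adjust01.logitlaplace1(ymat, y = y, w = rep(1, 4), rep01 = 0.5)
## returns 0.10, 0.20, 0.70, 0.85 for this input
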
@@ -4763,17 +4774,17 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
if (!is.logical(parallelLocation) ||
length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
+ fittedMean <- FALSE
if (!is.logical(fittedMean) ||
length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
- mystring0 = namesof("location", llocat, earg = elocat)
- mychars = substring(mystring0, first = 1:nchar(mystring0),
+ mystring0 <- namesof("location", llocat, earg = elocat)
+ mychars <- substring(mystring0, first = 1:nchar(mystring0),
last = 1:nchar(mystring0))
mychars[nchar(mystring0)] = ", inverse = TRUE)"
- mystring1 = paste(mychars, collapse = "")
+ mystring1 <- paste(mychars, collapse = "")
@@ -4783,16 +4794,16 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
"Links: ", mystring0, "\n", "\n",
"Quantiles: ", mystring1),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
- constraints, intercept = FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, apply.int = FALSE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallelLocation = parallelLocation,
.Scale.arg = Scale.arg, .zero = zero ))),
initialize = eval(substitute(expression({
- extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length = M)
- extra$kappa = rep( .kappa, length = M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+ extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale <- rep( .Scale.arg, length = M)
+ extra$kappa <- rep( .kappa, length = M)
+ extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -4809,10 +4820,10 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
- extra$n = n
- extra$y.names = y.names =
+ extra$n <- n
+ extra$y.names <- y.names <-
paste("tau = ", round(extra$tau, digits = .digt), sep = "")
- extra$individual = FALSE
+ extra$individual <- FALSE
predictors.names <-
namesof(paste("quantile(", y.names, ")", sep = ""),
@@ -4832,24 +4843,24 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
" < maximum 'tau' value. Choose smaller values for 'tau'.")
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init = quantile(rep(y, w), probs= extra$tau)
+ locat.init <- quantile(rep(y, w), probs= extra$tau)
} else if ( .imethod == 2) {
- locat.init = weighted.mean(y, w)
- locat.init = median(rep(y, w))
+ locat.init <- weighted.mean(y, w)
+ locat.init <- median(rep(y, w))
} else if ( .imethod == 3) {
- use.this = weighted.mean(y, w)
- locat.init = (1- .sinit)*y + use.this * .sinit
+ use.this <- weighted.mean(y, w)
+ locat.init <- (1- .sinit)*y + use.this * .sinit
} else {
stop("this option not implemented")
}
- locat.init = if (length( .ilocat ))
+ locat.init <- if (length( .ilocat ))
rep( .ilocat , length.out = M) else
rep(locat.init, length.out = M)
- locat.init = matrix(locat.init, n, M, byrow = TRUE)
- locat.init = abs(locat.init)
- etastart =
+ locat.init <- matrix(locat.init, n, M, byrow = TRUE)
+ locat.init <- abs(locat.init)
+ etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
}
}), list( .imethod = imethod,
@@ -4859,38 +4870,38 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
.llocat = llocat, .kappa = kappa,
.ilocat = ilocat ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- locat.y = eta2theta(eta, .llocat , earg = .elocat )
+ locat.y <- eta2theta(eta, .llocat , earg = .elocat )
if ( .fittedMean ) {
stop("Yet to do: handle 'fittedMean = TRUE'")
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
locat.y + Scale * (1/kappamat - kappamat)
} else {
if (length(locat.y) > extra$n)
- dimnames(locat.y) = list(dimnames(eta)[[1]], extra$y.names)
+ dimnames(locat.y) <- list(dimnames(eta)[[1]], extra$y.names)
locat.y
}
}, list( .elocat = elocat, .llocat = llocat,
.fittedMean = fittedMean, .Scale.arg = Scale.arg,
.kappa = kappa ))),
last = eval(substitute(expression({
- misc$link = c(location = .llocat )
- misc$earg = list(location = .elocat )
+ misc$link <- c(location = .llocat )
+ misc$earg <- list(location = .elocat )
- misc$expected = TRUE
+ misc$expected <- TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- extra$Scale.arg = .Scale.arg
+ extra$kappa <- misc$kappa <- .kappa
+ extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg <- .Scale.arg
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$rep01 = .rep01
+ misc$true.mu <- .fittedMean # @fitted is not a true mu?
+ misc$rep01 <- .rep01
- extra$percentile = numeric(length(misc$kappa))
- locat.y = eta2theta(eta, .llocat , earg = .elocat )
- locat.y = as.matrix(locat.y)
+ extra$percentile <- numeric(length(misc$kappa))
+ locat.y <- eta2theta(eta, .llocat , earg = .elocat )
+ locat.y <- as.matrix(locat.y)
for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 *
+ extra$percentile[ii] <- 100 *
weighted.mean(y <= locat.y[, ii], w)
}), list( .elocat = elocat, .llocat = llocat,
@@ -4899,12 +4910,12 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
.kappa = kappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- ymat = matrix(y, extra$n, extra$M)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ kappamat <- matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat <- matrix(y, extra$n, extra$M)
+ ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
rep01 = .rep01)
- w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -4919,19 +4930,19 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
.Scale.arg = Scale.arg, .kappa = kappa ))),
vfamily = c("logitlaplace1"),
deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- locat.w = eta
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ ymat <- matrix(y, n, M)
+ Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.w <- eta
+ kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
+ ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
rep01 = .rep01)
- w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
- zedd = abs(w.mat-locat.w) / Scale.w
- dl.dlocat = ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ w.mat <- theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ zedd <- abs(w.mat-locat.w) / Scale.w
+ dl.dlocat <- ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
sqrt(2) * sign(w.mat-locat.w) / Scale.w
- dlocat.deta = dtheta.deta(locat.w,
+ dlocat.deta <- dtheta.deta(locat.w,
"identity",
earg = .elocat.identity )
@@ -4946,8 +4957,8 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
.kappa = kappa ))),
weight = eval(substitute(expression({
- d2l.dlocat2 = 2 / Scale.w^2
- wz = cbind(d2l.dlocat2 * dlocat.deta^2)
+ d2l.dlocat2 <- 2 / Scale.w^2
+ wz <- cbind(d2l.dlocat2 * dlocat.deta^2)
c(w) * wz
}), list( .Scale.arg = Scale.arg,
.elocat = elocat, .llocat = llocat ))))
diff --git a/R/family.quantal.R b/R/family.quantal.R
index c132ea8..5836683 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,6 +14,8 @@
+
+
abbott <- function(link0 = "logit",
link1 = "logit",
iprob0 = NULL, iprob1 = NULL,
@@ -96,13 +98,13 @@
prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
- con.fv = prob0
- trt.fv = prob1
- obs.fv = prob0 + (1 - prob0) * prob1
+ con.fv <- prob0
+ trt.fv <- prob1
+ obs.fv <- prob0 + (1 - prob0) * prob1
- ans = cbind("observed" = obs.fv,
+ ans <- cbind("observed" = obs.fv,
"treatment" = trt.fv,
"control" = con.fv)
@@ -118,21 +120,21 @@
prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
- mymu = prob0 + (1 - prob0) * prob1
+ mymu <- prob0 + (1 - prob0) * prob1
if (residuals) {
w * (y / mymu - (1 - y) / (1 - mymu))
} else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e6 * .Machine$double.eps
- smallno = sqrt(.Machine$double.eps)
+ smallno <- 1.0e6 * .Machine$double.eps
+ smallno <- sqrt(.Machine$double.eps)
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dbinom(x = ycounts, size = nvec, prob = mymu, log = TRUE))
}
@@ -143,9 +145,10 @@
last = eval(substitute(expression({
misc$link <- c(prob0 = .link0 , prob1 = .link1 )
misc$earg <- list(prob0 = .earg0 , prob1 = .earg1 )
- misc$mux.offdiagonal = .mux.offdiagonal
- misc$fitted.type = .fitted.type
- misc$true.mu = ( .fitted.type == "observed")
+
+ misc$mux.offdiagonal <- .mux.offdiagonal
+ misc$fitted.type <- .fitted.type
+ misc$true.mu <- ( .fitted.type == "observed")
}), list( .link0 = link0, .earg0 = earg0,
@@ -161,7 +164,7 @@
dprob1.deta <- dtheta.deta(prob1, .link1 , earg = .earg1 )
- mymu = prob0 + (1 - prob0) * prob1
+ mymu <- prob0 + (1 - prob0) * prob1
dl.dmu <- y / mymu - (1 - y) / (1 - mymu)
@@ -178,15 +181,15 @@
weight = eval(substitute(expression({
- ed2l.dmu2 <- 1 / (mymu * (1-mymu))
- ed2l.dprob02 <- ed2l.dmu2 * dmu.dprob0^2
- ed2l.dprob12 <- ed2l.dmu2 * dmu.dprob1^2
- ed2l.dprob1prob2 <- ( 1) # seems sort of ok but slow cvgc
- ed2l.dprob1prob2 <- ( 0) # kill it
- ed2l.dprob1prob2 <- ed2l.dmu2 * ( 1) # dont seem to work
+ ned2l.dmu2 <- 1 / (mymu * (1-mymu))
+ ned2l.dprob02 <- ned2l.dmu2 * dmu.dprob0^2
+ ned2l.dprob12 <- ned2l.dmu2 * dmu.dprob1^2
+ ned2l.dprob1prob2 <- ( 1) # seems sort of ok but slow cvgc
+ ned2l.dprob1prob2 <- ( 0) # kill it
+ ned2l.dprob1prob2 <- ned2l.dmu2 * ( 1) # dont seem to work
- ed2l.dprob1prob2 <- ed2l.dmu2 * dmu.dprob1 * dmu.dprob0 *
- .mux.offdiagonal
+ ned2l.dprob1prob2 <- ned2l.dmu2 * dmu.dprob1 * dmu.dprob0 *
+ .mux.offdiagonal
od2l.dmu2 <- y / mymu^2 + (1 - y) / (1 - mymu)^2
od2l.dprob02 <- od2l.dmu2 * dmu.dprob0^2
@@ -194,9 +197,9 @@
od2l.dprob1prob2 <- od2l.dmu2 * dmu.dprob1 * dmu.dprob0 + dl.dmu
- wz <- cbind(ed2l.dprob02 * dprob0.deta^2,
- ed2l.dprob12 * dprob1.deta^2,
- ed2l.dprob1prob2 * dprob1.deta * dprob0.deta)
+ wz <- cbind(ned2l.dprob02 * dprob0.deta^2,
+ ned2l.dprob12 * dprob1.deta^2,
+ ned2l.dprob1prob2 * dprob1.deta * dprob0.deta)
@@ -214,8 +217,6 @@
-
-
if (FALSE)
Abbott <- function(lprob1 = elogit(min = 0, max = 1), # For now, that is
lprob0 = "logit",
@@ -248,8 +249,8 @@ if (FALSE)
namesof("prob0", lprob0, earg = eprob0), ", ",
namesof("prob1", lprob1, earg = eprob1)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- constraints = cm.nointercept.vgam(constraints, x, .nointercept, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
}), list( .zero = zero,
.nointercept = nointercept ))),
@@ -291,7 +292,7 @@ if (FALSE)
print( sort(prob1.init) )
- eprob1 = list(min = prob0.init, max = 1)
+ eprob1 <- list(min = prob0.init, max = 1)
etastart <-
cbind(theta2eta(prob0.init, link = .lprob0 , earg = .eprob0 ),
theta2eta(prob1.init, link = .lprob1 , earg = eprob1 ))
@@ -304,15 +305,16 @@ if (FALSE)
linkinv = eval(substitute(function(eta, extra = NULL) {
prob0 <- eta2theta(eta[, 1], .lprob0 , earg = .eprob0)
- eprob1 = list(min = prob0, max = 1)
+ eprob1 <- list(min = prob0, max = 1)
prob1 <- eta2theta(eta[, 2], .lprob1 , earg = eprob1)
prob0 + prob1
}, list( .lprob1 = lprob1, .eprob1 = eprob1,
.lprob0 = lprob0, .eprob0 = eprob0 ))),
last = eval(substitute(expression({
- eprob1 = list(min = prob0, max = 1)
+ eprob1 <- list(min = prob0, max = 1)
misc$link <- c(prob0 = .lprob0, prob1 = .lprob1)
+
misc$earg <- list(prob0 = .eprob0, prob1 = eprob1)
misc$nointercept = .nointercept
@@ -323,7 +325,7 @@ if (FALSE)
deriv = eval(substitute(expression({
prob0 <- eta2theta(eta[,1], .lprob0, earg = .eprob0)
- eprob1 = list(min = prob0, max = 1)
+ eprob1 <- list(min = prob0, max = 1)
prob1 <- eta2theta(eta[,2], .lprob1, earg = eprob1)
dprob0.deta <- dtheta.deta(prob0, .lprob0 , earg = .eprob0 )
dprob1.deta <- dtheta.deta(prob1, .lprob1 , earg = eprob1 )
@@ -341,12 +343,12 @@ if (FALSE)
weight = eval(substitute(expression({
- ed2l.dmu2 <- 1 / (mu * (1-mu))
- ed2l.dprob02 <- ed2l.dmu2 * dmu.dprob0^2
- ed2l.dprob12 <- ed2l.dmu2 * dmu.dprob1^2
+ ned2l.dmu2 <- 1 / (mu * (1-mu))
+ ned2l.dprob02 <- ned2l.dmu2 * dmu.dprob0^2
+ ned2l.dprob12 <- ned2l.dmu2 * dmu.dprob1^2
- wz <- cbind(ed2l.dprob02 * dprob0.deta^2,
- ed2l.dprob12 * dprob1.deta^2)
+ wz <- cbind(ned2l.dprob02 * dprob0.deta^2,
+ ned2l.dprob12 * dprob1.deta^2)
print("head(wz)")
print( head(wz) )
@@ -369,4 +371,188 @@ if (FALSE)
+abbott.EM.control <- function(maxit = 1000, ...) {
+ list(maxit = maxit)
+}
+
+
+ abbott.EM <-
+ function(link = "probit",
+ b1.arg = 0, b2.arg = 0,
+ imethod = 1, ilambda = 0.5,
+ iprob = NULL) {
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(b1.arg, # allowable.length = 1,
+ integer.valued = TRUE) ||
+ b1.arg < 0)
+ stop("argument 'b1.arg' must be a vector of non-negative integers")
+
+
+ if (!is.Numeric(b2.arg, # allowable.length = 1,
+ integer.valued = TRUE) ||
+ b2.arg < 0)
+ stop("argument 'b2.arg' must be a vector of non-negative integers")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ zero <- NULL
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Probit regression with nonzero background (EM algorithm)\n",
+ "P[Y=1] = mu = prob0 + (1 - prob0) * linkinv(eta)\n\n",
+ "Link: ",
+ namesof("pi", link, earg = earg), "\n",
+ "Mean: mu"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ if (length(table(y)) != 2 || max(y) > 1)
+ stop("response must be a vector of 0s and 1s only")
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+ extra$lambda <- matrix( .ilambda , n, M, byrow = TRUE)
+ extra$orig.w <- w
+
+
+ mynames1 <- paste("prob0", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ prob.init <- if ( .imethod == 2)
+ 1 / (1 + y + 1/16) else
+ if ( .imethod == 3)
+ 1 / (1 + apply(y, 2, median) + 1/16) else
+ rnorm(n * M, mean = 0.5, sd = 0.01) # Mean 0.5
+
+ if (!is.matrix(prob.init))
+ prob.init <- matrix(prob.init, n, M, byrow = TRUE)
+
+
+ if (length( .iprob ))
+ prob.init <- matrix( .iprob , n, M, byrow = TRUE) # Mean 0.5
+
+
+ etastart <- theta2eta(prob.init, .link , earg = .earg ) # Mean 0
+ }
+ }), list( .link = link, .earg = earg,
+ .ilambda = ilambda,
+ .imethod = imethod, .iprob = iprob ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ prob <- eta2theta(eta, .link , earg = .earg )
+ mymu <- extra$lambda + (1 - extra$lambda) * prob # Eqn (3)
+ mymu
+ }, list( .link = link, .earg = earg ))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$multipleResponses <- TRUE
+ misc$imethod <- .imethod
+ misc$iprob <- .iprob
+ misc$b1.arg <- .b1.arg
+ misc$b2.arg <- .b2.arg
+
+ extra$lambda <- extra$lambda[1, ] # Now a vector
+ }), list( .link = link, .earg = earg,
+ .iprob = iprob,
+ .b1.arg = b1.arg, .b2.arg = b2.arg,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob <- eta2theta(eta, .link , earg = .earg )
+ mymu <- extra$lambda + (1 - extra$lambda) * prob # Eqn (3)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ sum(c(w) * dbinom(x = y, prob = mymu,
+ size = nvec, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("abbott.EM"),
+ deriv = eval(substitute(expression({
+ prob <- eta2theta(eta, .link , earg = .earg )
+
+ mymu <- extra$lambda + (1 - extra$lambda) * prob # Eqn (3)
+
+ wz <- cbind((1 - extra$lambda) * prob / mymu) # Eqn (4)
+
+ Deriv1 <- ifelse(y == 0, -dnorm(eta) / pnorm(eta, lower.tail = FALSE),
+ dnorm(eta) / pnorm(eta))
+
+ c(w) * wz * Deriv1
+ }), list( .link = link, .earg = earg ))),
+
+ weight = eval(substitute(expression({
+
+ extra$lambda <-
+ matrix((colSums((1 - wz) * y) + .b1.arg ) / (n + .b1.arg + .b2.arg ),
+ n, M, byrow = TRUE) # Between eqns (6),(7)
+
+
+
+
+ c(w) * wz
+ }), list( .link = link, .earg = earg,
+ .b1.arg = b1.arg, .b2.arg = b2.arg ))))
+}
+
+
+
+
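The new code above defines abbott.EM(), a VGAM family function for probit regression with a nonzero background probability, fitted by an EM-type update. As rough orientation only, a hypothetical call could look like the sketch below; the data frame ddd, the response y and the covariate x2 are placeholders and are not part of this patch.

    ## Hypothetical sketch, not part of the patch: 'ddd', 'y', 'x2' are placeholders.
    ## abbott.EM() models P[Y = 1] = lambda + (1 - lambda) * pnorm(eta).
    fit <- vglm(y ~ x2,
                family = abbott.EM(link = "probit", ilambda = 0.5),
                data = ddd, trace = TRUE)
    coef(fit, matrix = TRUE)   # probit-scale regression coefficients
    head(fitted(fit))          # fitted P[Y = 1], background included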
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 0de4d48..53d0bb5 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -40,8 +40,8 @@
- noroweffects = FALSE
- nocoleffects = FALSE
+ noroweffects <- FALSE
+ nocoleffects <- FALSE
if (!is.Numeric(which.lp, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
@@ -59,7 +59,7 @@
if (!inherits(family, "vglmff")) {
stop("'family = ", family, "' is not a VGAM family function")
}
- efamily = family
+ efamily <- family
if (!is.Numeric(Musual)) {
@@ -98,7 +98,7 @@
yn1 <- if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
paste("X2.", 1:nrow(y), sep = "")
- warn.save = options()$warn
+ warn.save <- options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
yn1 <- paste("X2.", 1:nrow(y), sep = "")
@@ -128,21 +128,21 @@
Hlist <- list("(Intercept)" = matrix(1, ncol(y), 1))
if (!noroweffects)
- for(ii in 2:nrow(y)) {
- Hlist[[ paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
+ for(ii in 2:nrow(y)) {
+ Hlist[[paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
- .rcim.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
- }
+ .rcim.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
+ }
if (!nocoleffects)
- for(ii in 2:ncol(y)) {
+ for(ii in 2:ncol(y)) {
- Hlist[[ paste(cprefix, ii, sep = "")]] <- modmat.col[, ii, drop = FALSE]
- .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
- }
+ Hlist[[ paste(cprefix, ii, sep = "")]] <- modmat.col[, ii, drop = FALSE]
+ .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
+ }
if (Rank > 0) {
for(ii in 2:nrow(y)) {
@@ -179,7 +179,7 @@
controlfun <- if (Rank == 0) rrvglm.control else rrvglm.control
- controlfun <- if (Rank == 0) vglm.control else rrvglm.control # orig.
+ controlfun <- if (Rank == 0) vglm.control else rrvglm.control # orig.
mycontrol <- controlfun(Rank = Rank,
@@ -199,7 +199,7 @@
if (Rank > 0)
- mycontrol$Norrr <- as.formula(str1) # Overwrite this
+ mycontrol$noRRR <- as.formula(str1) # Overwrite this
assign(".rcim.df", .rcim.df, envir = VGAM::VGAMenv)
@@ -212,13 +212,13 @@
if (Musual > 1) {
orig.Hlist <- Hlist
- kmat1 = rbind(1, 0)
- kmat0 = rbind(0, 1)
+ kmat1 <- rbind(1, 0)
+ kmat0 <- rbind(0, 1)
- kmat1 = matrix(0, nrow = Musual, ncol = 1)
- kmat1[which.lp, 1] = 1
- kmat0 = matrix(1, nrow = Musual, ncol = 1)
- kmat0[which.lp, 1] = 0
+ kmat1 <- matrix(0, nrow = Musual, ncol = 1)
+ kmat1[which.lp, 1] <- 1
+ kmat0 <- matrix(1, nrow = Musual, ncol = 1)
+ kmat0[which.lp, 1] <- 0
for (ii in 1:length(Hlist)) {
Hlist[[ii]] <- kronecker(Hlist[[ii]],
@@ -238,7 +238,7 @@
- offset.matrix = matrix(offset, nrow = nrow(y),
+ offset.matrix <- matrix(offset, nrow = nrow(y),
ncol = ncol(y) * Musual) # byrow = TRUE
answer <- if (Rank > 0) {
@@ -292,7 +292,7 @@
-summaryrcim = function(object, ...) {
+summaryrcim <- function(object, ...) {
rcim(object, summary.arg = TRUE, ...)
}
@@ -329,19 +329,23 @@ setMethod("summary", "rcim",
- Rcim <- function (mat, rbaseline = 1, cbaseline = 1) {
+ Rcim <- function(mat, rbaseline = 1, cbaseline = 1) {
mat <- as.matrix(mat)
RRR <- dim(mat)[1]
CCC <- dim(mat)[2]
- if (is.null(rownames(mat)))
- rnames <- paste("X", 1:RRR, sep = "") else
- rnames <- rownames(mat)
+ rnames <- if (is.null(rownames(mat))) {
+ paste("X", 1:RRR, sep = "")
+ } else {
+ rownames(mat)
+ }
- if (is.null(colnames(mat)))
- cnames <- paste("Y", 1:CCC, sep = "") else
- cnames <- colnames(mat)
+ cnames <- if (is.null(colnames(mat))) {
+ paste("Y", 1:CCC, sep = "")
+ } else {
+ colnames(mat)
+ }
r.index <- if (is.character(rbaseline))
which(rownames(mat) == rbaseline) else
@@ -389,21 +393,21 @@ setMethod("summary", "rcim",
- plotrcim0 <- function (object,
+ plotrcim0 <- function(object,
centered = TRUE, whichplots = c(1, 2),
hline0 = TRUE, hlty = "dashed", hcol = par()$col, hlwd = par()$lwd,
- rfirst = 1, cfirst = 1,
- rtype = "h", ctype = "h",
- rcex.lab = 1, rcex.axis = 1, # rlabels = FALSE,
- rtick = FALSE,
- ccex.lab = 1, ccex.axis = 1, # clabels = FALSE,
- ctick = FALSE,
- rmain = "Row effects", rsub = "",
- rxlab = "", rylab = "Row effects",
- cmain = "Column effects", csub = "",
- cxlab = "", cylab = "Column effects",
- rcol = par()$col, ccol = par()$col,
- ...) {
+ rfirst = 1, cfirst = 1,
+ rtype = "h", ctype = "h",
+ rcex.lab = 1, rcex.axis = 1, # rlabels = FALSE,
+ rtick = FALSE,
+ ccex.lab = 1, ccex.axis = 1, # clabels = FALSE,
+ ctick = FALSE,
+ rmain = "Row effects", rsub = "",
+ rxlab = "", rylab = "Row effects",
+ cmain = "Column effects", csub = "",
+ cxlab = "", cylab = "Column effects",
+ rcol = par()$col, ccol = par()$col,
+ ...) {
nparff <- if (is.numeric(object@family@infos()$Musual)) {
@@ -418,11 +422,11 @@ setMethod("summary", "rcim",
warning("argument 'object' is not Rank-0")
- n_lm = nrow(object@y)
+ n_lm <- nrow(object@y)
cobj <- coefficients(object)
- upperbound = if (!is.numeric(object@control$Rank) ||
+ upperbound <- if (!is.numeric(object@control$Rank) ||
object@control$Rank == 0) length(cobj) else
length(object@control$colx1.index)
@@ -435,9 +439,9 @@ setMethod("summary", "rcim",
orig.raxisl <- rownames(object@y)
orig.caxisl <- colnames(object@y)
if (is.null(orig.raxisl))
- orig.raxisl = as.character(1:nrow(object@y))
+ orig.raxisl <- as.character(1:nrow(object@y))
if (is.null(orig.caxisl))
- orig.caxisl = as.character(1:ncol(object@y))
+ orig.caxisl <- as.character(1:ncol(object@y))
roweff.orig <-
roweff <- orig.roweff[c(rfirst:last.r,
@@ -447,8 +451,8 @@ setMethod("summary", "rcim",
if (cfirst > 1) 1:(cfirst-1) else NULL)]
if (centered) {
- roweff = scale(roweff, scale = FALSE) # Center it only
- coleff = scale(coleff, scale = FALSE) # Center it only
+ roweff <- scale(roweff, scale = FALSE) # Center it only
+ coleff <- scale(coleff, scale = FALSE) # Center it only
}
raxisl <- orig.raxisl[c(rfirst:last.r,
@@ -525,9 +529,7 @@ setMethod("plot", "rcim",
-
-
-moffset <- function (mat, roffset = 0, coffset = 0, postfix = "") {
+moffset <- function(mat, roffset = 0, coffset = 0, postfix = "") {
@@ -538,7 +540,7 @@ moffset <- function (mat, roffset = 0, coffset = 0, postfix = "") {
return(mat)
- vecmat = c(unlist(mat))
+ vecmat <- c(unlist(mat))
ind1 <- if (is.character(roffset))
which(rownames(mat) == roffset) else
if (is.numeric(roffset)) roffset + 1 else
@@ -565,31 +567,31 @@ moffset <- function (mat, roffset = 0, coffset = 0, postfix = "") {
stop("too large a value for argument 'coffset'")
- start.ind = (ind2 - 1)* nrow(mat) + ind1
+ start.ind <- (ind2 - 1)* nrow(mat) + ind1
- svecmat = vecmat[c(start.ind:(nrow(mat) * ncol(mat)),
+ svecmat <- vecmat[c(start.ind:(nrow(mat) * ncol(mat)),
0:(start.ind - 1))]
- rownames.mat = rownames(mat)
+ rownames.mat <- rownames(mat)
if (length(rownames.mat) != nrow(mat))
- rownames.mat = paste("Row.", 1:nrow(mat), sep = "")
+ rownames.mat <- paste("Row.", 1:nrow(mat), sep = "")
- colnames.mat = colnames(mat)
+ colnames.mat <- colnames(mat)
if (length(colnames.mat) != ncol(mat))
- colnames.mat = paste("Col.", 1:ncol(mat), sep = "")
+ colnames.mat <- paste("Col.", 1:ncol(mat), sep = "")
- newrn = if (roffset > 0)
+ newrn <- if (roffset > 0)
c(rownames.mat[c(ind1:nrow(mat))],
paste(rownames.mat[0:(ind1-1)], postfix, sep = "")) else
rownames.mat
- newcn = c(colnames.mat[c(ind2:ncol(mat), 0:(ind2 - 1))])
+ newcn <- c(colnames.mat[c(ind2:ncol(mat), 0:(ind2 - 1))])
if (roffset > 0)
- newcn = paste(newcn, postfix, sep = "")
+ newcn <- paste(newcn, postfix, sep = "")
- newmat = matrix(svecmat, nrow(mat), ncol(mat),
+ newmat <- matrix(svecmat, nrow(mat), ncol(mat),
dimnames = list(newrn, newcn))
newmat
}
@@ -612,7 +614,7 @@ moffset <- function (mat, roffset = 0, coffset = 0, postfix = "") {
-confint_rrnb <- function(rrnb2, level = 0.95) {
+Confint.rrnb <- function(rrnb2, level = 0.95) {
if (class(rrnb2) != "rrvglm")
stop("argument 'rrnb2' does not appear to be a rrvglm() object")
@@ -635,27 +637,27 @@ confint_rrnb <- function(rrnb2, level = 0.95) {
delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat)
delta2.hat <- 2 - a21.hat
- se.a21.hat <- sqrt(vcovrrvglm(rrnb2)["I(lv.mat)", "I(lv.mat)"])
+ SE.a21.hat <- sqrt(vcovrrvglm(rrnb2)["I(lv.mat)", "I(lv.mat)"])
- ci.a21 <- a21.hat + c(-1, 1) * qnorm(1 - (1 - level)/2) * se.a21.hat
+ ci.a21 <- a21.hat + c(-1, 1) * qnorm(1 - (1 - level)/2) * SE.a21.hat
(ci.delta2 <- 2 - rev(ci.a21)) # e.g., the 95 percent CI
list(a21.hat = a21.hat,
beta11.hat = beta11.hat,
beta21.hat = beta21.hat,
CI.a21 = ci.a21,
- ci.delta2 = ci.delta2,
+ CI.delta2 = ci.delta2,
delta1 = delta1.hat,
delta2 = delta2.hat,
- se.a21.hat = se.a21.hat)
+ SE.a21.hat = SE.a21.hat)
}
-confint_nb1 <- function(nb1, level = 0.95) {
+Confint.nb1 <- function(nb1, level = 0.95) {
@@ -692,9 +694,9 @@ confint_nb1 <- function(nb1, level = 0.95) {
ci.mydiff <- mydiff + c(-1, 1) * qnorm(1 - (1 - level)/2) * se.mydiff
ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff)
- (ci.phi0 <- 1 + 1 / rev(ci.delta0)) # e.g., the 95 percent CI for phi0
+ (ci.phi0 <- 1 + 1 / rev(ci.delta0)) # e.g., the 95 percent CI for phi0
- list(ci.phi0 = ci.phi0,
+ list(CI.phi0 = ci.phi0,
CI.delta0 = ci.delta0,
delta0 = delta0.hat,
phi0 = phi0.hat)
@@ -703,6 +705,7 @@ confint_nb1 <- function(nb1, level = 0.95) {
+
plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
se.eachway = c(5, 5), # == c(LHS, RHS),
trace.arg = TRUE, ...) {
@@ -722,16 +725,16 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
loglik.orig <- logLik(rrvglm2)
- temp1 <- confint_rrnb(rrvglm2) # zz
+ temp1 <- Confint.rrnb(rrvglm2) # zz
a21.hat <- (Coef(rrvglm2)@A)[2, 1]
- se.a21.hat <- temp1$se.a21.hat
+ SE.a21.hat <- temp1$SE.a21.hat
- se.a21.hat <- sqrt(vcov(rrvglm2)["I(lv.mat)", "I(lv.mat)"])
+ SE.a21.hat <- sqrt(vcov(rrvglm2)["I(lv.mat)", "I(lv.mat)"])
- big.ci.a21 <- a21.hat + c(-1, 1) * se.eachway * se.a21.hat
+ big.ci.a21 <- a21.hat + c(-1, 1) * se.eachway * SE.a21.hat
seq.a21 <- seq(big.ci.a21[1], big.ci.a21[2], length = nseq.a21)
Hlist.orig <- constraints.vlm(rrvglm2, type = "term")
@@ -757,9 +760,9 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
names(argslist) <- c(names(listcall)[-1])
- argslist$trace = trace.arg
- argslist$etastart = prev.etastart
- argslist$constraints = Hlist.orig
+ argslist$trace <- trace.arg
+ argslist$etastart <- prev.etastart
+ argslist$constraints <- Hlist.orig
for (kay in 2:length(argslist[["constraints"]])) {
@@ -767,7 +770,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
}
- fitnew = do.call(what = funname, args = argslist)
+ fitnew <- do.call(what = funname, args = argslist)
a21.matrix[ii, 2] <- logLik(fitnew)
@@ -790,7 +793,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
abline(h = loglik.orig - qchisq(0.95, df = 1),
col = "darkorange", lty = "dashed")
- abline(v = a21.hat + c(-1, 1) * 1.96 * se.a21.hat,
+ abline(v = a21.hat + c(-1, 1) * 1.96 * SE.a21.hat,
col = "gray50", lty = "dashed", lwd = 2.0)
} # End of (plot.it)
@@ -812,68 +815,94 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
-Qvar <- function(object, factorname = NULL, coef.indices = NULL,
- labels = NULL, dispersion = NULL,
- reference.name = "(reference)",
- estimates = NULL
- ) {
+ Qvar <- function(object,
+ factorname = NULL,
+ which.eta = 1,
+ coef.indices = NULL,
+ labels = NULL, dispersion = NULL,
+ reference.name = "(reference)",
+ estimates = NULL
+ ) {
+
+
+
+
+ if (!is.Numeric(which.eta, allowable.length = 1, integer.valued = TRUE,
+ positive = TRUE))
+ stop("argument 'which.eta' must be a positive integer")
+
+
+
coef.indices.saved <- coef.indices
if (!is.matrix(object)) {
model <- object
if (is.null(factorname) && is.null(coef.indices)) {
- stop("arguments \"factorname\" and \"coef.indices\" are ",
+ stop("arguments 'factorname' and 'coef.indices' are ",
"both NULL")
}
if (is.null(coef.indices)) {
- tmodel <- terms(model)
- modelmat <- if (is.matrix(model@x)) model@x else
- model.matrix(tmodel,
- data = model@model)
+ M <- npred(model)
+ if (M < which.eta)
+ stop("argument 'which.eta' must be a value from the set 1:", M)
+ newfactorname <- if (M > 1) {
+ clist <- constraints(model, type = "term")
- colptr = attr(model.matrix(object, type = "vlm"), "vassign")
+ Hk <- clist[[factorname]]
+ Mdot <- ncol(Hk)
+ Hk.row <- Hk[which.eta, ]
+ if (sum(Hk.row != 0) > 1)
+ stop("cannot handle rows of constraint matrices with more ",
+ "than one nonzero value")
+ foo <- function(ii)
+ switch(as.character(ii), '1'="1st", '2'="2nd", '3'="3rd",
+ paste(ii, "th", sep = ""))
+ if (sum(Hk.row != 0) == 0)
+ stop("factor '", factorname, "' is not used in the ",
+ foo(which.eta), " eta (linear predictor)")
- M <- npred(model)
- newfactorname = if (M > 1) {
- clist = constraints(model, type = "term")
- Mdot = ncol(clist[[factorname]])
- vlabel(factorname, ncolBlist = Mdot, M = M)
+ row.index <- (1:Mdot)[Hk.row != 0]
+
+ all.labels <- vlabel(factorname, ncolBlist = Mdot, M = M)
+ all.labels[row.index]
} else {
factorname
}
- colptr = if (M > 1) {
+ colptr <- attr(model.matrix(object, type = "vlm"), "vassign")
+ colptr <- if (M > 1) {
colptr[newfactorname]
} else {
colptr[[newfactorname]]
}
- coef.indices <- colptr
+ coef.indices <- colptr
contmat <- if (length(model@xlevels[[factorname]]) ==
length(coef.indices)) {
diag(length(coef.indices))
} else {
eval(call(model@contrasts[[factorname]],
- model@xlevels[[factorname]]))
+ model@xlevels [[factorname]]))
}
rownames(contmat) <- model@xlevels[[factorname]]
if (is.null(estimates)) {
if (M > 1) {
- estimates <- matrix(-1, nrow(contmat), Mdot)
- for (ii in 1:Mdot)
- estimates[, ii] <- contmat %*% (coefvlm(model)[(coef.indices[[ii]])])
+ estimates <- matrix(-1, nrow(contmat), 1) # Used to be nc = Mdot
+ ii <- 1
+ estimates[, ii] <- contmat %*%
+ (coefvlm(model)[(coef.indices[[ii]])])
} else {
estimates <- contmat %*% (coefvlm(model)[coef.indices])
}
@@ -884,13 +913,10 @@ Qvar <- function(object, factorname = NULL, coef.indices = NULL,
covmat <- Covmat[unlist(coef.indices),
unlist(coef.indices), drop = FALSE]
covmat <- if (M > 1) {
-
- for (ii in 1:Mdot) {
- ans <- contmat %*% Covmat[colptr[[ii]], (colptr[[ii]])] %*% t(contmat)
- }
+ ii <- 1
+ ans <- contmat %*% Covmat[(colptr[[ii]]),
+ (colptr[[ii]])] %*% t(contmat)
ans
-
-
} else {
contmat %*% covmat %*% t(contmat)
}
@@ -926,6 +952,7 @@ Qvar <- function(object, factorname = NULL, coef.indices = NULL,
return(Recall(covmat,
factorname = factorname,
+ which.eta = which.eta,
coef.indices = coef.indices.saved,
labels = labels,
dispersion = dispersion,
@@ -945,26 +972,28 @@ Qvar <- function(object, factorname = NULL, coef.indices = NULL,
- allvcov = covmat
+ allvcov <- covmat
for (ilocal in 1:LLL)
for (jlocal in ilocal:LLL)
- allvcov[ilocal, jlocal] =
- allvcov[jlocal, ilocal] = covmat[ilocal, ilocal] +
- covmat[jlocal, jlocal] -
- covmat[ilocal, jlocal] * 2
+ allvcov[ilocal, jlocal] <-
+ allvcov[jlocal, ilocal] <- covmat[ilocal, ilocal] +
+ covmat[jlocal, jlocal] -
+ covmat[ilocal, jlocal] * 2
+
+ diag(allvcov) <- rep(1.0, length = LLL) # Any positive value should do
- diag(allvcov) = rep(1.0, len = LLL) # Any positive value should do
+ wmat <- matrix(1.0, LLL, LLL)
+ diag(wmat) <- sqrt( .Machine$double.eps )
- wmat = matrix(1.0, LLL, LLL)
- diag(wmat) = sqrt( .Machine$double.eps )
- logAllvcov = log(allvcov)
- attr(logAllvcov, "Prior.Weights") = wmat
- attr(logAllvcov, "estimates") = estimates
- attr(logAllvcov, "coef.indices") = coef.indices
- attr(logAllvcov, "factorname") = factorname
- attr(logAllvcov, "regularVar") = diag(covmat)
+ logAllvcov <- log(allvcov)
+ attr(logAllvcov, "Prior.Weights") <- wmat
+ attr(logAllvcov, "estimates") <- estimates
+ attr(logAllvcov, "coef.indices") <- coef.indices
+ attr(logAllvcov, "factorname") <- factorname
+ attr(logAllvcov, "regularVar") <- diag(covmat)
+ attr(logAllvcov, "which.eta") <- which.eta
logAllvcov
}
@@ -974,6 +1003,7 @@ Qvar <- function(object, factorname = NULL, coef.indices = NULL,
+
WorstErrors <- function(qv.object) {
stop("20110729; does not work")
@@ -1005,7 +1035,7 @@ WorstErrors <- function(qv.object) {
-IndentPrint <- function(object, indent = 4, ...){
+IndentPrint <- function(object, indent = 4, ...) {
stop("20110729; does not work")
zz <- ""
@@ -1015,11 +1045,12 @@ IndentPrint <- function(object, indent = 4, ...){
sink()
close(tc)
indent <- paste(rep(" ", indent), sep = "", collapse = "")
- cat(paste(indent, zz, sep = ""), sep = "\n")}
+ cat(paste(indent, zz, sep = ""), sep = "\n")
+}
-Print.qv <- function(x, ...){
+Print.qv <- function(x, ...) {
stop("20110729; does not work")
}
@@ -1029,23 +1060,23 @@ Print.qv <- function(x, ...){
summary.qvar <- function(object, ...) {
- relerrs = 1 - sqrt(exp(residuals(object, type = "response")))
- diag(relerrs) = NA
+ relerrs <- 1 - sqrt(exp(residuals(object, type = "response")))
+ diag(relerrs) <- NA
minErrSimple <- round(100 * min(relerrs, na.rm = TRUE), 1)
maxErrSimple <- round(100 * max(relerrs, na.rm = TRUE), 1)
- estimates = c(object@extra$attributes.y$estimates)
+ estimates <- c(object@extra$attributes.y$estimates)
if (!length(names(estimates)) &&
is.matrix(object@extra$attributes.y$estimates))
- names( estimates) = rownames(object@extra$attributes.y$estimates)
+ names( estimates) <- rownames(object@extra$attributes.y$estimates)
if (!length(names(estimates)))
- names( estimates) = paste("Level", 1:length(estimates), sep = "")
+ names( estimates) <- paste("Level", 1:length(estimates), sep = "")
- regularVar = c(object@extra$attributes.y$regularVar)
+ regularVar <- c(object@extra$attributes.y$regularVar)
QuasiVar <- exp(diag(fitted(object))) / 2
QuasiSE <- sqrt(QuasiVar)
@@ -1062,15 +1093,16 @@ summary.qvar <- function(object, ...) {
+
print.summary.qvar <- function(x, ...) {
- object = x$object
- minErrSimple = x$minErrSimple
- maxErrSimple = x$maxErrSimple
+ object <- x$object
+ minErrSimple <- x$minErrSimple
+ maxErrSimple <- x$maxErrSimple
- x$minErrSimple = NULL
- x$maxErrSimple = NULL
- x$object = NULL
+ x$minErrSimple <- NULL
+ x$maxErrSimple <- NULL
+ x$object <- NULL
if (length(cl <- object@call)) {
@@ -1079,7 +1111,7 @@ print.summary.qvar <- function(x, ...) {
}
- facname = c(object@extra$attributes.y$factorname)
+ facname <- c(object@extra$attributes.y$factorname)
if (length(facname))
cat("Factor name: ", facname, "\n")
@@ -1087,7 +1119,7 @@ print.summary.qvar <- function(x, ...) {
if (length(object@dispersion))
cat("\nDispersion: ", object@dispersion, "\n\n")
- x = as.data.frame(c(x))
+ x <- as.data.frame(c(x))
print.data.frame(x)
@@ -1099,6 +1131,7 @@ print.summary.qvar <- function(x, ...) {
+
plotqvar <- function(object,
intervalWidth = 2,
ylab = "Estimate",
@@ -1132,12 +1165,12 @@ plotqvar <- function(object,
stop("argument 'object' does not appear to be a ",
"rcim(, normal1) object")
- estimates = c(object@extra$attributes.y$estimates)
+ estimates <- c(object@extra$attributes.y$estimates)
if (!length(names(estimates)) &&
is.matrix(object@extra$attributes.y$estimates))
- names( estimates) = rownames(object@extra$attributes.y$estimates)
+ names( estimates) <- rownames(object@extra$attributes.y$estimates)
if (!length(names(estimates)))
- names( estimates) = paste("Level", 1:length(estimates),
+ names( estimates) <- paste("Level", 1:length(estimates),
sep = "")
@@ -1166,7 +1199,7 @@ plotqvar <- function(object,
if (is.numeric(conf.level)) {
- zedd = abs(qnorm((1 - conf.level) / 2))
+ zedd <- abs(qnorm((1 - conf.level) / 2))
lsd.tops <- estimates + zedd * QuasiSE / sqrt(2)
lsd.tails <- estimates - zedd * QuasiSE / sqrt(2)
if (max(QuasiSE) / min(QuasiSE) > warn.ratio)
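Besides the assignment-style cleanup, the family.rcim.R diff renames confint_rrnb()/confint_nb1() to Confint.rrnb()/Confint.nb1() and adds a which.eta argument to Qvar(), selecting the linear predictor whose factor coefficients the quasi-variances refer to. A hedged sketch of the extended interface follows, assuming a hypothetical vglm fit 'fit1' containing a factor "group" and the usual rcim(Qvar(...), normal1("explink")) idiom; treat the exact call as illustrative only.

    ## Hypothetical sketch; 'fit1' and the factor "group" are placeholders.
    qv <- rcim(Qvar(fit1, factorname = "group", which.eta = 1),
               normal1("explink"))
    plotqvar(qv)                      # quasi-variance plot
    sqrt(exp(diag(fitted(qv))) / 2)   # quasi-SEs, as computed in summary.qvar()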
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index 4b5dca3..c3269bf 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -80,18 +80,18 @@ rcqo <- function(n, p, S,
if (!is.logical(Crow1positive)) {
stop("bad input for argument 'Crow1positive)'")
} else {
- Crow1positive = rep(Crow1positive, len=Rank)
+ Crow1positive <- rep(Crow1positive, len=Rank)
}
- Shape = rep(Shape, len=S)
- sdlv = rep(sdlv, len=Rank)
- sdOptima = rep(sdOptima, len=Rank)
- sdTolerances = rep(sdTolerances, len=Rank)
- AA = sdOptima / 3^0.5
+ Shape <- rep(Shape, len=S)
+ sdlv <- rep(sdlv, len=Rank)
+ sdOptima <- rep(sdOptima, len=Rank)
+ sdTolerances <- rep(sdTolerances, len=Rank)
+ AA <- sdOptima / 3^0.5
if (Rank > 1 && any(diff(sdlv) > 0))
stop("argument 'sdlv' must be a vector with decreasing values")
if (FALSE)
- change.seed.expression = expression({
+ change.seed.expression <- expression({
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) {
runif(1) # initialize the RNG if necessary
}
@@ -104,176 +104,176 @@ rcqo <- function(n, p, S,
on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
}
})
- change.seed.expression = expression({
+ change.seed.expression <- expression({
if (length(seed)) set.seed(seed)
})
eval(change.seed.expression)
- V = matrix(rhox, p-1, p-1)
- diag(V) = 1
- L = chol(V)
+ V <- matrix(rhox, p-1, p-1)
+ diag(V) <- 1
+ L <- chol(V)
if (length(xmat)) {
- xnames = colnames(xmat)
+ xnames <- colnames(xmat)
} else {
eval(change.seed.expression)
- xmat = matrix(rnorm(n*(p-1)), n, p-1) %*% L
- xmat = scale(xmat, center = TRUE)
- xnames = paste("x", 2:p, sep="")
- dimnames(xmat) = list(as.character(1:n), xnames)
+ xmat <- matrix(rnorm(n*(p-1)), n, p-1) %*% L
+ xmat <- scale(xmat, center = TRUE)
+ xnames <- paste("x", 2:p, sep="")
+ dimnames(xmat) <- list(as.character(1:n), xnames)
}
eval(change.seed.expression)
- ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
- lvmat = cbind(xmat %*% ccoefs)
+ ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
+ lvmat <- cbind(xmat %*% ccoefs)
if (Rank > 1) {
- Rmat = chol(var(lvmat))
- iRmat = solve(Rmat)
- lvmat = lvmat %*% iRmat # var(lvmat) == diag(Rank)
- ccoefs = ccoefs %*% iRmat
+ Rmat <- chol(var(lvmat))
+ iRmat <- solve(Rmat)
+ lvmat <- lvmat %*% iRmat # var(lvmat) == diag(Rank)
+ ccoefs <- ccoefs %*% iRmat
}
for(r in 1:Rank)
if (( Crow1positive[r] && ccoefs[1,r] < 0) ||
(!Crow1positive[r] && ccoefs[1,r] > 0)) {
- ccoefs[,r] = -ccoefs[,r]
- lvmat[,r] = -lvmat[,r]
+ ccoefs[,r] <- -ccoefs[,r]
+ lvmat[,r] <- -lvmat[,r]
}
if (scalelv) {
for(r in 1:Rank) {
- sdlvr = sd(lvmat[,r])
- lvmat[,r] = lvmat[,r] * sdlv[r] / sdlvr
- ccoefs[,r] = ccoefs[,r] * sdlv[r] / sdlvr
+ sdlvr <- sd(lvmat[,r])
+ lvmat[,r] <- lvmat[,r] * sdlv[r] / sdlvr
+ ccoefs[,r] <- ccoefs[,r] * sdlv[r] / sdlvr
}
} else {
- sdlvr = NULL
+ sdlvr <- NULL
for(r in 1:Rank) {
- sdlvr = c(sdlvr, sd(lvmat[,r]))
+ sdlvr <- c(sdlvr, sd(lvmat[,r]))
}
}
if (ESOptima) {
- if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
- S^(1/Rank) < 2)
- stop("S^(1/Rank) must be an integer greater or equal to 2")
- if (Rank == 1) {
- optima = matrix(as.numeric(NA), S, Rank)
- for(r in 1:Rank) {
- optima[,r] = seq(-AA, AA, len=S^(1/Rank))
- }
- } else if (Rank == 2) {
- optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
- lv2=seq(-AA[2], AA[2], len=S^(1/Rank)))
- } else if (Rank == 3) {
- optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
- lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
- lv3=seq(-AA[3], AA[3], len=S^(1/Rank)))
- } else {
- optima = expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
- lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
- lv3=seq(-AA[3], AA[3], len=S^(1/Rank)),
- lv4=seq(-AA[4], AA[4], len=S^(1/Rank)))
+ if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
+ S^(1/Rank) < 2)
+ stop("S^(1/Rank) must be an integer greater or equal to 2")
+ if (Rank == 1) {
+ optima <- matrix(as.numeric(NA), S, Rank)
+ for(r in 1:Rank) {
+ optima[,r] <- seq(-AA, AA, len=S^(1/Rank))
}
- if (Rank > 1)
- optima = matrix(unlist(optima), S, Rank) # Make sure it is a matrix
+ } else if (Rank == 2) {
+ optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)))
+ } else if (Rank == 3) {
+ optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
+ lv3=seq(-AA[3], AA[3], len=S^(1/Rank)))
+ } else {
+ optima <- expand.grid(lv1=seq(-AA[1], AA[1], len=S^(1/Rank)),
+ lv2=seq(-AA[2], AA[2], len=S^(1/Rank)),
+ lv3=seq(-AA[3], AA[3], len=S^(1/Rank)),
+ lv4=seq(-AA[4], AA[4], len=S^(1/Rank)))
+ }
+ if (Rank > 1)
+ optima <- matrix(unlist(optima), S, Rank) # Make sure its a matrix
} else {
- optima = matrix(1, S, Rank)
+ optima <- matrix(1, S, Rank)
eval(change.seed.expression)
for(r in 1:Rank) {
- optima[,r] = rnorm(n=S, sd=sdOptima[r])
+ optima[,r] <- rnorm(n=S, sd=sdOptima[r])
}
}
for(r in 1:Rank)
- optima[,r] = optima[,r] * sdOptima[r] / sd(optima[,r])
+ optima[,r] <- optima[,r] * sdOptima[r] / sd(optima[,r])
- ynames = paste("y", 1:S, sep="")
- Kvector = rep(Kvector, len=S)
- names(Kvector) = ynames
- lvnames = if (Rank==1) "lv" else paste("lv", 1:Rank, sep="")
- Tols = if (EqualTolerances) matrix(1, S, Rank) else {
+ ynames <- paste("y", 1:S, sep="")
+ Kvector <- rep(Kvector, len=S)
+ names(Kvector) <- ynames
+ lvnames <- if (Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ Tols <- if (EqualTolerances) matrix(1, S, Rank) else {
eval(change.seed.expression)
- temp = matrix(1, S, Rank)
+ temp <- matrix(1, S, Rank)
if (S > 1)
for(r in 1:Rank) {
- temp[-1,r] = rnorm(S-1, mean=1, sd=sdTolerances[r])
+ temp[-1,r] <- rnorm(S-1, mean=1, sd=sdTolerances[r])
if (any(temp[,r] <= 0)) stop("negative tolerances!")
- temp[,r] = temp[,r]^2 # Tolerance matrix = var-cov matrix)
+ temp[,r] <- temp[,r]^2 # Tolerance matrix = var-cov matrix)
}
temp
}
- dimnames(Tols) = list(ynames, lvnames)
- dimnames(ccoefs) = list(xnames, lvnames)
- dimnames(optima) = list(ynames, lvnames)
- loeta = log(loabundance) # May be a vector
- hieta = log(hiabundance)
+ dimnames(Tols) <- list(ynames, lvnames)
+ dimnames(ccoefs) <- list(xnames, lvnames)
+ dimnames(optima) <- list(ynames, lvnames)
+ loeta <- log(loabundance) # May be a vector
+ hieta <- log(hiabundance)
eval(change.seed.expression)
- logmaxima = runif(S, min=loeta, max=hieta) # loeta and hieta may be vector
- names(logmaxima) = ynames
- etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
+ logmaxima <- runif(S, min=loeta, max=hieta) # loeta and hieta may be vector
+ names(logmaxima) <- ynames
+ etamat <- matrix(logmaxima, n, S, byrow = TRUE)
for(jay in 1:S) {
- optmat = matrix(optima[jay,], nrow=n, ncol=Rank, byrow = TRUE)
- tolmat = matrix(Tols[jay,], nrow=n, ncol=Rank, byrow = TRUE)
- temp = cbind((lvmat - optmat) / tolmat)
+ optmat <- matrix(optima[jay,], nrow=n, ncol=Rank, byrow = TRUE)
+ tolmat <- matrix(Tols[jay,], nrow=n, ncol=Rank, byrow = TRUE)
+ temp <- cbind((lvmat - optmat) / tolmat)
for(r in 1:Rank)
etamat[,jay]=etamat[,jay]-0.5*(lvmat[,r] - optmat[jay,r])*temp[,r]
}
- rootdist = switch(family,
+ rootdist <- switch(family,
"poisson"=1, "binomial-poisson"=1, "ordinal-poisson"=1,
"negbinomial"=2, "Binomial-negbinomial"=2, "Ordinal-negbinomial"=2,
"gamma2"=3)
eval(change.seed.expression)
if (rootdist == 1) {
- ymat = matrix(rpois(n*S, lambda = exp(etamat)), n, S)
+ ymat <- matrix(rpois(n*S, lambda = exp(etamat)), n, S)
} else if (rootdist == 2) {
- mKvector = matrix(Kvector, n, S, byrow = TRUE)
- ymat = matrix(rnbinom(n=n*S, mu=exp(etamat), size=mKvector),n,S)
- if (sqrt) ymat = ymat^0.5
+ mKvector <- matrix(Kvector, n, S, byrow = TRUE)
+ ymat <- matrix(rnbinom(n=n*S, mu=exp(etamat), size=mKvector),n,S)
+ if (sqrt) ymat <- ymat^0.5
} else if (rootdist == 3) {
- Shape = matrix(Shape, n, S, byrow = TRUE)
- ymat = matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
- if (Log) ymat = log(ymat)
+ Shape <- matrix(Shape, n, S, byrow = TRUE)
+ ymat <- matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
+ if (Log) ymat <- log(ymat)
} else stop("argument 'rootdist' unmatched")
- tmp1 = NULL
+ tmp1 <- NULL
if (any(family == c("ordinal-poisson","Ordinal-negbinomial"))) {
- tmp1 = cut(c(ymat), breaks=breaks, labels=NULL) #To get attributes(tmp1)
- ymat = cut(c(ymat), breaks=breaks, labels=FALSE)
- dim(ymat) = c(n,S)
+ tmp1 <- cut(c(ymat), breaks=breaks, labels=NULL) #To get attributes(tmp1)
+ ymat <- cut(c(ymat), breaks=breaks, labels=FALSE)
+ dim(ymat) <- c(n,S)
}
if (any(family == c("binomial-poisson","Binomial-negbinomial")))
- ymat = 0 + (ymat > 0)
+ ymat <- 0 + (ymat > 0)
- myform = as.formula(paste(paste("cbind(",
+ myform <- as.formula(paste(paste("cbind(",
paste(paste("y",1:S,sep=""), collapse=","),
") ~ ", sep=""),
paste(paste("x",2:p,sep=""), collapse="+"), sep=""))
- dimnames(ymat) = list(as.character(1:n), ynames)
- ans = data.frame(xmat, ymat)
- attr(ans, "ccoefficients") = ccoefs
- attr(ans, "Crow1positive") = Crow1positive
- attr(ans, "family") = family
- attr(ans, "formula") = myform # Useful for running cqo() on the data
- attr(ans, "Rank") = Rank
- attr(ans, "family") = family
- attr(ans, "Kvector") = Kvector
- attr(ans, "logmaxima") = logmaxima
- attr(ans, "loabundance") = loabundance
- attr(ans, "hiabundance") = hiabundance
- attr(ans, "optima") = optima
- attr(ans, "Log") = Log
- attr(ans, "lv") = lvmat
- attr(ans, "eta") = etamat
- attr(ans, "EqualTolerances") = EqualTolerances
- attr(ans, "EqualMaxima") = EqualMaxima || all(loabundance == hiabundance)
- attr(ans, "ESOptima") = ESOptima
- attr(ans, "seed") = seed # RNGstate
- attr(ans, "sdTolerances") = sdTolerances
- attr(ans, "sdlv") = if (scalelv) sdlv else sdlvr
- attr(ans, "sdOptima") = sdOptima
- attr(ans, "Shape") = Shape
- attr(ans, "sqrt") = sqrt
- attr(ans, "tolerances") = Tols^0.5 # Like a standard deviation
- attr(ans, "breaks") = if (length(tmp1)) attributes(tmp1) else breaks
+ dimnames(ymat) <- list(as.character(1:n), ynames)
+ ans <- data.frame(xmat, ymat)
+ attr(ans, "ccoefficients") <- ccoefs
+ attr(ans, "Crow1positive") <- Crow1positive
+ attr(ans, "family") <- family
+ attr(ans, "formula") <- myform # Useful for running cqo() on the data
+ attr(ans, "Rank") <- Rank
+ attr(ans, "family") <- family
+ attr(ans, "Kvector") <- Kvector
+ attr(ans, "logmaxima") <- logmaxima
+ attr(ans, "loabundance") <- loabundance
+ attr(ans, "hiabundance") <- hiabundance
+ attr(ans, "optima") <- optima
+ attr(ans, "Log") <- Log
+ attr(ans, "lv") <- lvmat
+ attr(ans, "eta") <- etamat
+ attr(ans, "EqualTolerances") <- EqualTolerances
+ attr(ans, "EqualMaxima") <- EqualMaxima || all(loabundance == hiabundance)
+ attr(ans, "ESOptima") <- ESOptima
+ attr(ans, "seed") <- seed # RNGstate
+ attr(ans, "sdTolerances") <- sdTolerances
+ attr(ans, "sdlv") <- if (scalelv) sdlv else sdlvr
+ attr(ans, "sdOptima") <- sdOptima
+ attr(ans, "Shape") <- Shape
+ attr(ans, "sqrt") <- sqrt
+ attr(ans, "tolerances") <- Tols^0.5 # Like a standard deviation
+ attr(ans, "breaks") <- if (length(tmp1)) attributes(tmp1) else breaks
ans
}
@@ -298,8 +298,8 @@ dcqo <- function(x, p, S,
if (mode(family) != "character" && mode(family) != "name")
- family = as.character(substitute(family))
- family = match.arg(family, c("poisson", "binomial",
+ family <- as.character(substitute(family))
+ family <- match.arg(family, c("poisson", "binomial",
"negbinomial", "ordinal"))[1]
@@ -323,28 +323,28 @@ dcqo <- function(x, p, S,
"be equal when 'EqualTolerances = TRUE'")
if (length(seed)) set.seed(seed)
- xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
+ xmat <- matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
paste("x", 2:p, sep="")))
- ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
- lvmat = xmat %*% ccoefs
- optima = matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
- Tols = if (EqualTolerances) matrix(1, S, Rank) else
+ ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
+ lvmat <- xmat %*% ccoefs
+ optima <- matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
+ Tols <- if (EqualTolerances) matrix(1, S, Rank) else
matrix(rnorm(Rank*S, mean=1, sd=1), S, Rank)
- loeta = log(loabundance)
- hieta = log(hiabundance)
- logmaxima = runif(S, min=loeta, max=hieta)
+ loeta <- log(loabundance)
+ hieta <- log(hiabundance)
+ logmaxima <- runif(S, min=loeta, max=hieta)
- etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
+ etamat <- matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
for(jay in 1:S) {
- optmat = matrix(optima[jay,], n, Rank, byrow = TRUE)
- tolmat = matrix(Tols[jay,], n, Rank, byrow = TRUE)
- temp = cbind((lvmat - optmat) * tolmat)
+ optmat <- matrix(optima[jay,], n, Rank, byrow = TRUE)
+ tolmat <- matrix(Tols[jay,], n, Rank, byrow = TRUE)
+ temp <- cbind((lvmat - optmat) * tolmat)
for(r in 1:Rank)
- etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] *
+ etamat[,jay] <- etamat[,jay] - 0.5 * temp[,r] *
(lvmat[,r] - optmat[jay,r])
}
- ymat = if (family == "negbinomial") {
+ ymat <- if (family == "negbinomial") {
@@ -352,12 +352,12 @@ dcqo <- function(x, p, S,
matrix(rpois(n*S, lambda = exp(etamat)), n, S)
}
if (family == "binomial")
- ymat = 0 + (ymat > 0)
+ ymat <- 0 + (ymat > 0)
- dimnames(ymat) = list(as.character(1:n), paste("y", 1:S, sep=""))
- ans = data.frame(xmat, ymat)
- attr(ans, "ccoefficients") = ccoefs
- attr(ans, "family") = family
+ dimnames(ymat) <- list(as.character(1:n), paste("y", 1:S, sep=""))
+ ans <- data.frame(xmat, ymat)
+ attr(ans, "ccoefficients") <- ccoefs
+ attr(ans, "family") <- family
ans
}
@@ -365,13 +365,13 @@ dcqo <- function(x, p, S,
-getInitVals = function(gvals, llfun, ...) {
- LLFUN = match.fun(llfun)
- ff = function(myx, ...) LLFUN(myx, ...)
- objFun = gvals
+getInitVals <- function(gvals, llfun, ...) {
+ LLFUN <- match.fun(llfun)
+ ff <- function(myx, ...) LLFUN(myx, ...)
+ objFun <- gvals
for(ii in 1:length(gvals))
- objFun[ii] = ff(myx=gvals[ii], ...)
- try.this = gvals[objFun == max(objFun)] # Usually scalar, maybe vector
+ objFun[ii] <- ff(myx=gvals[ii], ...)
+ try.this <- gvals[objFun == max(objFun)] # Usually scalar, maybe vector
try.this
}
@@ -383,13 +383,13 @@ getInitVals = function(gvals, llfun, ...) {
-campp = function(q, size, prob, mu) {
+campp <- function(q, size, prob, mu) {
if (!missing(mu)) {
if (!missing(prob))
stop("'prob' and 'mu' both specified")
prob <- size/(size + mu)
}
- K = (1/3) * ((9*q+8)/(q+1) - ((9*size-1)/size) * (mu/(q+1))^(1/3)) /
+ K <- (1/3) * ((9*q+8)/(q+1) - ((9*size-1)/size) * (mu/(q+1))^(1/3)) /
sqrt( (1/size) * (mu/(q+1))^(2/3) + 1 / (q+1)) # Note the +, not -
pnorm(K)
}
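The family.rcqo.R changes above are cosmetic (assignment operators and indentation), so rcqo() still simulates species abundances along constrained latent-variable gradients and stores a ready-made model formula as an attribute. A small illustrative sketch follows, with arbitrary argument values; only the argument names are taken from the function's signature.

    ## Illustrative only; the values of n, p, S and the seed are arbitrary.
    set.seed(123)
    dat <- rcqo(n = 100, p = 5, S = 10, family = "poisson")
    myform <- attr(dat, "formula")   # stored by rcqo() for running cqo()
    fit <- cqo(myform, family = poissonff, data = dat)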
diff --git a/R/family.robust.R b/R/family.robust.R
index 34b535b..e7370e9 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -46,9 +46,9 @@ dhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
myl <- rep(0.0, len = use.n)
@@ -127,8 +127,10 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
- huber <- function(llocation = "identity", lscale = "loge",
- k = 0.862, imethod = 1, zero = 2) {
+ huber2 <- function(llocation = "identity", lscale = "loge",
+ k = 0.862, imethod = 1, zero = 2) {
+
+
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
@@ -180,7 +182,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = c(w))
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
scale.y.est <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .imethod == 3) {
@@ -205,9 +207,9 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
}, list( .llocat = llocat,
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$link <- c("location" = .llocat , "scale" = .lscale )
- misc$earg <- list("location" = .elocat, "scale" = .escale)
+ misc$earg <- list("location" = .elocat , "scale" = .escale )
misc$expected <- TRUE
misc$k.huber <- .k
@@ -229,7 +231,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.k = k ))),
- vfamily = c("huber"),
+ vfamily = c("huber2"),
deriv = eval(substitute(expression({
mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
@@ -239,7 +241,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
cond2 <- (abs(zedd) <= myk)
cond3 <- (zedd > myk)
- dl.dlocat <- -myk + 0 * zedd # cond1
+ dl.dlocat <- -myk + 0 * zedd # cond1
dl.dlocat[cond2] <- zedd[cond2]
dl.dlocat[cond3] <- myk # myk is a scalar
dl.dlocat <- dl.dlocat / myscale
@@ -252,9 +254,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
- ans <-
- c(w) * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)
+ ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
ans
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
@@ -269,10 +270,10 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
ned2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
ned2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
- 2 * (1 - .eps) / (myk * myscale^2)
+ 2 * (1 - .eps) / (myk * myscale^2)
- wz[, iam(1,1,M)] <- ned2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] <- ned2l.dscale2 * dscale.deta^2
+ wz[, iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
ans
c(w) * wz
}), list( .eps = eps ))))
@@ -324,7 +325,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = c(w))
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .imethod == 3) {
rep(weighted.mean(y, w), len = n)
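Note that this file's diff renames the Huber robust-regression family from huber() to huber2() (the vfamily slot changes accordingly). A minimal hypothetical call to the renamed function; 'ddd', 'y' and 'x2' are placeholders.

    ## Hypothetical sketch of the renamed family function.
    fit <- vglm(y ~ x2, family = huber2(k = 0.862), data = ddd, trace = TRUE)
    coef(fit, matrix = TRUE)   # location and log(scale) coefficients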
diff --git a/R/family.rrr.R b/R/family.rrr.R
index e7dd0cc..ce3e5c6 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,58 +1,57 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-replace.constraints = function(Blist, cm, index) {
+replace.constraints <- function(Blist, cm, index) {
- for(iii in index)
- Blist[[iii]] = cm
- Blist
+ for (iii in index)
+ Blist[[iii]] <- cm
+ Blist
}
valt.control <- function(
Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
- 60, 80, 100, 125, 2^(8:12)),
+ 60, 80, 100, 125, 2^(8:12)),
Criterion = c("rss", "coefficients"),
Linesearch = FALSE, Maxit = 7,
Suppress.warning = TRUE,
- Tolerance = 1e-7, ...)
-{
-
- if (mode(Criterion) != "character" && mode(Criterion) != "name")
- Criterion <- as.character(substitute(Criterion))
- Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
-
- list(Alphavec = Alphavec,
- Criterion = Criterion,
- Linesearch = Linesearch,
- Maxit = Maxit,
- Suppress.warning = Suppress.warning,
- Tolerance = Tolerance)
-
+ Tolerance = 1e-7, ...) {
+
+ if (mode(Criterion) != "character" && mode(Criterion) != "name")
+ Criterion <- as.character(substitute(Criterion))
+ Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
+
+ list(Alphavec = Alphavec,
+ Criterion = Criterion,
+ Linesearch = Linesearch,
+ Maxit = Maxit,
+ Suppress.warning = Suppress.warning,
+ Tolerance = Tolerance)
}
-qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
- Rank = ncol(numat)
- moff = NULL
- ans = if (Quadratic) {
- index = iam(NA, NA, M = Rank, diag = TRUE, both = TRUE)
- temp1 = cbind(numat[,index$row] * numat[,index$col])
- if (ITolerances) {
- moff = 0
- for(ii in 1:Rank)
- moff = moff - 0.5 * temp1[,ii]
- }
- cbind(numat, if (ITolerances) NULL else temp1)
- } else
- as.matrix(numat)
- list(matrix = if (Aoffset>0) ans else ans[,-(1:Rank),drop = FALSE],
- offset = moff)
+qrrvglm.xprod <- function(numat, Aoffset, Quadratic, ITolerances) {
+ Rank <- ncol(numat)
+ moff <- NULL
+ ans <- if (Quadratic) {
+ index <- iam(NA, NA, M = Rank, diag = TRUE, both = TRUE)
+ temp1 <- cbind(numat[,index$row] * numat[,index$col])
+ if (ITolerances) {
+ moff <- 0
+ for (ii in 1:Rank)
+ moff <- moff - 0.5 * temp1[,ii]
+ }
+ cbind(numat, if (ITolerances) NULL else temp1)
+ } else {
+ as.matrix(numat)
+ }
+ list(matrix = if (Aoffset>0) ans else ans[, -(1:Rank), drop = FALSE],
+ offset = moff)
}
@@ -60,22 +59,21 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
valt <- function(x, z, U, Rank = 1,
- Blist = NULL,
- Cinit = NULL,
- Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
- 60, 80, 100, 125, 2^(8:12)),
- Criterion = c("rss", "coefficients"),
- Crow1positive = rep(TRUE, length.out = Rank),
- colx1.index,
- Linesearch = FALSE,
- Maxit = 20,
- szero = NULL,
- SD.Cinit = 0.02,
- Suppress.warning = FALSE,
- Tolerance = 1e-6,
- trace = FALSE,
- xij = NULL)
-{
+ Blist = NULL,
+ Cinit = NULL,
+ Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
+ 60, 80, 100, 125, 2^(8:12)),
+ Criterion = c("rss", "coefficients"),
+ Crow1positive = rep(TRUE, length.out = Rank),
+ colx1.index,
+ Linesearch = FALSE,
+ Maxit = 20,
+ szero = NULL,
+ SD.Cinit = 0.02,
+ Suppress.warning = FALSE,
+ Tolerance = 1e-6,
+ trace = FALSE,
+ xij = NULL) {
@@ -97,29 +95,29 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
if (!is.matrix(x))
x <- as.matrix(x)
- colx2.index = if (is.null(colx1.index)) 1:ncol(x) else
+ colx2.index <- if (is.null(colx1.index)) 1:ncol(x) else
(1:ncol(x))[-colx1.index]
- p1 = length(colx1.index)
- p2 = length(colx2.index)
- p = p1 + p2
+ p1 <- length(colx1.index)
+ p2 <- length(colx2.index)
+ p <- p1 + p2
if (!p2) stop("'p2', the number of variables for the ",
"reduced-rank regression, must be > 0")
if (!length(Blist)) {
- Blist = replace.constraints(vector("list", p), diag(M), 1:p)
+ Blist <- replace.constraints(vector("list", p), diag(M), 1:p)
}
dU <- dim(U)
if (dU[2] != n)
stop("input unconformable")
- clist2 = replace.constraints(vector("list", Rank+p1),
+ clist2 <- replace.constraints(vector("list", Rank+p1),
if (length(szero))
diag(M)[, -szero, drop = FALSE] else diag(M), 1:Rank)
if (p1) {
- for(kk in 1:p1)
- clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
+ for (kk in 1:p1)
+ clist2[[Rank+kk]] <- Blist[[colx1.index[kk]]]
}
if (is.null(Cinit))
@@ -130,37 +128,37 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
C <- Cinit # This is input for the main iter loop
old.crit <- switch(Criterion, coefficients=C, rss=fit$rss)
- recover = 0 # Allow a few iterations between different line searches
- for(iter in 1:Maxit) {
- iter.save <- iter
+ recover <- 0 # Allow a few iterations between different line searches
+ for (iter in 1:Maxit) {
+ iter.save <- iter
lv.mat <- x[, colx2.index, drop = FALSE] %*% C
- new.lv.model.matrix = cbind(lv.mat,
+ new.lv.model.matrix <- cbind(lv.mat,
if (p1) x[, colx1.index] else NULL)
- fit = vlm.wfit(xmat = new.lv.model.matrix, z, Blist = clist2,
+ fit <- vlm.wfit(xmat = new.lv.model.matrix, z, Blist = clist2,
U = U, matrix.out = TRUE, is.vlmX = FALSE,
rss = FALSE, qr = FALSE, xij = xij)
A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
- clist1 = replace.constraints(Blist, A, colx2.index)
- fit = vlm.wfit(xmat = x, z, Blist = clist1, U = U,
+ clist1 <- replace.constraints(Blist, A, colx2.index)
+ fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U,
matrix.out = TRUE, is.vlmX = FALSE,
rss = TRUE, qr = FALSE, xij = xij)
- C = fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
- solve(t(A) %*% A)
+ C <- fit$mat.coef[colx2.index, , drop = FALSE] %*% A %*%
+ solve(t(A) %*% A)
- numat = x[, colx2.index, drop = FALSE] %*% C
- evnu = eigen(var(numat))
- temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ numat <- x[, colx2.index, drop = FALSE] %*% C
+ evnu <- eigen(var(numat))
+ temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
- C = C %*% temp7
- A = A %*% t(solve(temp7))
- temp8 = crow1C(cmat = C, Crow1positive, amat = A)
- C = temp8$cmat
- A = temp8$amat
+ C <- C %*% temp7
+ A <- A %*% t(solve(temp7))
+ temp8 <- crow1C(cmat = C, Crow1positive, amat = A)
+ C <- temp8$cmat
+ A <- temp8$amat
- ratio = switch(Criterion,
+ ratio <- switch(Criterion,
coefficients = max(abs(C - old.crit) / (Tolerance+abs(C))),
rss = max(abs(fit$rss - old.crit) / (Tolerance+fit$rss)))
@@ -178,21 +176,21 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
warning("did not converge")
}
- fini.linesearch = FALSE
+ fini.linesearch <- FALSE
if (Linesearch && iter - recover >= 2) {
xnew <- C
direction1 <- (xnew-xold) # / sqrt(1 + sum((xnew-xold)^2))
ftemp <- fit$rss # Most recent objective function
use.alpha <- 0 # The current step relative to (xold, yold)
- for(itter in 1:length(Alphavec)) {
+ for (itter in 1:length(Alphavec)) {
CC <- xold + Alphavec[itter] * direction1
try.lv.mat <- x[, colx2.index, drop = FALSE] %*% CC
try.new.lv.model.matrix = cbind(try.lv.mat,
if (p1) x[,colx1.index] else NULL)
- try = vlm.wfit(xmat = try.new.lv.model.matrix, z,
+ try <- vlm.wfit(xmat = try.new.lv.model.matrix, z,
Blist = clist2, U = U, matrix.out = TRUE,
is.vlmX = FALSE, rss = TRUE, qr = FALSE,
xij = xij)
@@ -201,16 +199,16 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
fit <- try
ftemp <- try$rss
C <- CC
- A = t(fit$mat.coef[1:Rank, , drop = FALSE])
+ A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
lv.mat <- x[, colx2.index, drop = FALSE] %*% C
- recover = iter # Give it some altg iters to recover
+ recover <- iter # Give it some altg iters to recover
} else {
if (trace && use.alpha > 0) {
cat(" Finished line search using Alpha = ",
use.alpha, "\n")
flush.console()
}
- fini.linesearch = TRUE
+ fini.linesearch <- TRUE
}
if (fini.linesearch) break
} # End of itter loop
@@ -226,45 +224,44 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
-lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign = TRUE,
- no.thrills = FALSE)
-{
+lm2qrrvlm.model.matrix <- function(x, Blist, C, control, assign = TRUE,
+ no.thrills = FALSE) {
- Rank = control$Rank
- colx1.index = control$colx1.index
- Quadratic = control$Quadratic
- Dzero = control$Dzero
- Corner = control$Corner
- ITolerances = control$ITolerances
+ Rank <- control$Rank
+ colx1.index <- control$colx1.index
+ Quadratic <- control$Quadratic
+ Dzero <- control$Dzero
+ Corner <- control$Corner
+ ITolerances <- control$ITolerances
- M = nrow(Blist[[1]])
- p1 = length(colx1.index)
- combine2 = c(control$szero,
+ M <- nrow(Blist[[1]])
+ p1 <- length(colx1.index)
+ combine2 <- c(control$szero,
if (Corner) control$Index.corner else NULL)
- Qoffset = if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
- NoA = length(combine2) == M # No unknown parameters in A
- clist2 = if (NoA) {
- Aoffset = 0
+ Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
+ NoA <- length(combine2) == M # No unknown parameters in A
+ clist2 <- if (NoA) {
+ Aoffset <- 0
vector("list", Aoffset+Qoffset+p1)
} else {
- Aoffset = Rank
+ Aoffset <- Rank
replace.constraints(vector("list", Aoffset+Qoffset+p1),
if (length(combine2)) diag(M)[,-combine2,drop = FALSE] else diag(M),
1:Rank) # If Corner then does not contain \bI_{Rank}
}
if (Quadratic && !ITolerances)
- clist2 = replace.constraints(clist2,
+ clist2 <- replace.constraints(clist2,
if (control$EqualTolerances)
- matrix(1, M, 1) - eij(Dzero, M) else {
+ matrix(1, M, 1) - eijfun(Dzero, M) else {
if (length(Dzero)) diag(M)[,-Dzero,drop = FALSE] else diag(M)},
Aoffset + (1:Qoffset))
if (p1)
- for(kk in 1:p1)
- clist2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
+ for (kk in 1:p1)
+ clist2[[Aoffset+Qoffset+kk]] <- Blist[[colx1.index[kk]]]
if (!no.thrills) {
- i63 = iam(NA, NA, M=Rank, both = TRUE)
- names(clist2) = c(
+ i63 <- iam(NA, NA, M=Rank, both = TRUE)
+ names(clist2) <- c(
if (NoA) NULL else paste("(lv", 1:Rank, ")", sep = ""),
if (Quadratic && Rank == 1 && !ITolerances)
"(lv^2)" else
@@ -274,23 +271,23 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign = TRUE,
if (p1) names(colx1.index) else NULL)
}
- lv.mat = x[,control$colx2.index,drop = FALSE] %*% C
+ lv.mat <- x[,control$colx2.index,drop = FALSE] %*% C
- tmp900 = qrrvglm.xprod(lv.mat, Aoffset, Quadratic, ITolerances)
- new.lv.model.matrix = cbind(tmp900$matrix,
+ tmp900 <- qrrvglm.xprod(lv.mat, Aoffset, Quadratic, ITolerances)
+ new.lv.model.matrix <- cbind(tmp900$matrix,
if (p1) x[,colx1.index] else NULL)
if (!no.thrills)
- dimnames(new.lv.model.matrix) = list(dimnames(x)[[1]], names(clist2))
+ dimnames(new.lv.model.matrix) <- list(dimnames(x)[[1]], names(clist2))
if (assign) {
- asx = attr(x, "assign")
- asx = vector("list", ncol(new.lv.model.matrix))
- names(asx) = names(clist2)
- for(ii in 1:length(names(asx))) {
- asx[[ii]] = ii
+ asx <- attr(x, "assign")
+ asx <- vector("list", ncol(new.lv.model.matrix))
+ names(asx) <- names(clist2)
+ for (ii in 1:length(names(asx))) {
+ asx[[ii]] <- ii
}
- attr(new.lv.model.matrix, "assign") = asx
+ attr(new.lv.model.matrix, "assign") <- asx
}
if (no.thrills)
@@ -306,86 +303,88 @@ lm2qrrvlm.model.matrix = function(x, Blist, C, control, assign = TRUE,
valt.2iter <- function(x, z, U, Blist, A, control) {
- clist1 = replace.constraints(Blist, A, control$colx2.index)
- fit <- vlm.wfit(xmat=x, z, Blist=clist1, U = U, matrix.out = TRUE,
- is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = control$xij)
- C = fit$mat.coef[control$colx2.index,,drop = FALSE] %*% A %*% solve(t(A) %*% A)
+ clist1 <- replace.constraints(Blist, A, control$colx2.index)
+ fit <- vlm.wfit(xmat = x, z, Blist = clist1, U = U, matrix.out = TRUE,
+ is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = control$xij)
+ C <- fit$mat.coef[control$colx2.index, , drop = FALSE] %*%
+ A %*% solve(t(A) %*% A)
- list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef,
- Blist=clist1, rss=fit$rss)
+ list(A = A, C = C,
+ fitted = fit$fitted, new.coeffs = fit$coef,
+ Blist = clist1, rss = fit$rss)
}
-valt.1iter = function(x, z, U, Blist, C, control,
+valt.1iter <- function(x, z, U, Blist, C, control,
lp.names = NULL, nice31 = FALSE,
MSratio = 1) {
- Rank = control$Rank
- Quadratic = control$Quadratic
- Index.corner = control$Index.corner
- p1 = length(control$colx1.index)
- M = ncol(zedd <- as.matrix(z))
- NOS = M / MSratio
- Corner = control$Corner
- ITolerances = control$ITolerances
-
- Qoffset = if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
- tmp833 = lm2qrrvlm.model.matrix(x=x, Blist = Blist, C=C, control=control)
- new.lv.model.matrix = tmp833$new.lv.model.matrix
- clist2 = clist2 = tmp833$constraints # Does not contain \bI_{Rank}
- lv.mat = tmp833$lv.mat
+ Rank <- control$Rank
+ Quadratic <- control$Quadratic
+ Index.corner <- control$Index.corner
+ p1 <- length(control$colx1.index)
+ M <- ncol(zedd <- as.matrix(z))
+ NOS <- M / MSratio
+ Corner <- control$Corner
+ ITolerances <- control$ITolerances
+
+ Qoffset <- if (Quadratic) ifelse(ITolerances, 0, sum(1:Rank)) else 0
+ tmp833 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist, C=C, control=control)
+ new.lv.model.matrix <- tmp833$new.lv.model.matrix
+ clist2 <- tmp833$constraints # Does not contain \bI_{Rank}
+ lv.mat <- tmp833$lv.mat
if (Corner)
- zedd[,Index.corner] = zedd[,Index.corner] - lv.mat
+ zedd[,Index.corner] <- zedd[,Index.corner] - lv.mat
if (nice31 && MSratio == 1) {
- fit = list(mat.coef = NULL, fitted.values = NULL, rss = 0)
-
- clist2 = NULL # for vlm.wfit
-
- i5 = rep(0, length.out = MSratio)
- for(ii in 1:NOS) {
- i5 = i5 + 1:MSratio
-
- tmp100 = vlm.wfit(xmat=new.lv.model.matrix,
- zedd[,i5,drop = FALSE],
- Blist=clist2,
- U = U[i5,,drop = FALSE],
- matrix.out = TRUE,
- is.vlmX = FALSE, rss = TRUE,
- qr = FALSE,
- Eta.range = control$Eta.range,
- xij = control$xij,
- lp.names=lp.names[i5])
- fit$rss = fit$rss + tmp100$rss
- fit$mat.coef = cbind(fit$mat.coef, tmp100$mat.coef)
- fit$fitted.values = cbind(fit$fitted.values,
- tmp100$fitted.values)
+ fit <- list(mat.coef = NULL, fitted.values = NULL, rss = 0)
+
+ clist2 <- NULL # for vlm.wfit
+
+ i5 <- rep(0, length.out = MSratio)
+ for (ii in 1:NOS) {
+ i5 <- i5 + 1:MSratio
+
+ tmp100 <- vlm.wfit(xmat = new.lv.model.matrix,
+ zedd[, i5, drop = FALSE],
+ Blist = clist2,
+ U = U[i5,,drop = FALSE],
+ matrix.out = TRUE,
+ is.vlmX = FALSE, rss = TRUE,
+ qr = FALSE,
+ Eta.range = control$Eta.range,
+ xij = control$xij,
+ lp.names = lp.names[i5])
+ fit$rss <- fit$rss + tmp100$rss
+ fit$mat.coef <- cbind(fit$mat.coef, tmp100$mat.coef)
+ fit$fitted.values <- cbind(fit$fitted.values,
+ tmp100$fitted.values)
}
} else {
- fit = vlm.wfit(xmat=new.lv.model.matrix,
- zedd, Blist=clist2, U = U,
+ fit <- vlm.wfit(xmat = new.lv.model.matrix,
+ zedd, Blist = clist2, U = U,
matrix.out = TRUE,
is.vlmX = FALSE, rss = TRUE, qr = FALSE,
Eta.range = control$Eta.range,
- xij = control$xij, lp.names=lp.names)
+ xij = control$xij, lp.names = lp.names)
}
- A = if (tmp833$NoA) matrix(0, M, Rank) else
+ A <- if (tmp833$NoA) matrix(0, M, Rank) else
t(fit$mat.coef[1:Rank,,drop = FALSE])
if (Corner)
- A[Index.corner,] = diag(Rank)
+ A[Index.corner,] <- diag(Rank)
- B1 = if (p1)
+ B1 <- if (p1)
fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),,drop = FALSE] else
NULL
- fv = as.matrix(fit$fitted.values)
+ fv <- as.matrix(fit$fitted.values)
if (Corner)
- fv[,Index.corner] = fv[,Index.corner] + lv.mat
- Dmat = if (Quadratic) {
+ fv[,Index.corner] <- fv[,Index.corner] + lv.mat
+ Dmat <- if (Quadratic) {
if (ITolerances) {
- tmp800 = matrix(0, M, Rank*(Rank+1)/2)
+ tmp800 <- matrix(0, M, Rank*(Rank+1)/2)
tmp800[if (MSratio == 2) c(TRUE, FALSE) else
- TRUE, 1:Rank] = -0.5
+ TRUE, 1:Rank] <- -0.5
tmp800
} else
t(fit$mat.coef[(tmp833$Aoffset+1):
@@ -393,9 +392,9 @@ valt.1iter = function(x, z, U, Blist, C, control,
} else
NULL
- list(Amat=A, B1=B1, Cmat=C, Dmat=Dmat,
+ list(Amat = A, B1 = B1, Cmat = C, Dmat = Dmat,
fitted = if (M == 1) c(fv) else fv,
- new.coeffs = fit$coef, constraints=clist2, rss=fit$rss,
+ new.coeffs = fit$coef, constraints = clist2, rss = fit$rss,
offset = if (length(tmp833$offset)) tmp833$offset else NULL)
}
@@ -412,7 +411,7 @@ rrr.init.expression <- expression({
if (function.name %in% c("cqo", "cao")) {
- modelno = switch(family@vfamily[1], "poissonff" = 2,
+ modelno <- switch(family@vfamily[1], "poissonff" = 2,
"quasipoissonff" = 2, "quasipoisson" = 2,
"binomialff" = 1, "quasibinomialff" = 1,
"quasibinomial" = 1, "negbinomial" = 3,
@@ -423,16 +422,16 @@ rrr.init.expression <- expression({
if (modelno == 3 || modelno == 5) {
- M = 2 * ifelse(is.matrix(y), ncol(y), 1)
- control$szero =
- rrcontrol$szero = seq(from = 2, to=M, by = 2) # Handles A
- control$Dzero =
- rrcontrol$Dzero = seq(from = 2, to=M, by = 2) # Handles D
+ M <- 2 * ifelse(is.matrix(y), ncol(y), 1)
+ control$szero <-
+ rrcontrol$szero <- seq(from = 2, to=M, by = 2) # Handles A
+ control$Dzero <-
+ rrcontrol$Dzero <- seq(from = 2, to=M, by = 2) # Handles D
}
} else {
- modelno = 0 # Any value will do as the variable is unused.
+ modelno <- 0 # Any value will do as the variable is unused.
}
@@ -456,10 +455,10 @@ rrr.alternating.expression <- expression({
trace = trace,
xij = control$xij) # This is subject to drift in A and C
- ans2 = rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x=x)
+ ans2 <- rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x = x)
- Amat = ans2$A # Fed into Blist below (in rrr.end.expression)
- tmp.fitted = alt$fitted # Also fed; was alt2$fitted
+ Amat <- ans2$A # Fed into Blist below (in rrr.end.expression)
+ tmp.fitted <- alt$fitted # Also fed; was alt2$fitted
rrcontrol$Cinit <- ans2$C # For next valt() call
@@ -468,77 +467,93 @@ rrr.alternating.expression <- expression({
- adjust.Dmat.expression = expression({
+ adjust.Dmat.expression <- function(Mmat, Rank, Dmat, M) {
+
if (length(Dmat)) {
- ind0 = iam(NA, NA, both= TRUE, M=Rank)
- for(kay in 1:M) {
- elts = Dmat[kay,,drop = FALSE] # Manual recycling
- if (length(elts) < Rank)
- elts = matrix(elts, 1, Rank)
- Dk = m2adefault(elts, M=Rank)[,,1]
- Dk = matrix(Dk, Rank, Rank)
- Dk = t(Mmat) %*% Dk %*% Mmat # 22/8/03; Not diagonal in general
- Dmat[kay,] = Dk[cbind(ind0$row.index[1:ncol(Dmat)],
- ind0$col.index[1:ncol(Dmat)])]
- }
- }})
+ ind0 <- iam(NA, NA, both = TRUE, M = Rank)
+ for (kay in 1:M) {
+ elts <- Dmat[kay, , drop = FALSE] # Manual recycling
+ if (length(elts) < Rank)
+ elts <- matrix(elts, 1, Rank)
+ Dk <- m2adefault(elts, M = Rank)[, , 1]
+ Dk <- matrix(Dk, Rank, Rank)
+ Dk <- t(Mmat) %*% Dk %*% Mmat # 22/8/03; Not diagonal in general
+ Dmat[kay, ] <- Dk[cbind(ind0$row.index[1:ncol(Dmat)],
+ ind0$col.index[1:ncol(Dmat)])]
+ }
+ }
+ Dmat
+ }
-rrr.normalize = function(rrcontrol, A, C, x, Dmat = NULL) {
+rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
- colx2.index = rrcontrol$colx2.index
- Rank = rrcontrol$Rank
- Index.corner = rrcontrol$Index.corner
- M = nrow(A)
- C.old = C
+ colx2.index <- rrcontrol$colx2.index
+ Rank <- rrcontrol$Rank
+ Index.corner <- rrcontrol$Index.corner
+ M <- nrow(A)
+ C.old <- C
if (rrcontrol$Corner) {
- tmp87 = A[Index.corner,,drop = FALSE]
- Mmat <- solve(tmp87) # The normalizing matrix
- C <- C %*% t(tmp87)
- A <- A %*% Mmat
- A[Index.corner,] <- diag(Rank) # Make sure
- eval(adjust.Dmat.expression)
+ tmp87 <- A[Index.corner,,drop = FALSE]
+ Mmat <- solve(tmp87) # The normalizing matrix
+ C <- C %*% t(tmp87)
+ A <- A %*% Mmat
+ A[Index.corner,] <- diag(Rank) # Make sure
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
}
if (rrcontrol$Svd.arg) {
- temp = svd(C %*% t(A))
- if (!is.matrix(temp$v))
- temp$v = as.matrix(temp$v)
- C = temp$u[,1:Rank,drop = FALSE] %*%
- diag(temp$d[1:Rank]^(1-rrcontrol$Alpha), nrow=Rank)
- A = diag(temp$d[1:Rank]^(rrcontrol$Alpha), nrow=Rank) %*%
- t(temp$v[,1:Rank,drop = FALSE])
- A = t(A)
- Mmat = t(C.old) %*% C.old %*% solve(t(C) %*% C.old)
- eval(adjust.Dmat.expression)
+ temp <- svd(C %*% t(A))
+ if (!is.matrix(temp$v))
+ temp$v <- as.matrix(temp$v)
+ C <- temp$u[, 1:Rank, drop = FALSE] %*%
+ diag(temp$d[1:Rank]^(1-rrcontrol$Alpha), nrow = Rank)
+ A <- diag(temp$d[1:Rank]^( rrcontrol$Alpha), nrow = Rank) %*%
+ t(temp$v[, 1:Rank, drop = FALSE])
+ A <- t(A)
+ Mmat <- t(C.old) %*% C.old %*% solve(t(C) %*% C.old)
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
}
if (rrcontrol$Uncorrelated.lv) {
- lv.mat <- x[,colx2.index,drop = FALSE] %*% C
+ lv.mat <- x[, colx2.index, drop = FALSE] %*% C
var.lv.mat <- var(lv.mat)
- UU = chol(var.lv.mat)
+ UU <- chol(var.lv.mat)
Ut <- solve(UU)
Mmat <- t(UU)
C <- C %*% Ut
A <- A %*% t(UU)
- eval(adjust.Dmat.expression)
+
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
}
if (rrcontrol$Quadratic) {
- Mmat = diag(Rank)
- for(LV in 1:Rank)
+ Mmat <- diag(Rank)
+ for (LV in 1:Rank)
if (( rrcontrol$Crow1positive[LV] && C[1,LV] < 0) ||
(!rrcontrol$Crow1positive[LV] && C[1,LV] > 0)) {
- C[,LV] = -C[,LV]
- A[,LV] = -A[,LV]
- Mmat[LV,LV] = -1
+ C[,LV] <- -C[,LV]
+ A[,LV] <- -A[,LV]
+ Mmat[LV,LV] <- -1
}
- eval(adjust.Dmat.expression) # Using Mmat above
+
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
}
@@ -549,7 +564,7 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat = NULL) {
-rrr.end.expression = expression({
+rrr.end.expression <- expression({
if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
@@ -557,18 +572,18 @@ rrr.end.expression = expression({
if (control$Quadratic) {
if (!length(extra))
- extra = list()
- extra$Cmat = Cmat # Saves the latest iteration
- extra$Dmat = Dmat # Not the latest iteration
- extra$B1 = B1.save # Not the latest iteration (not good)
+ extra <- list()
+ extra$Cmat <- Cmat # Saves the latest iteration
+ extra$Dmat <- Dmat # Not the latest iteration
+ extra$B1 <- B1.save # Not the latest iteration (not good)
} else {
- Blist = replace.constraints(Blist.save, Amat, colx2.index)
+ Blist <- replace.constraints(Blist.save, Amat, colx2.index)
}
- X_vlm_save = if (control$Quadratic) {
- tmp300 = lm2qrrvlm.model.matrix(x=x, Blist = Blist.save,
+ X_vlm_save <- if (control$Quadratic) {
+ tmp300 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist.save,
C = Cmat, control=control)
- lv.mat = tmp300$lv.mat # Needed at the top of new.s.call
+ lv.mat <- tmp300$lv.mat # Needed at the top of new.s.call
lm2vlm.model.matrix(tmp300$new.lv.model.matrix,
B.list,
@@ -581,7 +596,7 @@ rrr.end.expression = expression({
fv <- tmp.fitted # Contains \bI \bnu
eta <- fv + offset
if (FALSE && control$Rank == 1) {
- ooo = order(lv.mat[,1])
+ ooo <- order(lv.mat[,1])
}
mu <- family@linkinv(eta, extra)
@@ -591,7 +606,7 @@ rrr.end.expression = expression({
deriv.mu <- eval(family@deriv)
wz <- eval(family@weight)
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
+ wz <- checkwz(wz, M = M, trace = trace,
wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
@@ -610,7 +625,7 @@ rrr.derivative.expression <- expression({
- which.optimizer = if (control$Quadratic && control$FastAlgorithm) {
+ which.optimizer <- if (control$Quadratic && control$FastAlgorithm) {
"BFGS"
} else {
if (iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
@@ -622,7 +637,7 @@ rrr.derivative.expression <- expression({
}
constraints=replace.constraints(constraints,diag(M),rrcontrol$colx2.index)
- nice31 = (!control$EqualTol || control$ITolerances) &&
+ nice31 <- (!control$EqualTol || control$ITolerances) &&
all(trivial.constraints(constraints) == 1)
theta0 <- c(Cmat)
@@ -635,35 +650,35 @@ rrr.derivative.expression <- expression({
}
if (iter > 2 && !quasi.newton$convergence) {
if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAM:::VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAM:::VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAM:::VGAMenv)
+ ..VGAM.z <- get(".VGAM.z", envir = VGAM:::VGAMenv)
+ ..VGAM.U <- get(".VGAM.U", envir = VGAM:::VGAMenv)
+ ..VGAM.beta <- get(".VGAM.beta", envir = VGAM:::VGAMenv)
}
if (zthere) {
- z = matrix(..VGAM.z, n, M) # minus any offset
- U = matrix(..VGAM.U, M, n)
+ z <- matrix(..VGAM.z, n, M) # minus any offset
+ U <- matrix(..VGAM.U, M, n)
}
}
if (iter == 2 || quasi.newton$convergence) {
- NOS = ifelse(modelno == 3 || modelno == 5, M/2, M)
+ NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
- canfitok =
+ canfitok <-
(exists("CQO.FastAlgorithm", envir=VGAM:::VGAMenv) &&
get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
if (!canfitok)
stop("cannot fit this model using fast algorithm")
- p2star = if (nice31)
+ p2star <- if (nice31)
ifelse(control$IToleran, Rank, Rank+0.5*Rank*(Rank+1)) else
(NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$EqualTol,1,NOS))
- p1star = if (nice31) p1 *
+ p1star <- if (nice31) p1 *
ifelse(modelno == 3 || modelno == 5, 2, 1) else
(ncol(X_vlm_save) - p2star)
- X_vlm_1save = if (p1star > 0)
+ X_vlm_1save <- if (p1star > 0)
X_vlm_save[,-(1:p2star)] else NULL
- quasi.newton = optim(par=Cmat, fn=callcqof,
- gr = if (control$GradientFunction) calldcqo else NULL,
+ quasi.newton <- optim(par=Cmat, fn=callcqof,
+ gr <- if (control$GradientFunction) calldcqo else NULL,
method=which.optimizer,
control=list(fnscale = 1,
trace = as.integer(control$trace),
@@ -674,24 +689,24 @@ rrr.derivative.expression <- expression({
X_vlm_1save = if (nice31) NULL else X_vlm_1save,
modelno=modelno, Control=control,
n = n, M = M, p1star=p1star,
- p2star=p2star, nice31=nice31)
+ p2star=p2star, nice31 = nice31)
if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
- ..VGAM.z = get(".VGAM.z", envir = VGAM:::VGAMenv)
- ..VGAM.U = get(".VGAM.U", envir = VGAM:::VGAMenv)
- ..VGAM.beta = get(".VGAM.beta", envir = VGAM:::VGAMenv)
+ ..VGAM.z <- get(".VGAM.z", envir = VGAM:::VGAMenv)
+ ..VGAM.U <- get(".VGAM.U", envir = VGAM:::VGAMenv)
+ ..VGAM.beta <- get(".VGAM.beta", envir = VGAM:::VGAMenv)
}
if (zthere) {
- z = matrix(..VGAM.z, n, M) # minus any offset
- U = matrix(..VGAM.U, M, n)
+ z <- matrix(..VGAM.z, n, M) # minus any offset
+ U <- matrix(..VGAM.U, M, n)
}
} else {
if (exists(".VGAM.offset", envir = VGAM:::VGAMenv))
rm(".VGAM.offset", envir = VGAM:::VGAMenv)
}
} else {
- use.reltol = if (length(rrcontrol$Reltol) >= iter)
+ use.reltol <- if (length(rrcontrol$Reltol) >= iter)
rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
quasi.newton <-
optim(par=theta0,
@@ -709,71 +724,71 @@ rrr.derivative.expression <- expression({
- Cmat = matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
+ Cmat <- matrix(quasi.newton$par, p2, Rank, byrow = FALSE)
if (Rank > 1 && rrcontrol$ITolerances) {
- numat = x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
- evnu = eigen(var(numat))
- Cmat = Cmat %*% evnu$vector
- numat = x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
- offset = if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
+ numat <- x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
+ evnu <- eigen(var(numat))
+ Cmat <- Cmat %*% evnu$vector
+ numat <- x[,rrcontrol$colx2.index,drop = FALSE] %*% Cmat
+ offset <- if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
}
}
- alt = valt.1iter(x=x, z=z, U = U, Blist = Blist,
- C = Cmat, nice31=nice31,
+ alt <- valt.1iter(x = x, z = z, U = U, Blist = Blist,
+ C = Cmat, nice31 = nice31,
control = rrcontrol,
- lp.names=predictors.names)
+ lp.names = predictors.names)
if (length(alt$offset))
- offset = alt$offset
+ offset <- alt$offset
- B1.save = alt$B1 # Put later into extra
- tmp.fitted = alt$fitted # contains \bI_{Rank} \bnu if Corner
+ B1.save <- alt$B1 # Put later into extra
+ tmp.fitted <- alt$fitted # contains \bI_{Rank} \bnu if Corner
- if (modelno!=33 && control$OptimizeWrtC)
- alt = rrr.normalize(rrc=rrcontrol, A=alt$Amat, C=alt$Cmat,
- x=x, Dmat=alt$Dmat)
+ if (modelno != 33 && control$OptimizeWrtC)
+ alt <- rrr.normalize(rrc = rrcontrol, A = alt$Amat, C = alt$Cmat,
+ x = x, Dmat = alt$Dmat)
if (trace && control$OptimizeWrtC) {
- cat("\n")
- cat(which.optimizer, "using optim():\n")
- cat("Objective = ", quasi.newton$value, "\n")
- cat("Parameters (= c(C)) = ", if (length(quasi.newton$par) < 5)
- "" else "\n")
- cat(alt$Cmat, fill = TRUE)
- cat("\n")
- cat("Number of function evaluations = ", quasi.newton$count[1], "\n")
- if (length(quasi.newton$message))
- cat("Message = ", quasi.newton$message, "\n")
- cat("\n")
- flush.console()
+ cat("\n")
+ cat(which.optimizer, "using optim():\n")
+ cat("Objective = ", quasi.newton$value, "\n")
+ cat("Parameters (= c(C)) = ", if (length(quasi.newton$par) < 5)
+ "" else "\n")
+ cat(alt$Cmat, fill = TRUE)
+ cat("\n")
+ cat("Number of function evaluations = ", quasi.newton$count[1], "\n")
+ if (length(quasi.newton$message))
+ cat("Message = ", quasi.newton$message, "\n")
+ cat("\n")
+ flush.console()
}
- Amat = alt$Amat # Needed in rrr.end.expression
- Cmat = alt$Cmat # Needed in rrr.end.expression if Quadratic
- Dmat = alt$Dmat # Put later into extra
+ Amat <- alt$Amat # Needed in rrr.end.expression
+ Cmat <- alt$Cmat # Needed in rrr.end.expression if Quadratic
+ Dmat <- alt$Dmat # Put later into extra
eval(rrr.end.expression) # Put Amat into Blist, and create new z
})
-rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
+rrr.derivC.rss <- function(theta, U, z, M, xmat, Blist, rrcontrol,
omit.these = NULL) {
if (rrcontrol$trace) {
cat(".")
flush.console()
}
- alreadyThere = exists(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
+ alreadyThere <- exists(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
if (alreadyThere) {
- VGAM.dot.counter = get(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
- VGAM.dot.counter = VGAM.dot.counter + 1
+ VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAM:::VGAMenv)
+ VGAM.dot.counter <- VGAM.dot.counter + 1
assign(".VGAM.dot.counter", VGAM.dot.counter,
envir = VGAM:::VGAMenv)
if (VGAM.dot.counter > max(50, options()$width - 5)) {
@@ -785,21 +800,21 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
}
}
- Cmat = matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
+ Cmat <- matrix(theta, length(rrcontrol$colx2.index), rrcontrol$Rank)
- tmp700 = lm2qrrvlm.model.matrix(x = xmat, Blist = Blist,
+ tmp700 <- lm2qrrvlm.model.matrix(x = xmat, Blist = Blist,
no.thrills = !rrcontrol$Corner,
C = Cmat, control = rrcontrol, assign = FALSE)
- Blist = tmp700$constraints # Does not contain \bI_{Rank} \bnu
+ Blist <- tmp700$constraints # Does not contain \bI_{Rank} \bnu
if (rrcontrol$Corner) {
- z = as.matrix(z) # should actually call this zedd
- z[,rrcontrol$Index.corner] = z[,rrcontrol$Index.corner] -
+ z <- as.matrix(z) # should actually call this zedd
+ z[,rrcontrol$Index.corner] <- z[,rrcontrol$Index.corner] -
tmp700$lv.mat
}
- if (length(tmp700$offset)) z = z - tmp700$offset
+ if (length(tmp700$offset)) z <- z - tmp700$offset
vlm.wfit(xmat=tmp700$new.lv.model.matrix, zmat=z,
@@ -811,44 +826,42 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
-rrvglm.optim.control = function(Fnscale = 1,
+rrvglm.optim.control <- function(Fnscale = 1,
Maxit = 100,
- Switch.optimizer=3,
- Abstol= -Inf,
- Reltol=sqrt(.Machine$double.eps),
- ...)
-{
+ Switch.optimizer = 3,
+ Abstol = -Inf,
+ Reltol = sqrt(.Machine$double.eps),
+ ...) {
- list(Fnscale=Fnscale,
- Maxit=Maxit,
- Switch.optimizer=Switch.optimizer,
- Abstol=Abstol,
- Reltol=Reltol)
+ list(Fnscale = Fnscale,
+ Maxit = Maxit,
+ Switch.optimizer = Switch.optimizer,
+ Abstol = Abstol,
+ Reltol = Reltol)
}
-nlminbcontrol = function(Abs.tol = 10^(-6),
- Eval.max=91,
- Iter.max=91,
+nlminbcontrol <- function(Abs.tol = 10^(-6),
+ Eval.max = 91,
+ Iter.max = 91,
Rel.err = 10^(-6),
Rel.tol = 10^(-6),
Step.min = 10^(-6),
X.tol = 10^(-6),
- ...)
-{
+ ...) {
list(Abs.tol = Abs.tol,
- Eval.max=Eval.max,
+ Eval.max = Eval.max,
Iter.max = Iter.max,
- Rel.err=Rel.err,
- Rel.tol=Rel.tol,
- Step.min=Step.min,
- X.tol=X.tol)
+ Rel.err = Rel.err,
+ Rel.tol = Rel.tol,
+ Step.min = Step.min,
+ X.tol = X.tol)
}
@@ -857,225 +870,286 @@ nlminbcontrol = function(Abs.tol = 10^(-6),
Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
- if (length(varlvI) != 1 || !is.logical(varlvI))
- stop("'varlvI' must be TRUE or FALSE")
- if (length(reference) > 1)
- stop("'reference' must be of length 0 or 1")
- if (length(reference) &&
- is.Numeric(reference))
- if (!is.Numeric(reference, allowable.length = 1,
- integer.valued = TRUE))
- stop("bad input for argument 'reference'")
- if (!is.logical(ConstrainedQO <- object@control$ConstrainedQO))
- stop("cannot determine whether the model is constrained or not")
-
- ocontrol = object@control
- coef.object = object@coefficients
- Rank = ocontrol$Rank
- M = object@misc$M
- NOS = if (length(object@y)) ncol(object@y) else M
- MSratio = M / NOS # First value is g(mean) = quadratic form in lv
- Quadratic = if (ConstrainedQO) ocontrol$Quadratic else TRUE
- if (!Quadratic) stop("object is not a quadratic ordination object")
- p1 = length(ocontrol$colx1.index)
- p2 = length(ocontrol$colx2.index)
- Index.corner = ocontrol$Index.corner
- szero = ocontrol$szero
- EqualTolerances = ocontrol$EqualTolerances
- Dzero = ocontrol$Dzero
- Corner = if (ConstrainedQO) ocontrol$Corner else FALSE
- estITol = if (ConstrainedQO) object@control$ITolerances else FALSE
- modelno = object@control$modelno # 1,2,3,4,5,6,7 or 0
- combine2 = c(szero, if (Corner) Index.corner else NULL)
- NoA = length(combine2) == M # A is fully known.
-
- Qoffset = if (Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
-
- ynames = object@misc$ynames
- if (!length(ynames)) ynames = object@misc$predictors.names
- if (!length(ynames)) ynames = object@misc$ynames
- if (!length(ynames)) ynames = paste("Y", 1:NOS, sep = "")
- lp.names = object@misc$predictors.names
- if (!length(lp.names)) lp.names = NULL
-
- dzero.vector = rep(FALSE, length = M)
- if (length(Dzero))
- dzero.vector[Dzero] = TRUE
- names(dzero.vector) = ynames
- lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
-
- td.expression = expression({
- Tolerance = Darray = m2adefault(Dmat, M=Rank)
- for(ii in 1:M)
- if (length(Dzero) && any(Dzero == ii)) {
- Tolerance[,,ii] = NA # Darray[,,ii] == O
- bellshaped[ii] = FALSE
- } else {
- Tolerance[,,ii] = -0.5 * solve(Darray[,,ii])
- bellshaped[ii] = all(eigen(Tolerance[,,ii])$values > 0)
- }
- optimum = matrix(as.numeric(NA),Rank,M) # dimnames=list(lv.names,ynames)
- for(ii in 1:M)
- if (bellshaped[ii])
- optimum[,ii] = Tolerance[,,ii] %*% cbind(Amat[ii,])
- })
- Amat = object@extra$Amat # M x Rank
- Cmat = object@extra$Cmat # p2 x Rank
- Dmat = object@extra$Dmat #
- B1 = object@extra$B1 #
- bellshaped = rep(FALSE, length = M)
-
- if (is.character(reference)) {
- reference = (1:NOS)[reference == ynames]
- if (length(reference) != 1)
- stop("could not match argument 'reference' with any response")
- }
- ptr1 = 1
- candidates = if (length(reference)) reference else {
- if (length(ocontrol$Dzero)) (1:M)[-ocontrol$Dzero] else (1:M)}
- repeat {
- if (ptr1 > 0) {
- this.spp = candidates[ptr1]
- }
- elts = Dmat[this.spp,,drop = FALSE]
- if (length(elts) < Rank)
- elts = matrix(elts, 1, Rank)
- Dk = m2adefault(elts, M=Rank)[,,1] # Hopefully negative-def
- temp400 = eigen(Dk)
- ptr1 = ptr1 + 1
- if (all(temp400$value < 0)) break
- if (ptr1 > length(candidates)) break
- }
- if (all(temp400$value < 0)) {
- temp1tol = -0.5 * solve(Dk)
- dim(temp1tol) = c(Rank,Rank)
- Mmat = t(chol(temp1tol))
- if (ConstrainedQO) {
- temp900 = solve(t(Mmat))
- Cmat = Cmat %*% temp900
- Amat = Amat %*% Mmat
- }
- if (length(Cmat)) {
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
- Cmat = temp800$cmat
- Amat = temp800$amat
- }
- eval(adjust.Dmat.expression)
- eval(td.expression)
+ if (length(varlvI) != 1 || !is.logical(varlvI))
+ stop("'varlvI' must be TRUE or FALSE")
+ if (length(reference) > 1)
+ stop("'reference' must be of length 0 or 1")
+ if (length(reference) &&
+ is.Numeric(reference))
+ if (!is.Numeric(reference, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("bad input for argument 'reference'")
+ if (!is.logical(ConstrainedQO <- object@control$ConstrainedQO))
+ stop("cannot determine whether the model is constrained or not")
+
+ ocontrol <- object@control
+ coef.object <- object@coefficients
+ Rank <- ocontrol$Rank
+ M <- object@misc$M
+ NOS <- if (length(object@y)) ncol(object@y) else M
+ MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
+ Quadratic <- if (ConstrainedQO) ocontrol$Quadratic else TRUE
+ if (!Quadratic) stop("object is not a quadratic ordination object")
+ p1 <- length(ocontrol$colx1.index)
+ p2 <- length(ocontrol$colx2.index)
+ Index.corner <- ocontrol$Index.corner
+ szero <- ocontrol$szero
+ EqualTolerances <- ocontrol$EqualTolerances
+ Dzero <- ocontrol$Dzero
+ Corner <- if (ConstrainedQO) ocontrol$Corner else FALSE
+
+ estITol <- if (ConstrainedQO) object@control$ITolerances else FALSE
+ modelno <- object@control$modelno # 1,2,3,4,5,6,7 or 0
+ combine2 <- c(szero, if (Corner) Index.corner else NULL)
+ NoA <- length(combine2) == M # A is fully known.
+
+ Qoffset <- if (Quadratic) ifelse(estITol, 0, sum(1:Rank)) else 0
+
+ ynames <- object@misc$ynames
+ if (!length(ynames)) ynames <- object@misc$predictors.names
+ if (!length(ynames)) ynames <- object@misc$ynames
+ if (!length(ynames)) ynames <- paste("Y", 1:NOS, sep = "")
+ lp.names <- object@misc$predictors.names
+ if (!length(lp.names)) lp.names <- NULL
+
+ dzero.vector <- rep(FALSE, length = M)
+ if (length(Dzero))
+ dzero.vector[Dzero] <- TRUE
+ names(dzero.vector) <- ynames
+ lv.names <- if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
+
+
+
+ td.expression <- function(Dmat, Amat, M, Dzero, Rank, bellshaped) {
+
+
+ Tolerance <- Darray <- m2adefault(Dmat, M = Rank)
+ for (ii in 1:M)
+ if (length(Dzero) && any(Dzero == ii)) {
+ Tolerance[, , ii] <- NA # Darray[,,ii] == O
+ bellshaped[ii] <- FALSE
+ } else {
+ Tolerance[, , ii] <- -0.5 * solve(Darray[, , ii])
+ bellshaped[ii] <- all(eigen(Tolerance[, , ii])$values > 0)
+ }
+ optimum <- matrix(as.numeric(NA), Rank, M)
+ for (ii in 1:M)
+ if (bellshaped[ii])
+ optimum[, ii] <- Tolerance[, , ii] %*% cbind(Amat[ii, ])
+
+ list(optimum = optimum,
+ Tolerance = Tolerance,
+ Darray = Darray,
+ bellshaped = bellshaped)
+ }
+
+
+
+
+
+ Amat <- object@extra$Amat # M x Rank
+ Cmat <- object@extra$Cmat # p2 x Rank
+ Dmat <- object@extra$Dmat #
+ B1 <- object@extra$B1 #
+ bellshaped <- rep(FALSE, length = M)
+
+ if (is.character(reference)) {
+ reference <- (1:NOS)[reference == ynames]
+ if (length(reference) != 1)
+ stop("could not match argument 'reference' with any response")
+ }
+ ptr1 <- 1
+ candidates <- if (length(reference)) reference else {
+ if (length(ocontrol$Dzero)) (1:M)[-ocontrol$Dzero] else (1:M)}
+ repeat {
+ if (ptr1 > 0) {
+ this.spp <- candidates[ptr1]
+ }
+ elts <- Dmat[this.spp,,drop = FALSE]
+ if (length(elts) < Rank)
+ elts <- matrix(elts, 1, Rank)
+ Dk <- m2adefault(elts, M = Rank)[,,1] # Hopefully negative-def
+ temp400 <- eigen(Dk)
+ ptr1 <- ptr1 + 1
+ if (all(temp400$value < 0))
+ break
+ if (ptr1 > length(candidates))
+ break
+ }
+ if (all(temp400$value < 0)) {
+ temp1tol <- -0.5 * solve(Dk)
+ dim(temp1tol) <- c(Rank,Rank)
+ Mmat <- t(chol(temp1tol))
+ if (ConstrainedQO) {
+ temp900 <- solve(t(Mmat))
+ Cmat <- Cmat %*% temp900
+ Amat <- Amat %*% Mmat
+ }
+ if (length(Cmat)) {
+ temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
+ Cmat <- temp800$cmat
+ Amat <- temp800$amat
+ }
+
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
+
+
+
+ retlist <- td.expression(Dmat = Dmat, Amat = Amat, M = M,
+ Dzero = Dzero, Rank = Rank,
+ bellshaped = bellshaped)
+ optimum <- retlist$optimum
+ Tolerance <- retlist$Tolerance
+ Darray <- retlist$Darray
+ bellshaped <- retlist$bellshaped
+
+
+
+
} else {
- if (length(reference) == 1)
- stop("tolerance matrix specified by 'reference' ",
- "is not positive-definite") else
- warning("could not find any positive-definite ",
- "tolerance matrix")
+ if (length(reference) == 1)
+ stop("tolerance matrix specified by 'reference' ",
+ "is not positive-definite") else
+ warning("could not find any positive-definite ",
+ "tolerance matrix")
}
- if (ConstrainedQO)
+ if (ConstrainedQO)
if (Rank > 1) {
- if (!length(xmat <- object@x)) stop("cannot obtain the model matrix")
- numat = xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
- evnu = eigen(var(numat))
- Mmat = solve(t(evnu$vector))
- Cmat = Cmat %*% evnu$vector # == Cmat %*% solve(t(Mmat))
- Amat = Amat %*% Mmat
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
- Cmat = temp800$cmat
- Amat = temp800$amat
- eval(adjust.Dmat.expression)
- eval(td.expression)
- }
+ if (!length(xmat <- object@x))
+ stop("cannot obtain the model matrix")
+ numat <- xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+ evnu <- eigen(var(numat))
+ Mmat <- solve(t(evnu$vector))
+ Cmat <- Cmat %*% evnu$vector # == Cmat %*% solve(t(Mmat))
+ Amat <- Amat %*% Mmat
+ temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
+ Cmat <- temp800$cmat
+ Amat <- temp800$amat
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
+
+
+
+ retlist <- td.expression(Dmat = Dmat, Amat = Amat, M = M,
+ Dzero = Dzero, Rank = Rank,
+ bellshaped = bellshaped)
+ optimum <- retlist$optimum
+ Tolerance <- retlist$Tolerance
+ Darray <- retlist$Darray
+ bellshaped <- retlist$bellshaped
+ }
- if (ConstrainedQO)
+ if (ConstrainedQO)
if (varlvI) {
- if (!length(xmat <- object@x)) stop("cannot obtain the model matrix")
- numat = xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
- sdnumat = apply(cbind(numat), 2, sd)
- Mmat = if (Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
- Cmat = Cmat %*% solve(t(Mmat))
- Amat = Amat %*% Mmat
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
- Cmat = temp800$cmat
- Amat = temp800$amat
- eval(adjust.Dmat.expression)
- eval(td.expression)
- }
-
-
- cx1i = ocontrol$colx1.index
- maximum = if (length(cx1i) == 1 && names(cx1i) == "(Intercept)") {
- eta.temp = B1
- for(ii in 1:M)
- eta.temp[ii] = eta.temp[ii] +
- Amat[ii,,drop = FALSE] %*% optimum[,ii,drop = FALSE] +
- t(optimum[,ii,drop = FALSE]) %*%
- Darray[,,ii,drop= TRUE] %*% optimum[,ii,drop = FALSE]
- mymax = object@family@linkinv(rbind(eta.temp), extra = object@extra)
- c(mymax) # Convert from matrix to vector
- } else {
- 5 * rep(as.numeric(NA), length.out = M) # Make "numeric"
+ if (!length(xmat <- object@x))
+ stop("cannot obtain the model matrix")
+ numat <- xmat[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+ sdnumat <- apply(cbind(numat), 2, sd)
+ Mmat <- if (Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
+ Cmat <- Cmat %*% solve(t(Mmat))
+ Amat <- Amat %*% Mmat
+ temp800 <- crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
+ Cmat <- temp800$cmat
+ Amat <- temp800$amat
+
+
+
+ Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
+ Dmat = Dmat, M = M)
+
+
+
+ retlist <- td.expression(Dmat = Dmat, Amat = Amat, M = M,
+ Dzero = Dzero, Rank = Rank,
+ bellshaped = bellshaped)
+ optimum <- retlist$optimum
+ Tolerance <- retlist$Tolerance
+ Darray <- retlist$Darray
+ bellshaped <- retlist$bellshaped
}
- names(maximum) = ynames
-
- lv.mat = if (ConstrainedQO) {
- object@x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
- } else {
- object@lv
- }
-
- dimnames(Amat) = list(lp.names, lv.names)
- if (ConstrainedQO)
- dimnames(Cmat) = list(names(ocontrol$colx2.index), lv.names)
- if (!length(xmat <- object@x)) stop("cannot obtain the model matrix")
- dimnames(lv.mat) = list(dimnames(xmat)[[1]], lv.names)
-
- ans =
- new(Class = if (ConstrainedQO) "Coef.qrrvglm" else "Coef.uqo",
- A=Amat, B1=B1, Constrained=ConstrainedQO, D=Darray,
- NOS = NOS, Rank = Rank,
- lv = lv.mat,
- lvOrder = lv.mat,
- Optimum=optimum,
- OptimumOrder=optimum,
- bellshaped=bellshaped,
- Dzero=dzero.vector,
- Maximum = maximum,
- Tolerance=Tolerance)
- if (ConstrainedQO) {ans@C = Cmat} else {Cmat = NULL}
-
- for(rrr in 1:Rank)
- ans@OptimumOrder[rrr,] = order(ans@Optimum[rrr,])
- for(rrr in 1:Rank)
- ans@lvOrder[,rrr] = order(ans@lv[,rrr])
-
- if (length(object@misc$estimated.dispersion) &&
- object@misc$estimated.dispersion) {
- p = length(object@coefficients)
- n = object@misc$n
- M = object@misc$M
- NOS = if (length(object@y)) ncol(object@y) else M
- pstar = if (ConstrainedQO) (p + length(Cmat)) else
- p + n*Rank # Adjustment; not sure about UQO
- adjusted.dispersion = object@misc$dispersion * (n*M - p) /
- (n*M - pstar)
- ans@dispersion = adjusted.dispersion
- }
-
- if (MSratio > 1) {
- keepIndex = seq(from = 1, to=M, by=MSratio)
- ans@Dzero = ans@Dzero[keepIndex]
- ans@Optimum = ans@Optimum[,keepIndex,drop = FALSE]
- ans@Tolerance = ans@Tolerance[,,keepIndex,drop = FALSE]
- ans@bellshaped = ans@bellshaped[keepIndex]
- names(ans@Dzero) = ynames
+
+
+ cx1i <- ocontrol$colx1.index
+ maximum <- if (length(cx1i) == 1 && names(cx1i) == "(Intercept)") {
+ eta.temp <- B1
+ for (ii in 1:M)
+ eta.temp[ii] <- eta.temp[ii] +
+ Amat[ii, , drop = FALSE] %*% optimum[, ii, drop = FALSE] +
+ t(optimum[, ii, drop = FALSE]) %*%
+ Darray[,, ii, drop = TRUE] %*% optimum[, ii, drop = FALSE]
+ mymax <- object@family@linkinv(rbind(eta.temp), extra = object@extra)
+ c(mymax) # Convert from matrix to vector
} else {
- dimnames(ans@D) = list(lv.names, lv.names, ynames)
- }
- names(ans@bellshaped) = ynames
- dimnames(ans@Optimum) = list(lv.names, ynames)
- dimnames(ans@Tolerance) = list(lv.names, lv.names, ynames)
- ans
+ 5 * rep(as.numeric(NA), length.out = M) # Make "numeric"
+ }
+ names(maximum) <- ynames
+
+ lv.mat <- if (ConstrainedQO) {
+ object@x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
+ } else {
+ object@lv
+ }
+
+ dimnames(Amat) <- list(lp.names, lv.names)
+ if (ConstrainedQO)
+ dimnames(Cmat) <- list(names(ocontrol$colx2.index), lv.names)
+ if (!length(xmat <- object@x)) stop("cannot obtain the model matrix")
+ dimnames(lv.mat) <- list(dimnames(xmat)[[1]], lv.names)
+
+ ans <-
+ new(Class <- if (ConstrainedQO) "Coef.qrrvglm" else "Coef.uqo",
+ A = Amat, B1 = B1, Constrained = ConstrainedQO, D = Darray,
+ NOS = NOS, Rank = Rank,
+ lv = lv.mat,
+ lvOrder = lv.mat,
+ Optimum = optimum,
+ OptimumOrder = optimum,
+ bellshaped = bellshaped,
+ Dzero = dzero.vector,
+ Maximum = maximum,
+ Tolerance = Tolerance)
+ if (ConstrainedQO) {ans@C <- Cmat} else {Cmat <- NULL}
+
+ for (rrr in 1:Rank)
+ ans@OptimumOrder[rrr,] <- order(ans@Optimum[rrr,])
+ for (rrr in 1:Rank)
+ ans@lvOrder[,rrr] <- order(ans@lv[,rrr])
+
+ if (length(object@misc$estimated.dispersion) &&
+ object@misc$estimated.dispersion) {
+ p <- length(object@coefficients)
+ n <- object@misc$n
+ M <- object@misc$M
+ NOS <- if (length(object@y)) ncol(object@y) else M
+ pstar <- if (ConstrainedQO) (p + length(Cmat)) else
+ p + n*Rank # Adjustment; not sure about UQO
+ adjusted.dispersion <- object@misc$dispersion * (n*M - p) /
+ (n*M - pstar)
+ ans@dispersion <- adjusted.dispersion
+ }
+
+ if (MSratio > 1) {
+ keepIndex <- seq(from = 1, to = M, by = MSratio)
+ ans@Dzero <- ans@Dzero[keepIndex]
+ ans@Optimum <- ans@Optimum[,keepIndex,drop = FALSE]
+ ans@Tolerance <- ans@Tolerance[,,keepIndex,drop = FALSE]
+ ans@bellshaped <- ans@bellshaped[keepIndex]
+ names(ans@Dzero) <- ynames
+ } else {
+ dimnames(ans@D) <- list(lv.names, lv.names, ynames)
+ }
+ names(ans@bellshaped) <- ynames
+ dimnames(ans@Optimum) <- list(lv.names, ynames)
+ dimnames(ans@Tolerance) <- list(lv.names, lv.names, ynames)
+ ans
}
@@ -1109,49 +1183,49 @@ setClass(Class = "Coef.qrrvglm", representation(
"C" = "matrix"),
contains = "Coef.uqo")
-show.Coef.qrrvglm = function(x, ...) {
+show.Coef.qrrvglm <- function(x, ...) {
- object = x
- Rank = object@Rank
- M = nrow(object@A)
- NOS = object@NOS
- mymat = matrix(as.numeric(NA), NOS, Rank)
+ object <- x
+ Rank <- object@Rank
+ M <- nrow(object@A)
+ NOS <- object@NOS
+ mymat <- matrix(as.numeric(NA), NOS, Rank)
if (Rank == 1) { # || object@Diagonal
- for(ii in 1:NOS) {
- fred = if (Rank>1) diag(object@Tolerance[,,ii,drop = FALSE]) else
+ for (ii in 1:NOS) {
+ fred <- if (Rank>1) diag(object@Tolerance[,,ii,drop = FALSE]) else
object@Tolerance[,,ii]
if (all(fred > 0))
- mymat[ii,] = sqrt(fred)
+ mymat[ii,] <- sqrt(fred)
}
- dimnames(mymat) = list(dimnames(object@Tolerance)[[3]],
+ dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
if (Rank == 1) "lv" else
paste("Tolerance", dimnames(mymat)[[2]],
sep = ""))
} else {
- for(ii in 1:NOS) {
- fred = eigen(object@Tolerance[,,ii])
+ for (ii in 1:NOS) {
+ fred <- eigen(object@Tolerance[,,ii])
if (all(fred$value > 0))
- mymat[ii,] = sqrt(fred$value)
+ mymat[ii,] <- sqrt(fred$value)
}
- dimnames(mymat) = list(dimnames(object@Tolerance)[[3]],
+ dimnames(mymat) <- list(dimnames(object@Tolerance)[[3]],
paste("tol", 1:Rank, sep = ""))
}
- dimnames(object@A) = list(dimnames(object@A)[[1]],
+ dimnames(object@A) <- list(dimnames(object@A)[[1]],
if (Rank > 1) paste("A", dimnames(object@A)[[2]], sep = ".") else
"A")
- Maximum = if (length(object@Maximum))
+ Maximum <- if (length(object@Maximum))
cbind(Maximum = object@Maximum) else NULL
if (length(Maximum) && length(mymat) && Rank == 1)
- Maximum[is.na(mymat),] = NA
+ Maximum[is.na(mymat),] <- NA
- optmat = cbind(t(object@Optimum))
- dimnames(optmat) = list(dimnames(optmat)[[1]],
+ optmat <- cbind(t(object@Optimum))
+ dimnames(optmat) <- list(dimnames(optmat)[[1]],
if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
else "Optimum")
if (length(optmat) && length(mymat) && Rank == 1)
- optmat[is.na(mymat),] = NA
+ optmat[is.na(mymat),] <- NA
if ( object@Constrained ) {
cat("\nC matrix (constrained/canonical coefficients)\n")
@@ -1159,9 +1233,9 @@ show.Coef.qrrvglm = function(x, ...) {
}
cat("\nB1 and A matrices\n")
print(cbind(t(object@B1),
- A=object@A), ...)
+ A = object@A), ...)
cat("\nOptima and maxima\n")
- print(cbind(Optimum=optmat,
+ print(cbind(Optimum = optmat,
Maximum), ...)
if (Rank > 1) { # !object@Diagonal && Rank > 1
cat("\nTolerances\n") } else
@@ -1197,8 +1271,7 @@ predictqrrvglm <- function(object,
deriv = 0,
dispersion = NULL,
extra = object@extra,
- varlvI = FALSE, reference = NULL, ...)
-{
+ varlvI = FALSE, reference = NULL, ...) {
if (se.fit)
stop("cannot handle se.fit == TRUE yet")
if (deriv != 0)
@@ -1212,11 +1285,11 @@ predictqrrvglm <- function(object,
if (type == "terms")
stop("cannot handle type='terms' yet")
- M = object@misc$M
- Rank = object@control$Rank
+ M <- object@misc$M
+ Rank <- object@control$Rank
- na.act = object@na.action
- object@na.action = list()
+ na.act <- object@na.action
+ object@na.action <- list()
if (!length(newdata) && type == "response" && length(object@fitted.values)) {
if (length(na.act)) {
@@ -1231,7 +1304,7 @@ predictqrrvglm <- function(object,
offset <- object@offset
tt <- object@terms$terms # terms(object)
if (!length(object@x))
- attr(X, "assign") = attrassignlm(X, tt)
+ attr(X, "assign") <- attrassignlm(X, tt)
} else {
if (is.smart(object) && length(object@smart.prediction)) {
setup.smart("read", smart.prediction = object@smart.prediction)
@@ -1243,10 +1316,10 @@ predictqrrvglm <- function(object,
xlev = object@xlevels)
if (nrow(X) != nrow(newdata)) {
- as.save = attr(X, "assign")
- X = X[rep(1, nrow(newdata)),,drop = FALSE]
- dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
- attr(X, "assign") = as.save # Restored
+ as.save <- attr(X, "assign")
+ X <- X[rep(1, nrow(newdata)),,drop = FALSE]
+ dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") <- as.save # Restored
}
offset <- if (!is.null(off.num<-attr(tt,"offset"))) {
@@ -1260,42 +1333,43 @@ predictqrrvglm <- function(object,
wrapup.smart()
}
- attr(X, "assign") = attrassigndefault(X, tt)
+ attr(X, "assign") <- attrassigndefault(X, tt)
}
- ocontrol = object@control
+ ocontrol <- object@control
- Rank = ocontrol$Rank
- NOS = ncol(object@y)
- sppnames = dimnames(object@y)[[2]]
- modelno = ocontrol$modelno # 1,2,3,5 or 0
- M = if (any(slotNames(object) == "predictors") &&
- is.matrix(object@predictors)) ncol(object@predictors) else
+ Rank <- ocontrol$Rank
+ NOS <- ncol(object@y)
+ sppnames <- dimnames(object@y)[[2]]
+ modelno <- ocontrol$modelno # 1,2,3,5 or 0
+ M <- if (any(slotNames(object) == "predictors") &&
+ is.matrix(object@predictors))
+ ncol(object@predictors) else
object@misc$M
- MSratio = M / NOS # First value is g(mean) = quadratic form in lv
+ MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
if (MSratio != 1) stop("can only handle MSratio == 1 for now")
if (length(newdata)) {
- Coefs = Coef(object, varlvI = varlvI, reference = reference)
- X1mat = X[,ocontrol$colx1.index,drop = FALSE]
- X2mat = X[,ocontrol$colx2.index,drop = FALSE]
- lvmat = as.matrix(X2mat %*% Coefs@C) # n x Rank
-
- etamat = as.matrix(X1mat %*% Coefs@B1 + lvmat %*% t(Coefs@A))
- whichSpecies = 1:NOS # Do it all for all species
- for(sppno in 1:length(whichSpecies)) {
- thisSpecies = whichSpecies[sppno]
- Dmat = matrix(Coefs@D[,,thisSpecies], Rank, Rank)
- etamat[,thisSpecies] = etamat[,thisSpecies] +
+ Coefs <- Coef(object, varlvI = varlvI, reference = reference)
+ X1mat <- X[,ocontrol$colx1.index,drop = FALSE]
+ X2mat <- X[,ocontrol$colx2.index,drop = FALSE]
+ lvmat <- as.matrix(X2mat %*% Coefs@C) # n x Rank
+
+ etamat <- as.matrix(X1mat %*% Coefs@B1 + lvmat %*% t(Coefs@A))
+ whichSpecies <- 1:NOS # Do it all for all species
+ for (sppno in 1:length(whichSpecies)) {
+ thisSpecies <- whichSpecies[sppno]
+ Dmat <- matrix(Coefs@D[,,thisSpecies], Rank, Rank)
+ etamat[,thisSpecies] <- etamat[,thisSpecies] +
mux34(lvmat, Dmat, symmetric = TRUE)
}
} else {
- etamat = object@predictors
+ etamat <- object@predictors
}
- pred = switch(type,
- response={
+ pred <- switch(type,
+ response = {
fv = if (length(newdata)) object@family@linkinv(etamat, extra) else
fitted(object)
if (M > 1 && is.matrix(fv)) {
@@ -1305,15 +1379,15 @@ predictqrrvglm <- function(object,
fv
},
link = etamat,
- lv=stop("failure here"),
- terms=stop("failure here"))
+ lv = stop("failure here"),
+ terms = stop("failure here"))
if (!length(newdata) && length(na.act)) {
if (se.fit) {
- pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
- pred$se.fit = napredict(na.act[[1]], pred$se.fit)
+ pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
} else {
- pred = napredict(na.act[[1]], pred)
+ pred <- napredict(na.act[[1]], pred)
}
}
pred
@@ -1324,7 +1398,7 @@ setMethod("predict", "qrrvglm", function(object, ...)
predictqrrvglm(object, ...))
-coefqrrvglm = function(object, matrix.out = FALSE,
+coefqrrvglm <- function(object, matrix.out = FALSE,
label = TRUE) {
if (matrix.out)
stop("currently cannot handle matrix.out = TRUE")
@@ -1335,27 +1409,27 @@ coefqrrvglm = function(object, matrix.out = FALSE,
residualsqrrvglm <- function(object,
type = c("deviance", "pearson", "working", "response", "ldot"),
- matrix.arg= TRUE) {
+ matrix.arg = TRUE) {
stop("this function has not been written yet")
}
-setMethod("residuals", "qrrvglm", function(object, ...)
- residualsqrrvglm(object, ...))
+setMethod("residuals", "qrrvglm",
+ function(object, ...)
+ residualsqrrvglm(object, ...))
-show.rrvglm <- function(x, ...)
-{
+show.rrvglm <- function(x, ...) {
if (!is.null(cl <- x@call)) {
- cat("Call:\n")
- dput(cl)
+ cat("Call:\n")
+ dput(cl)
}
vecOfBetas <- x@coefficients
if (any(nas <- is.na(vecOfBetas))) {
if (is.null(names(vecOfBetas)))
- names(vecOfBetas) = paste("b",
+ names(vecOfBetas) <- paste("b",
1:length(vecOfBetas), sep = "")
cat("\nCoefficients: (", sum(nas),
" not defined because of singularities)\n", sep = "")
@@ -1364,9 +1438,9 @@ show.rrvglm <- function(x, ...)
print.default(vecOfBetas, ...) # used to be print()
if (FALSE) {
- Rank <- x@Rank
- if (!length(Rank))
- Rank <- sum(!nas)
+ Rank <- x@Rank
+ if (!length(Rank))
+ Rank <- sum(!nas)
}
if (FALSE) {
@@ -1384,7 +1458,7 @@ show.rrvglm <- function(x, ...)
if (length(x@criterion)) {
ncrit <- names(x@criterion)
- for(iii in ncrit)
+ for (iii in ncrit)
if (iii != "loglikelihood" && iii != "deviance")
cat(paste(iii, ":", sep = ""),
format(x@criterion[[iii]]), "\n")
@@ -1403,12 +1477,11 @@ setMethod("show", "rrvglm", function(object) show.rrvglm(object))
-rrvglm.control.Gaussian <- function(half.stepsizing= FALSE,
- save.weight= TRUE, ...)
-{
+rrvglm.control.Gaussian <- function(half.stepsizing = FALSE,
+ save.weight = TRUE, ...) {
- list(half.stepsizing= FALSE,
- save.weight=as.logical(save.weight)[1])
+ list(half.stepsizing = FALSE,
+ save.weight = as.logical(save.weight)[1])
}
@@ -1418,8 +1491,7 @@ summary.rrvglm <- function(object, correlation = FALSE,
numerical= TRUE,
h.step = 0.0001,
kill.all = FALSE, omit13 = FALSE,
- fixA = FALSE, ...)
-{
+ fixA = FALSE, ...) {
@@ -1444,23 +1516,23 @@ summary.rrvglm <- function(object, correlation = FALSE,
answer <-
new(Class = "summary.rrvglm",
object,
- call=stuff@call,
- coef3=stuff@coef3,
- cov.unscaled=stuff@cov.unscaled,
- correlation=stuff@correlation,
- df=stuff@df,
- pearson.resid=stuff@pearson.resid,
- sigma=stuff@sigma)
+ call = stuff@call,
+ coef3 = stuff@coef3,
+ cov.unscaled = stuff@cov.unscaled,
+ correlation = stuff@correlation,
+ df = stuff@df,
+ pearson.resid = stuff@pearson.resid,
+ sigma = stuff@sigma)
if (is.numeric(stuff@dispersion))
- slot(answer, "dispersion") = stuff@dispersion
+ slot(answer, "dispersion") <- stuff@dispersion
- tmp5 <- get.rrvglm.se1(object, omit13=omit13,
- numerical=numerical, h.step = h.step,
- kill.all=kill.all, fixA=fixA, ...)
+ tmp5 <- get.rrvglm.se1(object, omit13 = omit13,
+ numerical = numerical, h.step = h.step,
+ kill.all = kill.all, fixA = fixA, ...)
if (any(diag(tmp5$cov.unscaled) <= 0) ||
any(eigen(tmp5$cov.unscaled)$value <= 0)) {
warning("cov.unscaled is not positive definite")
@@ -1472,14 +1544,14 @@ summary.rrvglm <- function(object, correlation = FALSE,
object@misc$disper else
object@misc$default.disper
if (is.numeric(dispersion)) {
- if (is.numeric(od) && dispersion!=od)
+ if (is.numeric(od) && dispersion != od)
warning("dispersion != object@misc$dispersion; ",
"using the former")
} else {
dispersion <- if (is.numeric(od)) od else 1
}
- tmp8 = object@misc$M - object@control$Rank -
+ tmp8 <- object@misc$M - object@control$Rank -
length(object@control$szero)
answer@df[1] <- answer@df[1] + tmp8 * object@control$Rank
answer@df[2] <- answer@df[2] - tmp8 * object@control$Rank
@@ -1503,7 +1575,7 @@ summary.rrvglm <- function(object, correlation = FALSE,
-get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
+get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
numerical = TRUE,
fixA = FALSE, h.step = 0.0001,
trace.arg = FALSE, ...) {
@@ -1514,24 +1586,24 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
if (length(fit@control$Nested) && fit@control$Nested)
stop("sorry, cannot handle nested models yet")
- szero = fit@control$szero
+ szero <- fit@control$szero
if (!length(fit@x))
stop("fix@x is empty. Run rrvglm(... , x= TRUE)")
- colx1.index = fit@control$colx1.index # May be NULL
- colx2.index = fit@control$colx2.index
+ colx1.index <- fit@control$colx1.index # May be NULL
+ colx2.index <- fit@control$colx2.index
Blist <- fit@constraints
ncolBlist <- unlist(lapply(Blist, ncol))
- p1 = length(colx1.index) # May be 0
- p2 = length(colx2.index)
+ p1 <- length(colx1.index) # May be 0
+ p2 <- length(colx2.index)
Rank <- fit@control$Rank # fit@misc$Nested.Rank
Amat <- fit@constraints[[colx2.index[1]]]
- B1mat =if (p1)
+ B1mat <- if (p1)
coefvlm(fit, matrix.out = TRUE)[colx1.index, , drop = FALSE] else
NULL
C.try <- coefvlm(fit, matrix.out= TRUE)[colx2.index, , drop = FALSE]
@@ -1540,7 +1612,7 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
x1mat <- if (p1) fit at x[, colx1.index, drop = FALSE] else NULL
x2mat <- fit at x[, colx2.index, drop = FALSE]
- wz <- weights(fit, type = "work") # old: wweights(fit) #fit@weights
+ wz <- weights(fit, type = "work") # old: wweights(fit) #fit@weights
if (!length(wz))
stop("cannot get fit@weights")
@@ -1550,7 +1622,7 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
zmat <- fit@predictors + fit@residuals
theta <- c(Amat[-c(Index.corner,szero),])
if (fit@control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
+ wz <- checkwz(wz, M = M, trace = trace,
wzepsilon = fit@control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent= TRUE)
@@ -1585,68 +1657,68 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
sfit2233 <- summaryvglm(newobject)
d8 <- dimnames(sfit2233@cov.unscaled)[[1]]
cov2233 <- solve(sfit2233@cov.unscaled) # Includes any intercepts
- dimnames(cov2233) = list(d8, d8)
-
- log.vec33 = NULL
- nassign = names(fit@constraints)
- choose.from = varassign(fit@constraints, nassign)
- for(ii in nassign)
- if (any(ii == names(colx2.index))) {
- log.vec33 = c(log.vec33, choose.from[[ii]])
- }
- cov33 = cov2233[ log.vec33, log.vec33, drop = FALSE] # r*p2 by r*p2
- cov23 = cov2233[-log.vec33, log.vec33, drop = FALSE]
- cov22 = cov2233[-log.vec33,-log.vec33, drop = FALSE]
+ dimnames(cov2233) <- list(d8, d8)
+
+ log.vec33 <- NULL
+ nassign <- names(fit@constraints)
+ choose.from <- varassign(fit@constraints, nassign)
+ for (ii in nassign)
+ if (any(ii == names(colx2.index))) {
+ log.vec33 <- c(log.vec33, choose.from[[ii]])
+ }
+ cov33 <- cov2233[ log.vec33, log.vec33, drop = FALSE] # r*p2 by r*p2
+ cov23 <- cov2233[-log.vec33, log.vec33, drop = FALSE]
+ cov22 <- cov2233[-log.vec33,-log.vec33, drop = FALSE]
lv.mat <- x2mat %*% Cmat
- offs = matrix(0, n, M) # The "0" handles szero's
- offs[,Index.corner] = lv.mat
+ offs <- matrix(0, n, M) # The "0" handles szero's
+ offs[,Index.corner] <- lv.mat
if (M == (Rank + length(szero)))
- stop("cannot handle full-rank models yet")
- cm = matrix(0, M, M - Rank - length(szero))
- cm[-c(Index.corner, szero),] = diag(M - Rank - length(szero))
+ stop("cannot handle full-rank models yet")
+ cm <- matrix(0, M, M - Rank - length(szero))
+ cm[-c(Index.corner, szero),] <- diag(M - Rank - length(szero))
- Blist = vector("list", length(colx1.index)+1)
- names(Blist) = c(names(colx1.index), "I(lv.mat)")
- for(ii in names(colx1.index))
- Blist[[ii]] = fit at constraints[[ii]]
- Blist[["I(lv.mat)"]] = cm
+ Blist <- vector("list", length(colx1.index)+1)
+ names(Blist) <- c(names(colx1.index), "I(lv.mat)")
+ for (ii in names(colx1.index))
+ Blist[[ii]] <- fit at constraints[[ii]]
+ Blist[["I(lv.mat)"]] <- cm
if (p1) {
- ooo = fit at assign
- bb = NULL
- for(ii in 1:length(ooo)) {
- if (any(ooo[[ii]][1] == colx1.index))
- bb = c(bb, names(ooo)[ii])
- }
-
- has.intercept = any(bb == "(Intercept)")
- bb[bb == "(Intercept)"] = "1"
- if (p1>1)
- bb = paste(bb, collapse = "+")
- if (has.intercept) {
- bb = paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse = " ")
- } else {
- bb = paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse = " ")
- }
- bb = as.formula(bb)
+ ooo <- fit at assign
+ bb <- NULL
+ for (ii in 1:length(ooo)) {
+ if (any(ooo[[ii]][1] == colx1.index))
+ bb <- c(bb, names(ooo)[ii])
+ }
+
+ has.intercept <- any(bb == "(Intercept)")
+ bb[bb == "(Intercept)"] <- "1"
+ if (p1>1)
+ bb <- paste(bb, collapse = "+")
+ if (has.intercept) {
+ bb <- paste("zmat - offs ~ ", bb, " + I(lv.mat)", collapse = " ")
+ } else {
+ bb <- paste("zmat - offs ~ -1 + ", bb, " + I(lv.mat)", collapse = " ")
+ }
+ bb <- as.formula(bb)
} else {
- bb = as.formula("zmat - offs ~ -1 + I(lv.mat)")
+ bb <- as.formula("zmat - offs ~ -1 + I(lv.mat)")
}
if (fit at misc$dataname == "list") {
- dspec = FALSE
+ dspec <- FALSE
} else {
- mytext1 = "exists(x=fit at misc$dataname, envir = VGAM:::VGAMenv)"
- myexp1 = parse(text=mytext1)
- is.there = eval(myexp1)
- bbdata= if (is.there)
+ mytext1 <- "exists(x=fit at misc$dataname, envir = VGAM:::VGAMenv)"
+ myexp1 <- parse(text=mytext1)
+ is.there <- eval(myexp1)
+ bbdata <- if (is.there)
get(fit at misc$dataname, envir=VGAM:::VGAMenv) else
get(fit at misc$dataname)
- dspec = TRUE
+ dspec <- TRUE
}
@@ -1666,29 +1738,29 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
sfit1122 <- summaryvlm(fit1122)
d8 <- dimnames(sfit1122 at cov.unscaled)[[1]]
cov1122 <- solve(sfit1122 at cov.unscaled)
- dimnames(cov1122) = list(d8, d8)
+ dimnames(cov1122) <- list(d8, d8)
- lcs = length(coefvlm(sfit1122))
- log.vec11 = (lcs-(M-Rank-length(szero))*Rank+1):lcs
- cov11 = cov1122[log.vec11, log.vec11, drop = FALSE]
- cov12 = cov1122[ log.vec11, -log.vec11, drop = FALSE]
- cov22 = cov1122[-log.vec11, -log.vec11, drop = FALSE]
- cov13 = delct.da %*% cov33
+ lcs <- length(coefvlm(sfit1122))
+ log.vec11 <- (lcs-(M-Rank-length(szero))*Rank+1):lcs
+ cov11 <- cov1122[log.vec11, log.vec11, drop = FALSE]
+ cov12 <- cov1122[ log.vec11, -log.vec11, drop = FALSE]
+ cov22 <- cov1122[-log.vec11, -log.vec11, drop = FALSE]
+ cov13 <- delct.da %*% cov33
if (omit13)
- cov13 = cov13 * 0 # zero it
+ cov13 <- cov13 * 0 # zero it
if (kill.all) {
- cov13 = cov13 * 0 # zero it
+ cov13 <- cov13 * 0 # zero it
if (fixA) {
- cov12 = cov12 * 0 # zero it
+ cov12 <- cov12 * 0 # zero it
} else {
- cov23 = cov23 * 0 # zero it
+ cov23 <- cov23 * 0 # zero it
}
}
- cov13 = -cov13 # Richards (1961)
+ cov13 <- -cov13 # Richards (1961)
if (fixA) {
cov.unscaled <- rbind(cbind(cov1122, rbind(cov13, cov23)),
@@ -1702,8 +1774,10 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
# Get all the coefficients
acoefs <- c(fit1122 at coefficients[log.vec11], fit at coefficients)
- dimnames(ans) = list(names(acoefs), names(acoefs))
- list(cov.unscaled=ans, coefficients=acoefs, rss=sfit1122 at rss)
+ dimnames(ans) <- list(names(acoefs), names(acoefs))
+ list(cov.unscaled = ans,
+ coefficients = acoefs,
+ rss = sfit1122 at rss)
}
@@ -1723,8 +1797,7 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
p2, Index.corner, Aimat, B1mat, Cimat,
h.step = 0.0001, colx2.index,
- xij = NULL, szero = NULL)
-{
+ xij = NULL, szero = NULL) {
nn <- nrow(x2mat)
@@ -1735,47 +1808,47 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
if ((length(Index.corner) + length(szero)) == M)
stop("cannot handle full rank models yet")
- cbindex = (1:M)[-c(Index.corner, szero)]
-
- ptr = 1
- for(sss in 1:r)
- for(tt in cbindex) {
- small.Blist = vector("list", p2)
- pAmat = Aimat
- pAmat[tt,sss] = pAmat[tt,sss] + h.step # Perturb it
- for(ii in 1:p2)
- small.Blist[[ii]] = pAmat
-
- offset = if (length(fit@offset)) fit@offset else 0
- if (all(offset == 0)) offset = 0
- neweta = x2mat %*% Cimat %*% t(pAmat)
+ cbindex <- (1:M)[-c(Index.corner, szero)]
+
+ ptr <- 1
+ for (sss in 1:r)
+ for (tt in cbindex) {
+ small.Blist <- vector("list", p2)
+ pAmat <- Aimat
+ pAmat[tt,sss] <- pAmat[tt,sss] + h.step # Perturb it
+ for (ii in 1:p2)
+ small.Blist[[ii]] <- pAmat
+
+ offset <- if (length(fit@offset)) fit@offset else 0
+ if (all(offset == 0)) offset <- 0
+ neweta <- x2mat %*% Cimat %*% t(pAmat)
if (is.numeric(x1mat))
- neweta = neweta + x1mat %*% B1mat
- fit@predictors = neweta
+ neweta <- neweta + x1mat %*% B1mat
+ fit@predictors <- neweta
newmu <- fit@family@linkinv(neweta, fit@extra)
- fit@fitted.values = as.matrix(newmu) # 20100909
+ fit@fitted.values <- as.matrix(newmu) # 20100909
- fred = weights(fit, type = "w", deriv= TRUE, ignore.slot= TRUE)
+ fred <- weights(fit, type = "w", deriv= TRUE, ignore.slot= TRUE)
if (!length(fred))
stop("cannot get @weights and $deriv from object")
- wz = fred$weights
+ wz <- fred$weights
deriv.mu <- fred$deriv
U <- vchol(wz, M = M, n = nn, silent = TRUE)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = nn)
newzmat <- neweta + vbacksub(U, tvfor, M = M, n = nn) - offset
if (is.numeric(x1mat))
- newzmat = newzmat - x1mat %*% B1mat
+ newzmat <- newzmat - x1mat %*% B1mat
- newfit = vlm.wfit(xmat = x2mat, zmat = newzmat,
+ newfit <- vlm.wfit(xmat = x2mat, zmat = newzmat,
Blist = small.Blist, U = U,
matrix.out = FALSE, is.vlmX = FALSE,
rss = TRUE, qr = FALSE, x.ret = FALSE,
offset = NULL, xij = xij)
dct.da[ptr,] <- (newfit$coef - t(Cimat)) / h.step
- ptr = ptr + 1
+ ptr <- ptr + 1
}
dct.da
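
[Note on the hunk above: num.deriv.rrr estimates dC/dA by perturbing one element of Aimat by h.step, refitting C, and dividing the change by h.step, i.e. a plain forward difference. A minimal sketch of that idea in base R; the function f and the values a0 and h below are illustrative assumptions, not VGAM code.

f  <- function(a) a^2 + 3 * a    # any smooth scalar function
a0 <- 1.5                        # point at which the derivative is wanted
h  <- 1e-4                       # small step, analogous to h.step above
(f(a0 + h) - f(a0)) / h          # forward difference; exact value is 2*a0 + 3 = 6
]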
@@ -1784,74 +1857,72 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
-dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
+dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
p2, Index.corner, Aimat, B1mat, Cimat,
xij = NULL,
- szero = NULL)
-{
+ szero = NULL) {
if (length(szero))
stop("cannot handle 'szero' in dctda.fast.only()")
- nn = nrow(x2mat)
+ nn <- nrow(x2mat)
if (nrow(Cimat) != p2 || ncol(Cimat) != r)
stop("Cimat wrong shape")
- fred = kronecker(matrix(1,1,r), x2mat)
- fred = kronecker(fred, matrix(1,M,1))
- barney = kronecker(Aimat, matrix(1,1,p2))
- barney = kronecker(matrix(1, nn, 1), barney)
+ fred <- kronecker(matrix(1,1,r), x2mat)
+ fred <- kronecker(fred, matrix(1,M,1))
+ barney <- kronecker(Aimat, matrix(1,1,p2))
+ barney <- kronecker(matrix(1, nn, 1), barney)
- temp = array(t(barney*fred), c(p2*r, M, nn))
- temp = aperm(temp, c(2,1,3)) # M by p2*r by nn
- temp = mux5(wz, temp, M = M, matrix.arg= TRUE)
- temp = m2adefault(temp, M=p2*r) # Note M != M here!
- G = solve(rowSums(temp, dims = 2)) # p2*r by p2*r
+ temp <- array(t(barney*fred), c(p2*r, M, nn))
+ temp <- aperm(temp, c(2,1,3)) # M by p2*r by nn
+ temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
+ temp <- m2adefault(temp, M=p2*r) # Note M != M here!
+ G <- solve(rowSums(temp, dims = 2)) # p2*r by p2*r
- dc.da = array(NA, c(p2, r, M, r)) # different from other functions
+ dc.da <- array(NA, c(p2, r, M, r)) # different from other functions
if (length(Index.corner) == M)
stop("cannot handle full rank models yet")
- cbindex = (1:M)[-Index.corner] # complement of Index.corner
- resid2 = if (length(x1mat))
+ cbindex <- (1:M)[-Index.corner] # complement of Index.corner
+ resid2 <- if (length(x1mat))
mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
upper = FALSE, as.matrix = TRUE) else
mux22(t(wz), zmat , M = M,
upper = FALSE, as.matrix = TRUE)
- for(sss in 1:r)
- for(ttt in cbindex) {
- fred = t(x2mat) *
+ for (sss in 1:r)
+ for (ttt in cbindex) {
+ fred <- t(x2mat) *
matrix(resid2[, ttt], p2, nn, byrow = TRUE) # p2 * nn
- temp2 = kronecker(ei(sss,r), rowSums(fred))
- for(kkk in 1:r) {
- Wiak = mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow = TRUE),
+ temp2 <- kronecker(eifun(sss,r), rowSums(fred))
+ for (kkk in 1:r) {
+ Wiak <- mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow = TRUE),
M = M, upper = FALSE,
as.matrix = TRUE) # nn * M
- wxx = Wiak[,ttt] * x2mat
- blocki = t(x2mat) %*% wxx
- temp4a = blocki %*% Cimat[,kkk]
+ wxx <- Wiak[,ttt] * x2mat
+ blocki <- t(x2mat) %*% wxx
+ temp4a <- blocki %*% Cimat[,kkk]
if (kkk == 1) {
- temp4b = blocki %*% Cimat[,sss]
+ temp4b <- blocki %*% Cimat[,sss]
}
- temp2 = temp2 - kronecker(ei(sss,r), temp4a) -
- kronecker(ei(kkk,r), temp4b)
+ temp2 <- temp2 - kronecker(eifun(sss,r), temp4a) -
+ kronecker(eifun(kkk,r), temp4b)
}
- dc.da[,,ttt,sss] = G %*% temp2
+ dc.da[,,ttt,sss] <- G %*% temp2
}
- ans1 = dc.da[,,cbindex,,drop = FALSE] # p2 x r x (M-r) x r
- ans1 = aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
+ ans1 <- dc.da[,,cbindex,,drop = FALSE] # p2 x r x (M-r) x r
+ ans1 <- aperm(ans1, c(2,1,3,4)) # r x p2 x (M-r) x r
- ans1 = matrix(c(ans1), r*p2, (M-r)*r)
- ans1 = t(ans1)
+ ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
+ ans1 <- t(ans1)
ans1
}
-dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
- intercept= TRUE, xij = NULL)
-{
+dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
+ intercept= TRUE, xij = NULL) {
@@ -1864,15 +1935,15 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
if (intercept) {
Blist <- vector("list", pp+1)
Blist[[1]] <- diag(M)
- for(ii in 2:(pp+1))
- Blist[[ii]] = Aimat
+ for (ii in 2:(pp+1))
+ Blist[[ii]] <- Aimat
} else {
Blist <- vector("list", pp)
- for(ii in 1:pp)
- Blist[[ii]] = Aimat
+ for (ii in 1:pp)
+ Blist[[ii]] <- Aimat
}
- coeffs = vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = TRUE,
+ coeffs <- vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = TRUE,
xij = xij)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
@@ -1891,9 +1962,9 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
temp <- array(t(barney*fred), c(r*pp,M,nn))
temp <- aperm(temp, c(2,1,3))
- temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
- temp <- m2adefault(temp, M=r*pp) # Note M != M here!
- G = solve(rowSums(temp, dims = 2))
+ temp <- mux5(wz, temp, M = M, matrix.arg = TRUE)
+ temp <- m2adefault(temp, M = r*pp) # Note M != M here!
+ G <- solve(rowSums(temp, dims = 2))
dc.da <- array(NA, c(pp,r,M,r)) # different from other functions
cbindex <- (1:M)[-Index.corner]
@@ -1901,14 +1972,14 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
upper = FALSE, as.matrix = TRUE) # mat= TRUE,
- for(s in 1:r)
- for(tt in cbindex) {
+ for (s in 1:r)
+ for (tt in cbindex) {
fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
- temp2 <- kronecker(ei(s,r), rowSums(fred))
+ temp2 <- kronecker(eifun(s,r), rowSums(fred))
temp4 <- rep(0,pp)
- for(k in 1:r) {
+ for (k in 1:r) {
Wiak <- mux22(t(wz),
matrix(Aimat[, k], nn, M, byrow = TRUE),
M = M, upper = FALSE, as.matrix = TRUE)
@@ -1920,7 +1991,7 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
t(xmat)) %*% wxx
temp4 <- temp4 + blocki %*% Cimat[, k]
}
- dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
+ dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(eifun(s,r),temp4))
}
ans1 <- dc.da[,,cbindex,,drop = FALSE] # pp x r x (M-r) x r
ans1 <- aperm(ans1, c(2,1,3,4)) # r x pp x (M-r) x r
@@ -1929,8 +2000,8 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
detastar.da <- array(0,c(M,r,r,nn))
- for(s in 1:r)
- for(j in 1:r) {
+ for (s in 1:r)
+ for (j in 1:r) {
t1 <- t(dc.da[,j,,s])
t1 <- matrix(t1, M, pp)
detastar.da[,j,s,] <- t1 %*% (if (intercept)
@@ -1946,166 +2017,166 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
AtWi <- kronecker(matrix(1, nn, 1), Aimat)
AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE) # matrix.arg= TRUE,
AtWi <- array(t(AtWi), c(r,M,nn))
- for(ss in 1:r) {
+ for (ss in 1:r) {
temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1] #MxM
temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
temp93 <- mux7(temp92, AtWi)
- temp91 = rowSums(temp93, dims = 2) # M x M
+ temp91 <- rowSums(temp93, dims = 2) # M x M
deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
}
ans2 <- deta0.da[-(1:r),,,drop = FALSE] # (M-r) x M x r
ans2 <- aperm(ans2, c(1,3,2)) # (M-r) x r x M
ans2 <- matrix(c(ans2), (M-r)*r, M)
- list(dc.da=ans1, dint.da=ans2)
+ list(dc.da = ans1, dint.da = ans2)
}
-rrr.deriv.rss = function(theta, wz, U, z, M, r, xmat,
- pp, Index.corner, intercept= TRUE,
- xij = NULL)
-{
+rrr.deriv.rss <- function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner, intercept = TRUE,
+ xij = NULL) {
- Amat = matrix(as.numeric(NA), M, r)
- Amat[Index.corner,] = diag(r)
- Amat[-Index.corner,] = theta # [-(1:M)]
+ Amat <- matrix(as.numeric(NA), M, r)
+ Amat[Index.corner,] <- diag(r)
+ Amat[-Index.corner,] <- theta # [-(1:M)]
- if (intercept) {
- Blist = vector("list", pp+1)
- Blist[[1]] = diag(M)
- for(ii in 2:(pp+1))
- Blist[[ii]] = Amat
- } else {
- Blist = vector("list", pp)
- for(ii in 1:pp)
- Blist[[ii]] = Amat
- }
+ if (intercept) {
+ Blist <- vector("list", pp+1)
+ Blist[[1]] <- diag(M)
+ for (ii in 2:(pp+1))
+ Blist[[ii]] <- Amat
+ } else {
+ Blist <- vector("list", pp)
+ for (ii in 1:pp)
+ Blist[[ii]] <- Amat
+ }
- vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = FALSE,
- rss = TRUE, xij = xij)$rss
+ vlm.wfit(xmat = xmat, z, Blist, U = U, matrix.out = FALSE,
+ rss = TRUE, xij = xij)$rss
}
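
[Note on the hunk above: the opening lines of rrr.deriv.rss rebuild the rank-r coefficient matrix A from the free parameter vector theta under corner constraints: the rows indexed by Index.corner are pinned to an identity block and the remaining rows hold theta. A self-contained sketch of that layout; M, r, Index.corner and theta below are made-up values for illustration, not taken from the package.

M <- 4; r <- 2                     # M linear predictors, rank r
Index.corner <- 1:2                # rows of A fixed at the identity
theta <- c(0.3, -0.1, 0.8, 0.5)    # the (M - r) * r free elements
Amat <- matrix(NA_real_, M, r)
Amat[Index.corner, ] <- diag(r)    # corner (identity) block
Amat[-Index.corner, ] <- theta     # free block, filled column-wise
Amat
]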
-rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
- pp, Index.corner, intercept= TRUE)
-{
+rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
+ pp, Index.corner,
+ intercept = TRUE) {
- nn = nrow(xmat)
+ nn <- nrow(xmat)
- Aimat = matrix(as.numeric(NA), M, r)
- Aimat[Index.corner,] = diag(r)
- Aimat[-Index.corner,] = theta # [-(1:M)]
+ Aimat <- matrix(as.numeric(NA), M, r)
+ Aimat[Index.corner,] <- diag(r)
+ Aimat[-Index.corner,] <- theta # [-(1:M)]
- if (intercept) {
- Blist = vector("list", pp+1)
- Blist[[1]] = diag(M)
- for(i in 2:(pp+1))
- Blist[[i]] = Aimat
- } else {
- Blist = vector("list", pp)
- for(i in 1:(pp))
- Blist[[i]] = Aimat
- }
-
- coeffs = vlm.wfit(xmat, z, Blist, U = U, matrix.out= TRUE,
- xij = NULL)$mat.coef
- c3 = coeffs = t(coeffs) # transpose to make M x (pp+1)
-
-
- int.vec = if (intercept) c3[,1] else 0 # \boldeta_0
- Cimat = if (intercept) t(c3[Index.corner,-1,drop = FALSE]) else
- t(c3[Index.corner,,drop = FALSE])
- if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
- stop("Cimat wrong shape")
-
- fred = kronecker(matrix(1,1,r),
- if (intercept) xmat[,-1,drop = FALSE] else xmat)
- fred = kronecker(fred, matrix(1,M,1))
- barney = kronecker(Aimat, matrix(1,1,pp))
- barney = kronecker(matrix(1, nn, 1), barney)
-
- temp = array(t(barney*fred), c(r*pp,M,nn))
- temp = aperm(temp, c(2,1,3))
- temp = mux5(wz, temp, M = M, matrix.arg= TRUE)
- temp = m2adefault(temp, M=r*pp) # Note M != M here!
- G = solve(rowSums(temp, dims = 2))
-
- dc.da = array(NA,c(pp,r,r,M))
- cbindex = (1:M)[-Index.corner]
- resid2 = mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE),
- M = M,
- upper = FALSE, as.matrix = TRUE)
-
- for(s in 1:r)
- for(tt in cbindex) {
- fred = (if (intercept) t(xmat[,-1,drop = FALSE]) else
- t(xmat)) * matrix(resid2[,tt],pp,nn,byrow = TRUE)
- temp2 = kronecker(ei(s,r), rowSums(fred))
-
- temp4 = rep(0,pp)
- for(k in 1:r) {
- Wiak = mux22(t(wz),
- matrix(Aimat[, k], nn, M, byrow = TRUE),
- M = M, upper = FALSE, as.matrix = TRUE)
- wxx = Wiak[,tt] * (if (intercept)
- xmat[, -1, drop = FALSE] else xmat)
- blocki = (if (intercept) t(xmat[, -1, drop = FALSE]) else
- t(xmat)) %*% wxx
- temp4 = temp4 + blocki %*% Cimat[,k]
- }
- dc.da[,,s,tt] = G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
- }
-
- detastar.da = array(0,c(M,r,r,nn))
- for(s in 1:r)
- for(j in 1:r) {
- t1 = t(dc.da[,j,s,])
- t1 = matrix(t1, M, pp)
- detastar.da[,j,s,] = t1 %*% (if (intercept)
- t(xmat[,-1,drop = FALSE]) else t(xmat))
- }
-
- etastar = (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
- eta = matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
-
- sumWinv = solve((m2adefault(t(colSums(wz)), M = M))[,,1])
-
- deta0.da = array(0,c(M,M,r))
+ if (intercept) {
+ Blist <- vector("list", pp+1)
+ Blist[[1]] <- diag(M)
+ for (i in 2:(pp+1))
+ Blist[[i]] <- Aimat
+ } else {
+ Blist <- vector("list", pp)
+ for (i in 1:(pp))
+ Blist[[i]] <- Aimat
+ }
- AtWi = kronecker(matrix(1, nn, 1), Aimat)
- AtWi = mux111(t(wz), AtWi, M = M, upper= FALSE) # matrix.arg= TRUE,
- AtWi = array(t(AtWi), c(r,M,nn))
+ coeffs <- vlm.wfit(xmat, z, Blist, U = U, matrix.out= TRUE,
+ xij = NULL)$mat.coef
+ c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
+
+
+ int.vec <- if (intercept) c3[,1] else 0 # \boldeta_0
+ Cimat <- if (intercept) t(c3[Index.corner, -1, drop = FALSE]) else
+ t(c3[Index.corner,,drop = FALSE])
+ if (nrow(Cimat) != pp || ncol(Cimat) != r)
+ stop("Cimat wrong shape")
+
+ fred <- kronecker(matrix(1,1,r),
+ if (intercept) xmat[, -1, drop = FALSE] else xmat)
+ fred <- kronecker(fred, matrix(1, M, 1))
+ barney <- kronecker(Aimat, matrix(1, 1, pp))
+ barney <- kronecker(matrix(1, nn, 1), barney)
+
+ temp <- array(t(barney*fred), c(r*pp, M, nn))
+ temp <- aperm(temp, c(2, 1, 3))
+ temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
+ temp <- m2adefault(temp, M = r*pp) # Note M != M here!
+ G <- solve(rowSums(temp, dims = 2))
+
+ dc.da <- array(NA,c(pp,r,r,M))
+ cbindex <- (1:M)[-Index.corner]
+ resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE),
+ M = M,
+ upper = FALSE, as.matrix = TRUE)
- for(ss in 1:r) {
- temp90 = (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1]
- temp92 = array(detastar.da[,,ss,],c(M,r,nn))
- temp93 = mux7(temp92,AtWi)
- temp91 = rowSums(temp93, dims = 2) # M x M
- deta0.da[,,ss] = -(temp90 + temp91) %*% sumWinv
- }
+ for (s in 1:r)
+ for (tt in cbindex) {
+ fred <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
+ t(xmat)) * matrix(resid2[,tt],pp,nn,byrow = TRUE)
+ temp2 <- kronecker(eifun(s,r), rowSums(fred))
+
+ temp4 <- rep(0,pp)
+ for (k in 1:r) {
+ Wiak <- mux22(t(wz),
+ matrix(Aimat[, k], nn, M, byrow = TRUE),
+ M = M, upper = FALSE, as.matrix = TRUE)
+ wxx <- Wiak[,tt] * (if (intercept)
+ xmat[, -1, drop = FALSE] else xmat)
+ blocki <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
+ t(xmat)) %*% wxx
+ temp4 <- temp4 + blocki %*% Cimat[,k]
+ }
+ dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(eifun(s,r),temp4))
+ }
+
+ detastar.da <- array(0,c(M,r,r,nn))
+ for (s in 1:r)
+ for (j in 1:r) {
+ t1 <- t(dc.da[,j,s,])
+ t1 <- matrix(t1, M, pp)
+ detastar.da[,j,s,] <- t1 %*% (if (intercept)
+ t(xmat[,-1,drop = FALSE]) else t(xmat))
+ }
+
+ etastar <- (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
+ eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
+
+ sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,,1])
+
+ deta0.da <- array(0,c(M,M,r))
+
+ AtWi <- kronecker(matrix(1, nn, 1), Aimat)
+ AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi <- array(t(AtWi), c(r,M,nn))
+
+ for (ss in 1:r) {
+ temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1]
+ temp92 <- array(detastar.da[,,ss,],c(M,r,nn))
+ temp93 <- mux7(temp92,AtWi)
+ temp91 <- apply(temp93,1:2,sum) # M x M
+ temp91 <- rowSums(temp93, dims = 2) # M x M
+ deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
+ }
- ans = matrix(0,M,r)
- fred = mux22(t(wz), z - eta, M = M,
- upper = FALSE, as.matrix = TRUE)
- fred.array = array(t(fred %*% Aimat),c(r,1, nn))
- for(s in 1:r) {
- a1 = colSums(fred %*% t(deta0.da[,,s]))
- a2 = colSums(fred * etastar[,s])
- temp92 = array(detastar.da[,,s,],c(M,r,nn))
- temp93 = mux7(temp92, fred.array)
- a3 = rowSums(temp93, dims = 2)
- ans[,s] = a1 + a2 + a3
- }
+ ans <- matrix(0,M,r)
+ fred <- mux22(t(wz), z - eta, M = M,
+ upper = FALSE, as.matrix = TRUE)
+ fred.array <- array(t(fred %*% Aimat),c(r, 1, nn))
+ for (s in 1:r) {
+ a1 <- colSums(fred %*% t(deta0.da[,, s]))
+ a2 <- colSums(fred * etastar[, s])
+ temp92 <- array(detastar.da[, , s, ],c(M, r, nn))
+ temp93 <- mux7(temp92, fred.array)
+ a3 <- rowSums(temp93, dims = 2)
+ ans[,s] <- a1 + a2 + a3
+ }
- ans = -2 * c(ans[cbindex,])
+ ans <- -2 * c(ans[cbindex,])
- ans
+ ans
}
@@ -2113,25 +2184,25 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
-vellipse = function(R, ratio = 1, orientation = 0,
+vellipse <- function(R, ratio = 1, orientation = 0,
center = c(0,0), N=300) {
if (length(center) != 2) stop("center must be of length 2")
- theta = 2*pi*(0:N)/N
- x1 = R*cos(theta)
- y1 = ratio*R*sin(theta)
- x = center[1] + cos(orientation)*x1 - sin(orientation)*y1
- y = center[2] + sin(orientation)*x1 + cos(orientation)*y1
+ theta <- 2*pi*(0:N)/N
+ x1 <- R*cos(theta)
+ y1 <- ratio*R*sin(theta)
+ x <- center[1] + cos(orientation)*x1 - sin(orientation)*y1
+ y <- center[2] + sin(orientation)*x1 + cos(orientation)*y1
cbind(x, y)
}
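
[Note on the hunk above: vellipse() returns N+1 points on an ellipse of "radius" R and aspect ratio ratio, rotated by orientation and translated to center. A quick illustrative call, assuming the definition above is in scope (the argument values are arbitrary; in the installed package the function is internal, so VGAM:::vellipse would be needed).

xy <- vellipse(R = 2, ratio = 0.5, orientation = pi/6,
               center = c(1, -1), N = 100)
plot(xy, type = "l", asp = 1)    # traces the rotated, shifted ellipse
]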
-biplot.qrrvglm = function(x, ...) {
+biplot.qrrvglm <- function(x, ...) {
stop("biplot.qrrvglm has been replaced by the function lvplot.qrrvglm")
}
-lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
- add= FALSE, plot.it= TRUE, rug= TRUE, y = FALSE,
+lvplot.qrrvglm <- function(object, varlvI = FALSE, reference = NULL,
+ add = FALSE, plot.it= TRUE, rug= TRUE, y = FALSE,
type = c("fitted.values", "predictors"),
xlab=paste("Latent Variable",
if (Rank == 1) "" else " 1", sep = ""),
@@ -2139,25 +2210,24 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
fitted.values = "Fitted values") else "Latent Variable 2",
pcex=par()$cex, pcol=par()$col, pch=par()$pch,
llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
- label.arg= FALSE, adj.arg=-0.1,
- ellipse = 0.95, Absolute= FALSE,
- elty=par()$lty, ecol=par()$col, elwd=par()$lwd, egrid = 200,
- chull.arg= FALSE, clty = 2, ccol=par()$col, clwd=par()$lwd,
+ label.arg = FALSE, adj.arg = -0.1,
+ ellipse = 0.95, Absolute = FALSE,
+ elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200,
+ chull.arg = FALSE, clty = 2, ccol=par()$col, clwd=par()$lwd,
cpch = " ",
C = FALSE,
OriginC = c("origin","mean"),
- Clty=par()$lty, Ccol=par()$col, Clwd=par()$lwd,
- Ccex=par()$cex, Cadj.arg=-0.1, stretchC = 1,
- sites= FALSE, spch = NULL, scol=par()$col, scex=par()$cex,
- sfont=par()$font,
- check.ok = TRUE, ...)
-{
+ Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd,
+ Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1,
+ sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
+ sfont = par()$font,
+ check.ok = TRUE, ...) {
if (mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
type <- match.arg(type, c("fitted.values", "predictors"))[1]
if (is.numeric(OriginC))
- OriginC = rep(OriginC, length.out = 2) else {
+ OriginC <- rep(OriginC, length.out = 2) else {
if (mode(OriginC) != "character" && mode(OriginC) != "name")
OriginC <- as.character(substitute(OriginC))
OriginC <- match.arg(OriginC, c("origin","mean"))[1]
@@ -2165,29 +2235,29 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if (length(ellipse) > 1)
stop("ellipse must be of length 1 or 0")
- if (is.logical(ellipse)) {ellipse = if (ellipse) 0.95 else NULL}
+ if (is.logical(ellipse)) {ellipse <- if (ellipse) 0.95 else NULL}
Rank <- object at control$Rank
if (Rank > 2)
stop("can only handle rank 1 or 2 models")
- M = object@misc$M
- NOS = ncol(object@y)
- MSratio = M / NOS # First value is g(mean) = quadratic form in lv
- n = object@misc$n
- colx2.index = object@control$colx2.index
- cx1i = object@control$colx1.index # May be NULL
+ M <- object@misc$M
+ NOS <- ncol(object@y)
+ MSratio <- M / NOS # First value is g(mean) = quadratic form in lv
+ n <- object@misc$n
+ colx2.index <- object@control$colx2.index
+ cx1i <- object@control$colx1.index # May be NULL
if (check.ok)
if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
stop("latent variable plots allowable only for ",
- "Norrr = ~ 1 models")
+ "noRRR = ~ 1 models")
- Coef.list = Coef(object, varlvI = varlvI, reference = reference)
- if ( C) Cmat = Coef.list@C
- nustar = Coef.list@lv # n x Rank
+ Coef.list <- Coef(object, varlvI = varlvI, reference = reference)
+ if ( C) Cmat <- Coef.list@C
+ nustar <- Coef.list@lv # n x Rank
if (!plot.it) return(nustar)
- r.curves = slot(object, type) # n times M (\boldeta or \boldmu)
+ r.curves <- slot(object, type) # n times M (\boldeta or \boldmu)
if (!add) {
if (Rank == 1) {
matplot(nustar,
@@ -2225,15 +2295,15 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
}
if (Rank == 1) {
- for(i in 1:ncol(r.curves)) {
- xx = nustar
- yy = r.curves[,i]
- o = sort.list(xx)
- xx = xx[o]
- yy = yy[o]
+ for (i in 1:ncol(r.curves)) {
+ xx <- nustar
+ yy <- r.curves[,i]
+ o <- sort.list(xx)
+ xx <- xx[o]
+ yy <- yy[o]
lines(xx, yy, col=lcol[i], lwd=llwd[i], lty=llty[i])
if ( y && type == "fitted.values") {
- ypts = object at y
+ ypts <- object at y
if (ncol(as.matrix(ypts)) == ncol(r.curves))
points(xx, ypts[o,i], col=pcol[i],
cex=pcex[i], pch=pch[i])
@@ -2241,53 +2311,53 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
}
if (rug) rug(xx)
} else {
- for(i in 1:ncol(r.curves))
+ for (i in 1:ncol(r.curves))
points(Coef.list at Optimum[1,i], Coef.list at Optimum[2,i],
col=pcol[i], cex=pcex[i], pch=pch[i])
if (label.arg) {
- for(i in 1:ncol(r.curves))
+ for (i in 1:ncol(r.curves))
text(Coef.list at Optimum[1,i], Coef.list at Optimum[2,i],
labels=(dimnames(Coef.list at Optimum)[[2]])[i],
adj=adj.arg[i], col=pcol[i], cex=pcex[i])
}
if (chull.arg) {
- hull = chull(nustar[,1], nustar[,2])
- hull = c(hull, hull[1])
+ hull <- chull(nustar[,1], nustar[,2])
+ hull <- c(hull, hull[1])
lines(nustar[hull,1], nustar[hull,2], type = "b", pch=cpch,
lty=clty, col=ccol, lwd=clwd)
}
if (length(ellipse)) {
- ellipse.temp = if (ellipse > 0) ellipse else 0.95
+ ellipse.temp <- if (ellipse > 0) ellipse else 0.95
if (ellipse < 0 && (!object at control$EqualTolerances || varlvI))
stop("an equal-tolerances assumption and 'varlvI = FALSE' ",
"is needed for 'ellipse' < 0")
if ( check.ok ) {
- colx1.index = object at control$colx1.index
+ colx1.index <- object at control$colx1.index
if (!(length(colx1.index) == 1 &&
names(colx1.index) == "(Intercept)"))
stop("can only plot ellipses for intercept models only")
}
- for(i in 1:ncol(r.curves)) {
- cutpoint = object at family@linkfun( if (Absolute) ellipse.temp
+ for (i in 1:ncol(r.curves)) {
+ cutpoint <- object at family@linkfun( if (Absolute) ellipse.temp
else Coef.list at Maximum[i] * ellipse.temp,
extra = object at extra)
if (MSratio > 1)
- cutpoint = cutpoint[1,1]
+ cutpoint <- cutpoint[1,1]
- cutpoint = object at family@linkfun(Coef.list at Maximum[i],
+ cutpoint <- object at family@linkfun(Coef.list at Maximum[i],
extra = object at extra) - cutpoint
if (is.finite(cutpoint) && cutpoint > 0) {
- Mmat = diag(rep(ifelse(object at control$Crow1positive, 1, -1),
+ Mmat <- diag(rep(ifelse(object at control$Crow1positive, 1, -1),
length.out = Rank))
- etoli = eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
+ etoli <- eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
A=ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
- if (ellipse < 0) A = B = -ellipse / 2
+ if (ellipse < 0) A <- B <- -ellipse / 2
- theta.angle = asin(etoli$vector[2,1]) *
+ theta.angle <- asin(etoli$vector[2,1]) *
ifelse(object at control$Crow1positive[2], 1, -1)
if (object at control$Crow1positive[1])
- theta.angle = pi - theta.angle
+ theta.angle <- pi - theta.angle
if (all(is.finite(c(A,B))))
lines(vellipse(R = 2*A, ratio = B/A,
orientation = theta.angle,
@@ -2300,17 +2370,17 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if ( C ) {
if (is.character(OriginC) && OriginC == "mean")
- OriginC = c(mean(nustar[,1]), mean(nustar[,2]))
+ OriginC <- c(mean(nustar[,1]), mean(nustar[,2]))
if (is.character(OriginC) && OriginC == "origin")
- OriginC = c(0,0)
- for(i in 1:nrow(Cmat))
+ OriginC <- c(0,0)
+ for (i in 1:nrow(Cmat))
arrows(x0=OriginC[1], y0=OriginC[2],
x1=OriginC[1] + stretchC*Cmat[i,1],
y1=OriginC[2] + stretchC*Cmat[i,2],
lty=Clty[i], col=Ccol[i], lwd=Clwd[i])
if (label.arg) {
- temp200 = dimnames(Cmat)[[1]]
- for(i in 1:nrow(Cmat))
+ temp200 <- dimnames(Cmat)[[1]]
+ for (i in 1:nrow(Cmat))
text(OriginC[1] + stretchC*Cmat[i,1],
OriginC[2] + stretchC*Cmat[i,2], col=Ccol[i],
labels=temp200[i], adj=Cadj.arg[i], cex=Ccex[i])
@@ -2327,7 +2397,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
-lvplot.rrvglm = function(object,
+lvplot.rrvglm <- function(object,
A = TRUE,
C = TRUE,
scores = FALSE, plot.it= TRUE,
@@ -2356,32 +2426,31 @@ lvplot.rrvglm = function(object,
scex=par()$cex,
scol=par()$col,
slabels=rownames(x2mat),
- ...)
-{
+ ...) {
if (object at control$Rank != 2 && plot.it)
stop("can only handle rank-2 models")
- M = object at misc$M
- n = object at misc$n
- colx2.index = object at control$colx2.index
- Coef.list = Coef(object)
- Amat = Coef.list at A
- Cmat = Coef.list at C
+ M <- object at misc$M
+ n <- object at misc$n
+ colx2.index <- object at control$colx2.index
+ Coef.list <- Coef(object)
+ Amat <- Coef.list at A
+ Cmat <- Coef.list at C
- Amat = Amat * scaleA
- dimnames(Amat) = list(object at misc$predictors.names, NULL)
- Cmat = Cmat / scaleA
+ Amat <- Amat * scaleA
+ dimnames(Amat) <- list(object at misc$predictors.names, NULL)
+ Cmat <- Cmat / scaleA
if (!length(object at x)) {
- object at x = model.matrixvlm(object, type = "lm")
+ object at x <- model.matrixvlm(object, type = "lm")
}
- x2mat = object at x[, colx2.index, drop = FALSE]
- nuhat = x2mat %*% Cmat
+ x2mat <- object at x[, colx2.index, drop = FALSE]
+ nuhat <- x2mat %*% Cmat
if (!plot.it) return(as.matrix(nuhat))
- index.nosz = 1:M
- allmat = rbind(if (A) Amat else NULL,
+ index.nosz <- 1:M
+ allmat <- rbind(if (A) Amat else NULL,
if (C) Cmat else NULL,
if (scores) nuhat else NULL)
@@ -2389,19 +2458,19 @@ lvplot.rrvglm = function(object,
xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
if (A) {
- Aadj = rep(Aadj, length.out = length(index.nosz))
- Acex = rep(Acex, length.out = length(index.nosz))
- Acol = rep(Acol, length.out = length(index.nosz))
+ Aadj <- rep(Aadj, length.out = length(index.nosz))
+ Acex <- rep(Acex, length.out = length(index.nosz))
+ Acol <- rep(Acol, length.out = length(index.nosz))
if (length(Alabels) != M)
stop("'Alabels' must be of length ", M)
if (length(Apch)) {
- Apch = rep(Apch, length.out = length(index.nosz))
- for(i in index.nosz)
+ Apch <- rep(Apch, length.out = length(index.nosz))
+ for (i in index.nosz)
points(Amat[i,1],
Amat[i,2],
pch=Apch[i],cex=Acex[i],col=Acol[i])
} else {
- for(i in index.nosz)
+ for (i in index.nosz)
text(Amat[i,1], Amat[i,2],
Alabels[i], cex=Acex[i],
col=Acol[i], adj=Aadj[i])
@@ -2409,19 +2478,19 @@ lvplot.rrvglm = function(object,
}
if (C) {
- p2 = nrow(Cmat)
- gapC = rep(gapC, length.out = p2)
- Cadj = rep(Cadj, length.out = p2)
- Ccex = rep(Ccex, length.out = p2)
- Ccol = rep(Ccol, length.out = p2)
- Clwd = rep(Clwd, length.out = p2)
- Clty = rep(Clty, length.out = p2)
+ p2 <- nrow(Cmat)
+ gapC <- rep(gapC, length.out = p2)
+ Cadj <- rep(Cadj, length.out = p2)
+ Ccex <- rep(Ccex, length.out = p2)
+ Ccol <- rep(Ccol, length.out = p2)
+ Clwd <- rep(Clwd, length.out = p2)
+ Clty <- rep(Clty, length.out = p2)
if (length(Clabels) != p2)
stop("'length(Clabels)' must be equal to ", p2)
- for(ii in 1:p2) {
+ for (ii in 1:p2) {
arrows(0, 0, Cmat[ii,1], Cmat[ii,2],
lwd=Clwd[ii], lty=Clty[ii], col=Ccol[ii])
- const = 1 + gapC[ii] / sqrt(Cmat[ii,1]^2 + Cmat[ii,2]^2)
+ const <- 1 + gapC[ii] / sqrt(Cmat[ii,1]^2 + Cmat[ii,2]^2)
text(const*Cmat[ii,1], const*Cmat[ii,2],
Clabels[ii], cex=Ccex[ii],
adj=Cadj[ii], col=Ccol[ii])
@@ -2429,24 +2498,24 @@ lvplot.rrvglm = function(object,
}
if (scores) {
- ugrp = unique(groups)
- nlev = length(ugrp) # number of groups
- clty = rep(clty, length.out = nlev)
- clwd = rep(clwd, length.out = nlev)
- ccol = rep(ccol, length.out = nlev)
+ ugrp <- unique(groups)
+ nlev <- length(ugrp) # number of groups
+ clty <- rep(clty, length.out = nlev)
+ clwd <- rep(clwd, length.out = nlev)
+ ccol <- rep(ccol, length.out = nlev)
if (length(spch))
- spch = rep(spch, length.out = n)
- scol = rep(scol, length.out = n)
- scex = rep(scex, length.out = n)
- for(ii in ugrp) {
- gp = groups == ii
+ spch <- rep(spch, length.out = n)
+ scol <- rep(scol, length.out = n)
+ scex <- rep(scex, length.out = n)
+ for (ii in ugrp) {
+ gp <- groups == ii
if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
length(unique(scol[gp])) != 1 ||
length(unique(scex[gp])) != 1))
warning("spch/scol/scex is different for individuals ",
"from the same group")
- temp = nuhat[gp,,drop = FALSE]
+ temp <- nuhat[gp,,drop = FALSE]
if (length(spch)) {
points(temp[,1], temp[,2], cex=scex[gp], pch=spch[gp],
col=scol[gp])
@@ -2455,8 +2524,8 @@ lvplot.rrvglm = function(object,
col=scol[gp])
}
if (chull.arg) {
- hull = chull(temp[,1],temp[,2])
- hull = c(hull, hull[1])
+ hull <- chull(temp[,1],temp[,2])
+ hull <- c(hull, hull[1])
lines(temp[hull,1], temp[hull,2],
type = "b", lty=clty[ii],
col=ccol[ii], lwd=clwd[ii], pch = " ")
@@ -2475,12 +2544,12 @@ lvplot.rrvglm = function(object,
Coef.rrvglm <- function(object, ...) {
M <- object at misc$M
n <- object at misc$n
- colx1.index = object at control$colx1.index
- colx2.index = object at control$colx2.index
- p1 = length(colx1.index) # May be 0
+ colx1.index <- object at control$colx1.index
+ colx2.index <- object at control$colx2.index
+ p1 <- length(colx1.index) # May be 0
Amat <- object at constraints[[colx2.index[1]]]
- B1mat = if (p1)
+ B1mat <- if (p1)
coefvlm(object, matrix.out = TRUE)[colx1.index,,drop = FALSE] else
NULL
@@ -2491,24 +2560,24 @@ lvplot.rrvglm = function(object,
Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
- Rank = object at control$Rank
- lv.names = if (Rank>1) paste("lv", 1:Rank, sep = "") else "lv"
- dimnames(Amat) = list(object at misc$predictors.names, lv.names)
- dimnames(Cmat) = list(dimnames(Cmat)[[1]], lv.names)
+ Rank <- object at control$Rank
+ lv.names <- if (Rank>1) paste("lv", 1:Rank, sep = "") else "lv"
+ dimnames(Amat) <- list(object at misc$predictors.names, lv.names)
+ dimnames(Cmat) <- list(dimnames(Cmat)[[1]], lv.names)
- ans = new(Class = "Coef.rrvglm",
+ ans <- new(Class = "Coef.rrvglm",
A = Amat,
C = Cmat,
Rank = Rank,
colx2.index = colx2.index)
if (!is.null(colx1.index)) {
- ans at colx1.index = colx1.index
- ans at B1 = B1mat
+ ans at colx1.index <- colx1.index
+ ans at B1 <- B1mat
}
if (object at control$Corner)
- ans at Atilde = Amat[-c(object at control$Index.corner,
+ ans at Atilde <- Amat[-c(object at control$Index.corner,
object at control$szero),,drop = FALSE]
ans
}
@@ -2516,15 +2585,16 @@ lvplot.rrvglm = function(object,
-setMethod("Coef", "rrvglm", function(object, ...) Coef.rrvglm(object, ...))
+setMethod("Coef", "rrvglm",
+ function(object, ...) Coef.rrvglm(object, ...))
-show.Coef.rrvglm = function(x, ...) {
+show.Coef.rrvglm <- function(x, ...) {
- object = x
+ object <- x
cat("A matrix:\n")
print(object at A, ...)
@@ -2532,7 +2602,7 @@ show.Coef.rrvglm = function(x, ...) {
cat("\nC matrix:\n")
print(object at C, ...)
- p1 = length(object at colx1.index)
+ p1 <- length(object at colx1.index)
if (p1) {
cat("\nB1 matrix:\n")
print(object at B1, ...)
@@ -2563,8 +2633,8 @@ setMethod("lvplot", "rrvglm",
invisible(lvplot.rrvglm(object, ...))})
-biplot.rrvglm = function(x, ...)
- lvplot(object=x, ...)
+biplot.rrvglm <- function(x, ...)
+ lvplot(object = x, ...)
setMethod("biplot", "rrvglm", function(x, ...)
invisible(biplot.rrvglm(x, ...)))
@@ -2572,43 +2642,44 @@ setMethod("biplot", "rrvglm", function(x, ...)
-summary.qrrvglm = function(object,
+summary.qrrvglm <- function(object,
varlvI = FALSE, reference = NULL, ...) {
- answer = object
- answer at post$Coef = Coef(object, varlvI = varlvI, reference = reference,
+ answer <- object
+ answer at post$Coef <- Coef(object, varlvI = varlvI, reference = reference,
...) # Store it here; non-elegant
if (length((answer at post$Coef)@dispersion) &&
length(object at misc$estimated.dispersion) &&
object at misc$estimated.dispersion)
- answer at dispersion =
- answer at misc$dispersion = (answer at post$Coef)@dispersion
+ answer at dispersion <-
+ answer at misc$dispersion <- (answer at post$Coef)@dispersion
as(answer, "summary.qrrvglm")
}
-show.summary.qrrvglm = function(x, ...) {
+show.summary.qrrvglm <- function(x, ...) {
- cat("\nCall:\n")
- dput(x at call)
+ cat("\nCall:\n")
+ dput(x at call)
- print(x at post$Coef, ...) # non-elegant programming
+ print(x at post$Coef, ...) # non-elegant programming
- if (length(x at dispersion) > 1) {
- cat("\nDispersion parameters:\n")
- if (length(x at misc$ynames)) {
- names(x at dispersion) = x at misc$ynames
- print(x at dispersion, ...)
- } else
- cat(x at dispersion, fill = TRUE)
- cat("\n")
- } else if (length(x at dispersion) == 1) {
- cat("\nDispersion parameter: ", x at dispersion, "\n")
+ if (length(x at dispersion) > 1) {
+ cat("\nDispersion parameters:\n")
+ if (length(x at misc$ynames)) {
+ names(x at dispersion) <- x at misc$ynames
+ print(x at dispersion, ...)
+ } else {
+ cat(x at dispersion, fill = TRUE)
}
+ cat("\n")
+ } else if (length(x at dispersion) == 1) {
+ cat("\nDispersion parameter: ", x at dispersion, "\n")
+ }
}
@@ -2647,91 +2718,92 @@ setMethod("show", "Coef.rrvglm", function(object)
- myrrcontrol = rrvglm.control(Rank = Rank, Index.corner = Index.corner,
- szero = szero, ...)
- object.save = y
+ myrrcontrol <- rrvglm.control(Rank = Rank,
+ Index.corner = Index.corner,
+ szero = szero, ...)
+ object.save <- y
if (is(y, "rrvglm")) {
- y = object.save at y
+ y <- object.save at y
} else {
- y = as.matrix(y)
- y = as(y, "matrix")
+ y <- as.matrix(y)
+ y <- as(y, "matrix")
}
if (length(dim(y)) != 2 || nrow(y) < 3 || ncol(y) < 3)
stop("y must be a matrix with >= 3 rows & columns, ",
"or a rrvglm() object")
- ei = function(i, n) diag(n)[,i,drop = FALSE]
- .grc.df = data.frame(Row.2 = ei(2, nrow(y)))
+ ei <- function(i, n) diag(n)[, i, drop = FALSE]
+ .grc.df <- data.frame(Row.2 = eifun(2, nrow(y)))
- yn1 = if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
+ yn1 <- if (length(dimnames(y)[[1]])) dimnames(y)[[1]] else
paste("X2.", 1:nrow(y), sep = "")
- warn.save = options()$warn
- options(warn = -3) # Suppress the warnings (hopefully, temporarily)
+ warn.save <- options()$warn
+ options(warn = -3) # Suppress the warnings (hopefully, temporarily)
if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
- yn1 = paste("X2.", 1:nrow(y), sep = "")
+ yn1 <- paste("X2.", 1:nrow(y), sep = "")
options(warn = warn.save)
- Row. = factor(1:nrow(y))
- modmat.row = model.matrix( ~ Row.)
- Col. = factor(1:ncol(y))
- modmat.col = model.matrix( ~ Col.)
+ Row. <- factor(1:nrow(y))
+ modmat.row <- model.matrix( ~ Row.)
+ Col. <- factor(1:ncol(y))
+ modmat.col <- model.matrix( ~ Col.)
- cms = list("(Intercept)" = matrix(1, ncol(y), 1))
- for(ii in 2:nrow(y)) {
- cms[[paste("Row.", ii, sep = "")]] = matrix(1, ncol(y), 1)
- .grc.df[[paste("Row.", ii, sep = "")]] = modmat.row[,ii]
+ cms <- list("(Intercept)" = matrix(1, ncol(y), 1))
+ for (ii in 2:nrow(y)) {
+ cms[[paste("Row.", ii, sep = "")]] <- matrix(1, ncol(y), 1)
+ .grc.df[[paste("Row.", ii, sep = "")]] <- modmat.row[,ii]
}
- for(ii in 2:ncol(y)) {
- cms[[paste("Col.", ii, sep = "")]] =
+ for (ii in 2:ncol(y)) {
+ cms[[paste("Col.", ii, sep = "")]] <-
modmat.col[,ii,drop = FALSE]
- .grc.df[[paste("Col.", ii, sep = "")]] = rep(1, nrow(y))
+ .grc.df[[paste("Col.", ii, sep = "")]] <- rep(1, nrow(y))
}
- for(ii in 2:nrow(y)) {
- cms[[yn1[ii]]] = diag(ncol(y))
- .grc.df[[yn1[ii]]] = ei(ii, nrow(y))
+ for (ii in 2:nrow(y)) {
+ cms[[yn1[ii]]] <- diag(ncol(y))
+ .grc.df[[yn1[ii]]] <- eifun(ii, nrow(y))
}
- dimnames(.grc.df) = list(if (length(dimnames(y)[[1]]))
- dimnames(y)[[1]] else
- as.character(1:nrow(y)),
- dimnames(.grc.df)[[2]])
+ dimnames(.grc.df) <- list(if (length(dimnames(y)[[1]]))
+ dimnames(y)[[1]] else
+ as.character(1:nrow(y)),
+ dimnames(.grc.df)[[2]])
- str1 = "~ Row.2"
+ str1 <- "~ Row.2"
if (nrow(y) > 2)
- for(ii in 3:nrow(y))
- str1 = paste(str1, paste("Row.", ii, sep = ""), sep = " + ")
- for(ii in 2:ncol(y))
- str1 = paste(str1, paste("Col.", ii, sep = ""), sep = " + ")
- str2 = paste("y ", str1)
- for(ii in 2:nrow(y))
- str2 = paste(str2, yn1[ii], sep = " + ")
- myrrcontrol$Norrr = as.formula(str1) # Overwrite this
+ for (ii in 3:nrow(y))
+ str1 <- paste(str1, paste("Row.", ii, sep = ""), sep = " + ")
+ for (ii in 2:ncol(y))
+ str1 <- paste(str1, paste("Col.", ii, sep = ""), sep = " + ")
+ str2 <- paste("y ", str1)
+ for (ii in 2:nrow(y))
+ str2 <- paste(str2, yn1[ii], sep = " + ")
+ myrrcontrol$noRRR <- as.formula(str1) # Overwrite this
assign(".grc.df", .grc.df, envir = VGAM:::VGAMenv)
- warn.save = options()$warn
+ warn.save <- options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
- answer = if (is(object.save, "rrvglm")) object.save else
- rrvglm(as.formula(str2), family = poissonff,
- constraints = cms, control = myrrcontrol,
- data = .grc.df)
+ answer <- if (is(object.save, "rrvglm")) object.save else
+ rrvglm(as.formula(str2), family = poissonff,
+ constraints = cms, control = myrrcontrol,
+ data = .grc.df)
options(warn = warn.save)
if (summary.arg) {
- answer = as(answer, "rrvglm")
+ answer <- as(answer, "rrvglm")
- answer = summary.rrvglm(answer, h.step = h.step)
+ answer <- summary.rrvglm(answer, h.step = h.step)
} else {
- answer = as(answer, "grc")
+ answer <- as(answer, "grc")
}
if (exists(".grc.df", envir = VGAM:::VGAMenv))
- rm(".grc.df", envir = VGAM:::VGAMenv)
+ rm(".grc.df", envir = VGAM:::VGAMenv)
answer
}
-summary.grc = function(object, ...) {
+summary.grc <- function(object, ...) {
grc(object, summary.arg= TRUE, ...)
}
@@ -2739,7 +2811,7 @@ summary.grc = function(object, ...) {
-trplot.qrrvglm = function(object,
+trplot.qrrvglm <- function(object,
whichSpecies = NULL,
add = FALSE, plot.it = TRUE,
label.sites = FALSE,
@@ -2753,169 +2825,175 @@ trplot.qrrvglm = function(object,
tcol= rep(par()$col, length.out = nos*(nos-1)/2),
xlab = NULL, ylab = NULL,
main = "", # "Trajectory plot",
- type = "b", check.ok = TRUE, ...) {
- coef.obj = Coef(object) # use defaults for those two arguments
- if (coef.obj at Rank != 1) stop("object must be a rank-1 model")
- fv = fitted(object)
- modelno = object at control$modelno # 1,2,3, or 0
- NOS = ncol(fv) # Number of species
- M = object at misc$M #
- nn = nrow(fv) # Number of sites
- if (length(sitenames))
- sitenames = rep(sitenames, length.out = nn)
- sppNames = dimnames(object at y)[[2]]
- if (!length(whichSpecies)) {
- whichSpecies = sppNames[1:NOS]
- whichSpecies.numer = 1:NOS
- } else
- if (is.numeric(whichSpecies)) {
- whichSpecies.numer = whichSpecies
- whichSpecies = sppNames[whichSpecies.numer] # Convert to character
- } else
- whichSpecies.numer = match(whichSpecies, sppNames)
- nos = length(whichSpecies) # nos = number of species to be plotted
+ type = "b",
+ check.ok = TRUE, ...) {
+ coef.obj <- Coef(object) # use defaults for those two arguments
+ if (coef.obj at Rank != 1)
+ stop("object must be a rank-1 model")
+ fv <- fitted(object)
+ modelno <- object at control$modelno # 1,2,3, or 0
+ NOS <- ncol(fv) # Number of species
+ M <- object at misc$M #
+ nn <- nrow(fv) # Number of sites
+ if (length(sitenames))
+ sitenames <- rep(sitenames, length.out = nn)
+ sppNames <- dimnames(object at y)[[2]]
+ if (!length(whichSpecies)) {
+ whichSpecies <- sppNames[1:NOS]
+ whichSpecies.numer <- 1:NOS
+ } else
+ if (is.numeric(whichSpecies)) {
+ whichSpecies.numer <- whichSpecies
+ whichSpecies <- sppNames[whichSpecies.numer] # Convert to character
+ } else {
+ whichSpecies.numer <- match(whichSpecies, sppNames)
+ }
+ nos <- length(whichSpecies) # nos = number of species to be plotted
+
+ if (length(whichSpecies.numer) <= 1)
+ stop("must have at least 2 species to be plotted")
+ cx1i <- object at control$colx1.index
+ if (check.ok)
+ if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
+ stop("trajectory plots allowable only for noRRR = ~ 1 models")
+
+ first.spp <- iam(1,1,M = M,both = TRUE,diag = FALSE)$row.index
+ second.spp <- iam(1,1,M = M,both = TRUE,diag = FALSE)$col.index
+ myxlab <- if (length(whichSpecies.numer) == 2) {
+ paste("Fitted value for",
+ if (is.character(whichSpecies.numer))
+ whichSpecies.numer[1] else
+ sppNames[whichSpecies.numer[1]])
+ } else "Fitted value for 'first' species"
+ myxlab <- if (length(xlab)) xlab else myxlab
+ myylab <- if (length(whichSpecies.numer) == 2) {
+ paste("Fitted value for",
+ if (is.character(whichSpecies.numer))
+ whichSpecies.numer[2] else
+ sppNames[whichSpecies.numer[2]])
+ } else "Fitted value for 'second' species"
+ myylab <- if (length(ylab)) ylab else myylab
+ if (!add) {
+ xxx <- if (axes.equal) fv[,whichSpecies.numer] else
+ fv[,whichSpecies.numer[first.spp]]
+ yyy <- if (axes.equal) fv[,whichSpecies.numer] else
+ fv[,whichSpecies.numer[second.spp]]
+ matplot(xxx, yyy, type = "n", log = log, xlab = myxlab,
+ ylab = myylab, main = main, ...)
+ }
- if (length(whichSpecies.numer) <= 1)
- stop("must have at least 2 species to be plotted")
- cx1i = object at control$colx1.index
- if (check.ok)
- if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
- stop("trajectory plots allowable only for Norrr = ~ 1 models")
-
- first.spp = iam(1,1,M = M,both = TRUE,diag = FALSE)$row.index
- second.spp = iam(1,1,M = M,both = TRUE,diag = FALSE)$col.index
- myxlab = if (length(whichSpecies.numer) == 2) {
- paste("Fitted value for",
- if (is.character(whichSpecies.numer))
- whichSpecies.numer[1] else
- sppNames[whichSpecies.numer[1]])
- } else "Fitted value for 'first' species"
- myxlab = if (length(xlab)) xlab else myxlab
- myylab = if (length(whichSpecies.numer) == 2) {
- paste("Fitted value for",
- if (is.character(whichSpecies.numer))
- whichSpecies.numer[2] else
- sppNames[whichSpecies.numer[2]])
- } else "Fitted value for 'second' species"
- myylab = if (length(ylab)) ylab else myylab
- if (!add) {
- xxx = if (axes.equal) fv[,whichSpecies.numer] else
- fv[,whichSpecies.numer[first.spp]]
- yyy = if (axes.equal) fv[,whichSpecies.numer] else
- fv[,whichSpecies.numer[second.spp]]
- matplot(xxx, yyy, type = "n", log=log, xlab=myxlab,
- ylab=myylab, main=main, ...)
- }
-
- lwd = rep(lwd, length.out = nos*(nos-1)/2)
- col = rep(col, length.out = nos*(nos-1)/2)
- lty = rep(lty, length.out = nos*(nos-1)/2)
- tcol = rep(tcol, length.out = nos*(nos-1)/2)
-
- oo = order(coef.obj at lv) # Sort by the latent variable
- ii = 0
- col = rep(col, length = nos*(nos-1)/2)
- species.names = NULL
- if (plot.it)
- for(i1 in seq(whichSpecies.numer)) {
- for(i2 in seq(whichSpecies.numer))
- if (i1 < i2) {
- ii = ii + 1
- species.names = rbind(species.names,
- cbind(sppNames[i1], sppNames[i2]))
- matplot(fv[oo,whichSpecies.numer[i1]],
- fv[oo,whichSpecies.numer[i2]],
- type=type, add = TRUE,
- lty=lty[ii], lwd=lwd[ii], col=col[ii],
- pch = if (label.sites) " " else "*" )
- if (label.sites && length(sitenames))
- text(fv[oo,whichSpecies.numer[i1]],
- fv[oo,whichSpecies.numer[i2]],
- labels=sitenames[oo], cex=cex, col=tcol[ii])
- }
+ lwd <- rep(lwd, length.out = nos*(nos-1)/2)
+ col <- rep(col, length.out = nos*(nos-1)/2)
+ lty <- rep(lty, length.out = nos*(nos-1)/2)
+ tcol <- rep(tcol, length.out = nos*(nos-1)/2)
+
+ oo <- order(coef.obj at lv) # Sort by the latent variable
+ ii <- 0
+ col <- rep(col, length = nos*(nos-1)/2)
+ species.names <- NULL
+ if (plot.it)
+ for (i1 in seq(whichSpecies.numer)) {
+ for (i2 in seq(whichSpecies.numer))
+ if (i1 < i2) {
+ ii <- ii + 1
+ species.names <- rbind(species.names,
+ cbind(sppNames[i1], sppNames[i2]))
+ matplot(fv[oo, whichSpecies.numer[i1]],
+ fv[oo, whichSpecies.numer[i2]],
+ type = type, add = TRUE,
+ lty = lty[ii], lwd = lwd[ii], col = col[ii],
+ pch = if (label.sites) " " else "*" )
+ if (label.sites && length(sitenames))
+ text(fv[oo, whichSpecies.numer[i1]],
+ fv[oo, whichSpecies.numer[i2]],
+ labels = sitenames[oo], cex = cex, col = tcol[ii])
+ }
}
- invisible(list(species.names=species.names,
- sitenames=sitenames[oo]))
+ invisible(list(species.names = species.names,
+ sitenames = sitenames[oo]))
}
if (!isGeneric("trplot"))
- setGeneric("trplot",
- function(object, ...) standardGeneric("trplot"))
+ setGeneric("trplot",
+ function(object, ...) standardGeneric("trplot"))
setMethod("trplot", "qrrvglm",
function(object, ...) trplot.qrrvglm(object, ...))
+setMethod("trplot", "cao",
+ function(object, ...) trplot.qrrvglm(object, ...))
+
-vcovrrvglm = function(object, ...) {
+vcovrrvglm <- function(object, ...) {
summary.rrvglm(object, ...)@cov.unscaled
}
-vcovqrrvglm = function(object,
+vcovqrrvglm <- function(object,
ITolerances = object at control$EqualTolerances,
MaxScale = c("predictors", "response"),
dispersion = rep(if (length(sobj at dispersion))
sobj at dispersion else 1,
length.out = M), ...) {
- stop("this function is not yet completed")
-
- if (mode(MaxScale) != "character" && mode(MaxScale) != "name")
- MaxScale <- as.character(substitute(MaxScale))
- MaxScale <- match.arg(MaxScale, c("predictors", "response"))[1]
- if (MaxScale != "predictors")
- stop("can currently only handle MaxScale='predictors'")
-
- sobj = summary(object)
- cobj = Coef(object, ITolerances = ITolerances, ...)
- M = nrow(cobj at A)
- dispersion = rep(dispersion, length.out = M)
- if (cobj at Rank != 1)
- stop("object must be a rank 1 model")
-
- dvecMax = cbind(1, -0.5 * cobj at A / c(cobj at D), (cobj at A / c(2*cobj at D))^2)
- dvecTol = cbind(0, 0, 1 / c(-2 * cobj at D)^1.5)
- dvecOpt = cbind(0, -0.5 / c(cobj at D), 0.5 * cobj at A / c(cobj at D^2))
-
- if ((length(object at control$colx1.index) != 1) ||
- (names(object at control$colx1.index) != "(Intercept)"))
- stop("Can only handle Norrr=~1 models")
-
- okvals = c(3*M,2*M+1)
- if (all(length(coef(object)) != okvals))
- stop("Can only handle intercepts-only model with ",
- "EqualTolerances = FALSE")
-
- answer = NULL
- Cov.unscaled = array(NA, c(3,3,M), dimnames=list(
- c("(Intercept)", "lv", "lv^2"),
- c("(Intercept)", "lv", "lv^2"), dimnames(cobj at D)[[3]]))
- for(spp in 1:M) {
- index = c(M+ifelse(object at control$EqualTolerances, 1, M) + spp,
- spp,
- M+ifelse(object at control$EqualTolerances, 1, spp))
- vcov = Cov.unscaled[,,spp] =
- sobj at cov.unscaled[index,index] # Order is A, D, B1
- se2Max = dvecMax[spp,,drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
- se2Tol = dvecTol[spp,,drop = FALSE] %*% vcov %*% cbind(dvecTol[spp,])
- se2Opt = dvecOpt[spp,,drop = FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
- answer = rbind(answer, dispersion[spp]^0.5 *
- c(se2Opt=se2Opt, se2Tol=se2Tol, se2Max=se2Max))
+ stop("this function is not yet completed")
+
+ if (mode(MaxScale) != "character" && mode(MaxScale) != "name")
+ MaxScale <- as.character(substitute(MaxScale))
+ MaxScale <- match.arg(MaxScale, c("predictors", "response"))[1]
+ if (MaxScale != "predictors")
+ stop("can currently only handle MaxScale='predictors'")
+
+ sobj <- summary(object)
+ cobj <- Coef(object, ITolerances = ITolerances, ...)
+ M <- nrow(cobj at A)
+ dispersion <- rep(dispersion, length.out = M)
+ if (cobj at Rank != 1)
+ stop("object must be a rank 1 model")
+
+ dvecMax <- cbind(1, -0.5 * cobj at A / c(cobj at D), (cobj at A / c(2*cobj at D))^2)
+ dvecTol <- cbind(0, 0, 1 / c(-2 * cobj at D)^1.5)
+ dvecOpt <- cbind(0, -0.5 / c(cobj at D), 0.5 * cobj at A / c(cobj at D^2))
+
+ if ((length(object at control$colx1.index) != 1) ||
+ (names(object at control$colx1.index) != "(Intercept)"))
+ stop("Can only handle noRRR=~1 models")
+
+ okvals <- c(3*M, 2*M+1)
+ if (all(length(coef(object)) != okvals))
+ stop("Can only handle intercepts-only model with ",
+ "EqualTolerances = FALSE")
+
+ answer <- NULL
+ Cov.unscaled <- array(NA, c(3, 3, M), dimnames = list(
+ c("(Intercept)", "lv", "lv^2"),
+    c("(Intercept)", "lv", "lv^2"), dimnames(cobj@D)[[3]]))
+  for (spp in 1:M) {
+    index <- c(M + ifelse(object@control$EqualTolerances, 1, M) + spp,
+               spp,
+               M + ifelse(object@control$EqualTolerances, 1, spp))
+    vcov <- Cov.unscaled[,,spp] <-
+      sobj@cov.unscaled[index, index] # Order is A, D, B1
+ se2Max <- dvecMax[spp,,drop = FALSE] %*% vcov %*% cbind(dvecMax[spp,])
+ se2Tol <- dvecTol[spp,,drop = FALSE] %*% vcov %*% cbind(dvecTol[spp,])
+ se2Opt <- dvecOpt[spp,,drop = FALSE] %*% vcov %*% cbind(dvecOpt[spp,])
+ answer <- rbind(answer, dispersion[spp]^0.5 *
+ c(se2Opt = se2Opt, se2Tol = se2Tol, se2Max = se2Max))
}
- link.function = if (MaxScale == "predictors")
-    remove.arg(object@misc$predictors.names[1]) else ""
-  dimnames(answer) = list(dimnames(cobj@D)[[3]], c("Optimum", "Tolerance",
- if (nchar(link.function))
- paste(link.function, "(Maximum)", sep = "") else
- "Maximum"))
- NAthere = is.na(answer %*% rep(1, length.out = 3))
- answer[NAthere,] = NA # NA in tolerance means NA everywhere else
- new(Class = "vcov.qrrvglm",
- Cov.unscaled=Cov.unscaled,
- dispersion=dispersion,
- se=sqrt(answer))
+ link.function <- if (MaxScale == "predictors")
+    remove.arg(object@misc$predictors.names[1]) else ""
+  dimnames(answer) <- list(dimnames(cobj@D)[[3]], c("Optimum", "Tolerance",
+ if (nchar(link.function))
+ paste(link.function, "(Maximum)", sep = "") else
+ "Maximum"))
+ NAthere <- is.na(answer %*% rep(1, length.out = 3))
+ answer[NAthere,] <- NA # NA in tolerance means NA everywhere else
+ new(Class = "vcov.qrrvglm",
+ Cov.unscaled = Cov.unscaled,
+ dispersion = dispersion,
+ se = sqrt(answer))
}
@@ -2938,12 +3016,13 @@ model.matrix.qrrvglm <- function(object,
type = c("lv", "vlm"), ...) {
if (mode(type) != "character" && mode(type) != "name")
- type = as.character(substitute(type))
- type = match.arg(type, c("lv","vlm"))[1]
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("lv", "vlm"))[1]
-  switch(type, lv=Coef(object, ...)@lv, vlm = object@x)
+  switch(type, lv = Coef(object, ...)@lv, vlm = object@x)
}
+
setMethod("model.matrix", "qrrvglm", function(object, ...)
model.matrix.qrrvglm(object, ...))
@@ -2953,7 +3032,7 @@ setMethod("model.matrix", "qrrvglm", function(object, ...)
-perspqrrvglm = function(x, varlvI = FALSE, reference = NULL,
+perspqrrvglm <- function(x, varlvI = FALSE, reference = NULL,
plot.it = TRUE,
xlim = NULL, ylim = NULL,
zlim = NULL, # zlim ignored if Rank == 1
@@ -2969,84 +3048,84 @@ perspqrrvglm = function(x, varlvI = FALSE, reference = NULL,
main = "",
ticktype = "detailed",
col = if (Rank == 1) par()$col else "white",
- llty=par()$lty, llwd=par()$lwd,
+ llty = par()$lty, llwd = par()$lwd,
add1 = FALSE,
...) {
- oylim = ylim
- object = x # don't like x as the primary argument
- coef.obj = Coef(object, varlvI = varlvI, reference = reference)
-  if ((Rank <- coef.obj@Rank) > 2)
-    stop("object must be a rank-1 or rank-2 model")
-  fv = fitted(object)
-  NOS = ncol(fv) # Number of species
-  M = object@misc$M #
-
-  xlim = rep(if (length(xlim)) xlim else
-             range(coef.obj@lv[,1]), length = 2)
- if (!length(oylim)) {
- ylim = if (Rank == 1) c(0, max(fv)*stretch) else
-           rep(range(coef.obj@lv[,2]), length = 2)
- }
- gridlength = rep(gridlength, length = Rank)
- lv1 = seq(xlim[1], xlim[2], length = gridlength[1])
- if (Rank == 1) {
- m = cbind(lv1)
- } else {
- lv2 = seq(ylim[1], ylim[2], length = gridlength[2])
- m = expand.grid(lv1,lv2)
- }
-
-  if (dim(coef.obj@B1)[1] != 1 ||
-      dimnames(coef.obj@B1)[[1]] != "(Intercept)")
-    stop("Norrr = ~ 1 is needed")
-  LP = coef.obj@A %*% t(cbind(m)) # M by n
-  LP = LP + c(coef.obj@B1) # Assumes \bix_1 = 1 (intercept only)
-
- mm = as.matrix(m)
- N = ncol(LP)
- for(jay in 1:M) {
- for(ii in 1:N) {
- LP[jay, ii] = LP[jay, ii] +
- mm[ii, , drop = FALSE] %*%
-                    coef.obj@D[,,jay] %*%
- t(mm[ii, , drop = FALSE])
- }
+ oylim <- ylim
+ object <- x # don't like x as the primary argument
+ coef.obj <- Coef(object, varlvI = varlvI, reference = reference)
+  if ((Rank <- coef.obj@Rank) > 2)
+    stop("object must be a rank-1 or rank-2 model")
+  fv <- fitted(object)
+  NOS <- ncol(fv) # Number of species
+  M <- object@misc$M #
+
+  xlim <- rep(if (length(xlim)) xlim else
+              range(coef.obj@lv[,1]), length = 2)
+  if (!length(oylim)) {
+    ylim <- if (Rank == 1) c(0, max(fv)*stretch) else
+            rep(range(coef.obj@lv[,2]), length = 2)
+ }
+ gridlength <- rep(gridlength, length = Rank)
+ lv1 <- seq(xlim[1], xlim[2], length = gridlength[1])
+ if (Rank == 1) {
+ m <- cbind(lv1)
+ } else {
+ lv2 <- seq(ylim[1], ylim[2], length = gridlength[2])
+ m <- expand.grid(lv1,lv2)
+ }
+
+  if (dim(coef.obj@B1)[1] != 1 ||
+      dimnames(coef.obj@B1)[[1]] != "(Intercept)")
+    stop("noRRR = ~ 1 is needed")
+  LP <- coef.obj@A %*% t(cbind(m)) # M by n
+  LP <- LP + c(coef.obj@B1) # Assumes \bix_1 = 1 (intercept only)
+
+ mm <- as.matrix(m)
+ N <- ncol(LP)
+ for (jay in 1:M) {
+ for (ii in 1:N) {
+ LP[jay, ii] <- LP[jay, ii] +
+ mm[ii, , drop = FALSE] %*%
+                     coef.obj@D[,,jay] %*%
+ t(mm[ii, , drop = FALSE])
}
- LP = t(LP) # n by M
+ }
+ LP <- t(LP) # n by M
-  fitvals = object@family@linkinv(LP) # n by NOS
-  dimnames(fitvals) = list(NULL, dimnames(fv)[[2]])
-  sppNames = dimnames(object@y)[[2]]
+  fitvals <- object@family@linkinv(LP) # n by NOS
+  dimnames(fitvals) <- list(NULL, dimnames(fv)[[2]])
+  sppNames <- dimnames(object@y)[[2]]
if (!length(whichSpecies)) {
- whichSpecies = sppNames[1:NOS]
- whichSpecies.numer = 1:NOS
+ whichSpecies <- sppNames[1:NOS]
+ whichSpecies.numer <- 1:NOS
} else
if (is.numeric(whichSpecies)) {
- whichSpecies.numer = whichSpecies
- whichSpecies = sppNames[whichSpecies.numer] # Convert to character
+ whichSpecies.numer <- whichSpecies
+ whichSpecies <- sppNames[whichSpecies.numer] # Convert to character
} else {
- whichSpecies.numer = match(whichSpecies, sppNames)
+ whichSpecies.numer <- match(whichSpecies, sppNames)
}
if (Rank == 1) {
- if (plot.it) {
- if (!length(oylim))
- ylim = c(0, max(fitvals[,whichSpecies.numer]) *
- stretch) # A revision
- col = rep(col, length.out = length(whichSpecies.numer))
- llty = rep(llty, leng=length(whichSpecies.numer))
- llwd = rep(llwd, leng=length(whichSpecies.numer))
+ if (plot.it) {
+ if (!length(oylim))
+ ylim <- c(0, max(fitvals[,whichSpecies.numer]) *
+ stretch) # A revision
+ col <- rep(col, length.out = length(whichSpecies.numer))
+ llty <- rep(llty, leng = length(whichSpecies.numer))
+ llwd <- rep(llwd, leng = length(whichSpecies.numer))
if (!add1)
- matplot(lv1, fitvals, xlab=xlab, ylab=ylab, type = "n",
- main=main, xlim=xlim, ylim=ylim, ...)
- for(j in 1:length(whichSpecies.numer)) {
- ptr2 = whichSpecies.numer[j] # points to species column
+ matplot(lv1, fitvals, xlab = xlab, ylab = ylab, type = "n",
+ main = main, xlim = xlim, ylim = ylim, ...)
+ for (j in 1:length(whichSpecies.numer)) {
+ ptr2 <- whichSpecies.numer[j] # points to species column
lines(lv1, fitvals[,ptr2], col=col[j],
lty=llty[j], lwd=llwd[j], ...)
if (labelSpecies) {
- ptr1 = (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
+ ptr1 <- (1:nrow(fitvals))[max(fitvals[,ptr2]) ==
fitvals[,ptr2]]
- ptr1 = ptr1[1]
+ ptr1 <- ptr1[1]
text(lv1[ptr1], fitvals[ptr1,ptr2]+
(stretch-1)*diff(range(ylim)),
label=sppNames[j], col=col[j], ...)
@@ -3054,25 +3133,25 @@ perspqrrvglm = function(x, varlvI = FALSE, reference = NULL,
}
}
} else {
- maxfitted = matrix(fitvals[,whichSpecies[1]],
+ maxfitted <- matrix(fitvals[,whichSpecies[1]],
length(lv1), length(lv2))
if (length(whichSpecies) > 1)
- for(j in whichSpecies[-1]) {
- maxfitted = pmax(maxfitted, matrix(fitvals[,j],
- length(lv1), length(lv2)))
+ for (j in whichSpecies[-1]) {
+ maxfitted <- pmax(maxfitted, matrix(fitvals[,j],
+ length(lv1), length(lv2)))
}
if (!length(zlim))
- zlim = range(maxfitted, na.rm = TRUE)
+ zlim <- range(maxfitted, na.rm = TRUE)
if (plot.it)
- graphics:::persp.default(lv1, lv2, maxfitted,
- zlim=zlim,
- xlab=xlab, ylab=ylab, zlab=zlab,
- ticktype = ticktype, col = col, main=main, ...)
+ graphics:::persp.default(lv1, lv2, maxfitted,
+ zlim = zlim,
+ xlab = xlab, ylab = ylab, zlab = zlab,
+ ticktype = ticktype, col = col, main = main, ...)
}
- invisible(list(fitted=fitvals,
- lv1grid=lv1,
+ invisible(list(fitted = fitvals,
+ lv1grid = lv1,
lv2grid = if (Rank == 2) lv2 else NULL,
maxfitted = if (Rank == 2) maxfitted else NULL))
}
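A minimal sketch of calling the function above through its persp() method
(illustrative only; 'myfit' is a hypothetical rank-1 fit from cqo()):

    persp(myfit, xlim = c(-3, 3), col = 1:4, llwd = 2)
    # For a rank-2 fit, the same call draws a perspective plot of the
    # pointwise maximum of the fitted values over the latent-variable grid.
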
@@ -3084,7 +3163,8 @@ perspqrrvglm = function(x, varlvI = FALSE, reference = NULL,
package = "VGAM")
setMethod("persp", "qrrvglm",
- function(x, ...) perspqrrvglm(x=x, ...))
+ function(x, ...) perspqrrvglm(x = x, ...))
+
@@ -3093,45 +3173,65 @@ setMethod("persp", "qrrvglm",
-ccoef.qrrvglm = function(object, varlvI = FALSE,
- reference = NULL, ...) {
- Coef(object, varlvI = varlvI, reference = reference, ...)@C
+Rank.rrvglm <- function(object, ...) {
+  object@control$Rank
+}
+
+
+Rank.qrrvglm <- function(object, ...) {
+  object@control$Rank
}
-ccoef.Coef.qrrvglm = function(object, ...) {
+Rank.cao <- function(object, ...) {
+  object@control$Rank
+}
+
+
+
+
+ccoef.qrrvglm <- function(object, varlvI = FALSE,
+ reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@C
+}
+
+
+ccoef.Coef.qrrvglm <- function(object, ...) {
if (length(list(...)))
warning("Too late! Ignoring the extra arguments")
  object@C
}
-lv.qrrvglm <- function(object, varlvI = FALSE,
+latvar.qrrvglm <- function(object, varlvI = FALSE,
reference = NULL, ...) {
Coef(object, varlvI = varlvI, reference = reference, ...)@lv
}
-lv.rrvglm = function(object, ...) {
- ans = lvplot(object, plot.it = FALSE)
+lv.rrvglm <- function(object, ...) {
+ ans <- lvplot(object, plot.it = FALSE)
if (ncol(ans) == 1)
- dimnames(ans) = list(dimnames(ans)[[1]], "lv")
+ dimnames(ans) <- list(dimnames(ans)[[1]], "lv")
ans
}
-lv.Coef.qrrvglm = function(object, ...) {
- if (length(list(...)))
- warning("Too late! Ignoring the extra arguments")
- object@lv
+
+latvar.Coef.qrrvglm <- function(object, ...) {
+ if (length(list(...)))
+ warning("Too late! Ignoring the extra arguments")
+  object@lv
}
-Max.qrrvglm = function(object, varlvI = FALSE,
- reference = NULL, ...) {
- Coef(object, varlvI = varlvI, reference = reference, ...)@Maximum
+
+Max.qrrvglm <- function(object, varlvI = FALSE,
+ reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Maximum
}
-Max.Coef.qrrvglm = function(object, ...) {
+
+Max.Coef.qrrvglm <- function(object, ...) {
if (length(list(...)))
warning("Too late! Ignoring the extra arguments")
if (any(slotNames(object) == "Maximum"))
@@ -3139,21 +3239,25 @@ Max.Coef.qrrvglm = function(object, ...) {
Max(object, ...)
}
-Opt.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
- Coef(object, varlvI = varlvI, reference = reference, ...)@Optimum
+
+Opt.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Optimum
}
-Opt.Coef.qrrvglm = function(object, ...) {
+
+Opt.Coef.qrrvglm <- function(object, ...) {
if (length(list(...)))
warning("Too late! Ignoring the extra arguments")
Coef(object, ...)@Optimum
}
-Tol.qrrvglm = function(object, varlvI = FALSE, reference = NULL, ...) {
- Coef(object, varlvI = varlvI, reference = reference, ...)@Tolerance
+
+Tol.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
+ Coef(object, varlvI = varlvI, reference = reference, ...)@Tolerance
}
-Tol.Coef.qrrvglm = function(object, ...) {
+
+Tol.Coef.qrrvglm <- function(object, ...) {
if (length(list(...)))
warning("Too late! Ignoring the extra arguments")
if (any(slotNames(object) == "Tolerance"))
@@ -3174,22 +3278,38 @@ setMethod("ccoef", "Coef.rrvglm",
setMethod("ccoef", "Coef.qrrvglm",
function(object, ...) ccoef.Coef.qrrvglm(object, ...))
+
setMethod("coef", "qrrvglm",
function(object, ...) Coef.qrrvglm(object, ...))
setMethod("coefficients", "qrrvglm",
function(object, ...) Coef.qrrvglm(object, ...))
+
if (!isGeneric("lv"))
setGeneric("lv",
function(object, ...) standardGeneric("lv"))
setMethod("lv", "rrvglm",
function(object, ...) lv.rrvglm(object, ...))
setMethod("lv", "qrrvglm",
- function(object, ...) lv.qrrvglm(object, ...))
+ function(object, ...) latvar.qrrvglm(object, ...))
setMethod("lv", "Coef.rrvglm",
- function(object, ...) lv.Coef.qrrvglm(object, ...))
+ function(object, ...) latvar.Coef.qrrvglm(object, ...))
setMethod("lv", "Coef.qrrvglm",
- function(object, ...) lv.Coef.qrrvglm(object, ...))
+ function(object, ...) latvar.Coef.qrrvglm(object, ...))
+
+
+ if (!isGeneric("latvar"))
+ setGeneric("latvar",
+ function(object, ...) standardGeneric("latvar"))
+setMethod("latvar", "rrvglm",
+ function(object, ...) lv.rrvglm(object, ...))
+setMethod("latvar", "qrrvglm",
+ function(object, ...) latvar.qrrvglm(object, ...))
+setMethod("latvar", "Coef.rrvglm",
+ function(object, ...) latvar.Coef.qrrvglm(object, ...))
+setMethod("latvar", "Coef.qrrvglm",
+ function(object, ...) latvar.Coef.qrrvglm(object, ...))
+
if (!isGeneric("Max"))
setGeneric("Max",
@@ -3199,6 +3319,7 @@ setMethod("Max", "qrrvglm",
setMethod("Max", "Coef.qrrvglm",
function(object, ...) Max.Coef.qrrvglm(object, ...))
+
if (!isGeneric("Opt"))
setGeneric("Opt",
function(object, ...) standardGeneric("Opt"))
@@ -3207,6 +3328,7 @@ setMethod("Opt", "qrrvglm",
setMethod("Opt", "Coef.qrrvglm",
function(object, ...) Opt.Coef.qrrvglm(object, ...))
+
if (!isGeneric("Tol"))
setGeneric("Tol",
function(object, ...) standardGeneric("Tol"))
@@ -3232,22 +3354,25 @@ clo <- function(...) {
is.bell.vlm <-
is.bell.rrvglm <- function(object, ...) {
-  M = object@misc$M
-  ynames = object@misc$ynames
-  ans = rep(FALSE, length.out = M)
-  if (length(ynames)) names(ans) = ynames
+  M <- object@misc$M
+  ynames <- object@misc$ynames
+ ans <- rep(FALSE, length.out = M)
+ if (length(ynames)) names(ans) <- ynames
ans
}
+
is.bell.uqo <-
is.bell.qrrvglm <- function(object, ...) {
is.finite(Max(object, ...))
}
+
is.bell.cao <- function(object, ...) {
NA * Max(object, ...)
}
+
if (!isGeneric("is.bell"))
setGeneric("is.bell",
function(object, ...) standardGeneric("is.bell"))
@@ -3266,5 +3391,20 @@ setMethod("is.bell","Coef.qrrvglm",
+ if (!isGeneric("Rank"))
+ setGeneric("Rank",
+ function(object, ...) standardGeneric("Rank"))
+
+setMethod("Rank", "rrvglm",
+ function(object, ...) Rank.rrvglm(object, ...))
+setMethod("Rank", "qrrvglm",
+ function(object, ...) Rank.qrrvglm(object, ...))
+setMethod("Rank", "cao",
+ function(object, ...) Rank.cao(object, ...))
+
+
+
+
+
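A minimal sketch of the new Rank() generic and the latvar() synonyms added
above (illustrative only; 'myfit' is a hypothetical rrvglm() or cqo() fit):

    Rank(myfit)     # simply returns myfit@control$Rank
    latvar(myfit)   # latent variables (site scores); same as lv(myfit)
    Max(myfit)      # species maxima     (quadratic ordination fits only)
    Opt(myfit)      # species optima     (quadratic ordination fits only)
    Tol(myfit)      # tolerance matrices (quadratic ordination fits only)
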
diff --git a/R/family.sur.R b/R/family.sur.R
new file mode 100644
index 0000000..2548341
--- /dev/null
+++ b/R/family.sur.R
@@ -0,0 +1,442 @@
+# These functions are Copyright (C) 1998-2013 T. W. Yee All rights reserved
+
+# 26/6/98; family.sur.q
+# 20110406; renamed to family.sur.R
+
+# zz; does or doesn't handle? :
+# vglm(Sur(mydataframe), sur, ...), i.e., 1st column
+# of mydataframe is the response.
+
+
+
+# History
+# 20110406; editing it to bring it up to scratch.
+# 20130125; trying to get SUR() going.
+
+
+
+# --------------------------------------------------------------------
+# Maybe should call this surff()??:
+
+
+ SUR <- function(
+ mle.normal = FALSE,
+ divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
+# estimator = c("classical", "iterative"),
+ parallel = FALSE,
+ apply.parint = TRUE,
+# zero = NULL,
+ Varcov = NULL,
+ matrix.arg = FALSE) {
+# Notes:
+# 1. Varcov may be assigned a solve(wz) (=solve(\bSigma)),
+# and matrix.arg tells what format it is in.
+# 2. Based a little on normal1().
+# 3. Set maxit = 1 for Zellner's estimator (2-stage).
+# Set maxit = 111 for iterative GLS === IGLS.
+
+
+# Wrong:
+# 1. "2stage" == Zellner's estimator.
+# "iterative" == iterative GLS === IGLS.
+# "MLE.normal" == not yet done.
+# Or "maxit.sur = 2"?
+
+
+# Last modified:
+# 20130125; trying to get SUR() going.
+# 20130126; seems to work basically but not the above arguments.
+# A lot more work needed.
+# 20130130; seems to work.
+# Removed 'zero' argument.
+
+
+# Yettodo:
+# 2013013 ; argument 'mle.normal' is logical.
+
+
+#print("20130129; in SUR()")
+
+
+ lmean <- "identity"
+ lsdev <- "loge"
+ emean <- list()
+ esdev <- list()
+
+
+ if (!is.logical(mle.normal) ||
+ length(mle.normal) != 1)
+ stop("argument 'mle.normal' must be a single logical")
+
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
+
+
+# if(mode(estimator) != "character" && mode(estimator) != "name")
+# estimator <- as.character(substitute(estimator))
+# estimator <- match.arg(estimator,
+# c("classical", "iterative"))[1]
+#print(paste('estimator =', estimator))
+
+
+ divisor <- match.arg(divisor,
+ c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"))[1]
+#print("divisor")
+#print( divisor )
+
+ if (mle.normal && divisor != "n")
+ warning("MLE requires 'n' as the value of argument 'divisor'. ",
+ "The solution will probably not be the MLE")
+
+
+ ret.ff <-
+ new("vglmff",
+ blurb = c("Seemingly unrelated regressions"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1, M, 1), x,
+ .parallel , constraints,
+ apply.int = .apply.parint )
+# constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .parallel = parallel,
+# .zero = zero,
+ .apply.parint = apply.parint ))),
+
+# deviance = function(y, mu, w, residuals = FALSE,
+# eta = NULL, extra = NULL) {
+# Returns the residual sum of squares
+# Nb. extra$wz is wz
+
+#print("head(y - mu)")
+#print( head(y - mu) )
+#print("head(extra$wz)")
+#print( head(extra$wz) )
+
+# M <- if (length(extra$M)) extra$M else ifelse(is.matrix(y), ncol(y), 1)
+# if (residuals) {
+# if (M > 1) NULL else (y-mu) * sqrt(extra$wz)
+# } else {
+# ResSS.vgam(y - mu, extra$wz, M = M)
+# }
+# },
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1, # zz???
+# zero = .zero ,
+# link = .link ,
+ parallel = .parallel ,
+ multipleResponses = TRUE )
+ }, list( .parallel = parallel ))),
+
+ initialize = eval(substitute(expression({
+
+ if (!is.matrix(y) || ncol(y) == 1)
+ stop("response must be a matrix with at least 2 columns")
+ ncoly <- ncol(y)
+
+#print("constraints")
+#print( constraints )
+ if (is.logical( .parallel ) &&
+ .parallel &&
+ !all(as.logical(trivial.constraints(constraints))))
+ warning("setting 'parallel = TRUE' with nontrivial constraints may not ",
+ "make sense")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+# Is.positive.y = TRUE,
+ ncol.w.min = 1,
+ ncol.w.max = 1,
+ ncol.y.max = Inf,
+ Is.integer.y = FALSE,
+ Is.positive.y = FALSE,
+ out.wy = TRUE,
+ colsyperw = ncoly,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+#print("head(w)")
+#print( head(w) )
+
+ if (!all(w[1, 1] == w))
+ stop("all prior 'weights' must currently have equal values")
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ predictors.names <- if (!length(ddd <- dimnames(y)[[2]]))
+ paste("Y", 1:M, sep = "") else ddd
+
+
+# if ( .estimator == "classical")
+# maxit <- 1
+
+
+# Iteration may lead to an increase in RSS
+# if ( .estimator == "iterative")
+# half.stepsizing <- FALSE
+
+
+# Assign "extra$wz" something corresponding to the M x M identity matrix.
+ extra$wz <- matrix(1, nrow(x), M)
+
+
+ if (!length(etastart)) {
+# Note: it is a good idea to start with the OLS estimators here first.
+ etastart <- matrix(0, n, M)
+
+
+ Blist.early <- process.constraints(constraints, x, M,
+ specialCM = specialCM)
+#print("Blist.early")
+#print( Blist.early )
+ X_vlm.early <- lm2vlm.model.matrix(x, Blist.early, xij = control$xij,
+ Xm2 = Xm2)
+#print("head(X_vlm.early)")
+#print( head(X_vlm.early) )
+
+ Hmatrices <- matrix(c(unlist(Blist.early)), nrow = M)
+ jay.index <- 1:ncol(Hmatrices)
+
+
+ extra$ncols_X_lm <- numeric(ncoly)
+ for (jay in 1:ncoly) {
+# model.matrix(fit, lapred.index = 1, type = "lm")
+#print("Hmatrices")
+#print( Hmatrices )
+# 20121231; this code adapted from model.matrixvlm():
+# lapred.index <- jay.index[jay]
+# index0 <- Hmatrices[jay, ] != 0 # Orig.
+# Index0 <- Hmatrices[lapred.index, ] != 0
+# X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + lapred.index, Index0,
+# drop = FALSE]
+
+ X_lm_jay <- vlm2lm.model.matrix(x_vlm = X_vlm.early,
+ Blist = Blist.early,
+ which.lp = jay, M = M)
+#print("head(X_lm_jay)")
+#print( head(X_lm_jay) )
+
+# This is useful, e.g., for changing the denominator
+ extra$ncols_X_lm[jay] <- ncol(X_lm_jay)
+
+ etastart[, jay] <- y[, jay] -
+ lsfit(x = X_lm_jay, y = y[, jay],
+ wt = c(w), intercept = FALSE)$residuals
+ } # jay
+ } # !length(etastart)
+ }), list(
+# .estimator = estimator,
+ .parallel = parallel
+ ))),
+ linkinv = function(eta, extra = NULL) eta,
+ last = eval(substitute(expression({
+
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lmean , length = ncoly))
+ temp.names <- predictors.names
+# temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
+ names(misc$link) <- temp.names
+#print("head(w)")
+#print( head(w) )
+
+ misc$earg <- vector("list", Musual * ncoly)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii]] <- .emean
+ }
+ names(misc$earg) <- temp.names
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$divisor <- .divisor
+ misc$values.divisor <- round(n / ratio.df)
+
+ }), list( .lmean = lmean, .lsdev = lsdev,
+ .emean = emean, .esdev = esdev,
+ .divisor = divisor
+ ))),
+
+# linkfun = function(mu, extra = NULL) mu,
+ vfamily = "SUR",
+
+
+ deriv = eval(substitute(expression({
+#print("in @deriv of SUR()")
+#print(paste("iter =", iter))
+ mymu <- eta
+ iam.indices <- iam(NA, NA, M = M, both = TRUE)
+#print("iam.indices")
+#print( iam.indices )
+#print("y")
+#print( y )
+#print("mu")
+#print( mu )
+ resmat <- y - mymu
+ Sigma.elts <- colMeans(resmat[, iam.indices$row.index] *
+ resmat[, iam.indices$col.index])
+
+ if ( .divisor != "n") {
+# Make an adjustment for the denominator (above assumes "n")
+# Here, ratio.df >= 1.
+ ratio.df <- n / switch( .divisor ,
+ "n-max(pj,pk)" = n - pmax(extra$ncols_X_lm[iam.indices$row.index],
+ extra$ncols_X_lm[iam.indices$col.index]),
+ "sqrt((n-pj)*(n-pk))" =
+ sqrt((n - extra$ncols_X_lm[iam.indices$row.index]) *
+ (n - extra$ncols_X_lm[iam.indices$col.index])),
+ stop("argument 'divisor' unmatched"))
+#print("ratio.df")
+#print( ratio.df )
+ Sigma.elts <- Sigma.elts * ratio.df
+ } else {
+ ratio.df <- rep(1, length = M*(M+1)/2)
+ }
+
+#print("Sigma.elts")
+#print( Sigma.elts )
+ Sigma.mat <- matrix(0, M, M)
+ Sigma.mat[cbind(iam.indices$row.index,
+ iam.indices$col.index)] <- Sigma.elts
+ Sigma.mat[cbind(iam.indices$col.index,
+ iam.indices$row.index)] <- Sigma.elts
+
+#print("Sigma.mat")
+#print( Sigma.mat )
+# Cholesky is more efficient than solve()
+ invSigma.mat <- chol2inv(chol(Sigma.mat))
+# invSigma.mat <- solve(Sigma.mat) # Inefficient
+#print("invSigma.mat")
+#print( invSigma.mat )
+
+
+# dl.dmu returns \bW_i (\biy_i - \bmu_i)
+ temp3 <- matrix(invSigma.mat[cbind(iam.indices$row.index,
+ iam.indices$col.index)],
+ M*(M+1)/2, n)
+ dl.dmu <- mux22(temp3, y - mymu, M = M,
+ upper = FALSE, as.matrix = TRUE)
+#print("dim(dl.dmu)")
+#print( dim(dl.dmu) )
+#print("head(dl.dmu)")
+#print( head(dl.dmu) )
+# dl.dmu <- (y - mymu) / sdev^2 # For normal1()
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+#print("head(dmu.deta)")
+#print( head(dmu.deta) )
+
+ c(w) * dl.dmu * dmu.deta
+ }), list( .lmean = lmean,
+ .emean = emean,
+ .divisor = divisor ))),
+
+
+ weight = eval(substitute(expression({
+#print("in @weight of SUR()")
+
+
+# Overwrite invSigma.mat with the inverse variance, if given.
+ if (length( .Varcov )) {
+ Sigma.mat <- if ( .matrix.arg ) .Varcov else {
+ temp.vec <- rep( .Varcov , len = M*(M+1)/2)
+ temp.mat <- matrix(0, M, M)
+ temp.mat[cbind(iam.indices$col.index,
+ iam.indices$row.index)] <- temp.vec
+ temp.mat[cbind(iam.indices$row.index,
+ iam.indices$col.index)] <- temp.vec
+# temp.mat <- chol2inv(chol(temp.mat))
+ temp.mat
+ }
+ invSigma.mat <- chol2inv(chol(Sigma.mat))
+ }
+
+
+ wz <-
+ extra$wz <- c(w) * matrix(invSigma.mat[cbind(iam.indices$col.index,
+ iam.indices$row.index)],
+ n, M*(M+1)/2, byrow = TRUE)
+ extra$Sigma.mat <- Sigma.mat
+ extra$invSigma.mat <- invSigma.mat
+
+#print("head(wz)")
+#print( head(wz) )
+ wz
+ }), list( .divisor = divisor,
+# .estimator = estimator,
+ .Varcov = Varcov,
+ .matrix.arg = matrix.arg ))))
+
+
+
+ if (mle.normal) {
+# Add a 'loglikelihood' slot to the object.
+# This code based on normal1().
+# Note wz is retrieved from 'extra', and 'wz' has only
+# one general symmetric pos-definite matrix that is replicated
+# a lot.
+
+# Yettodo: if "all prior 'weights' must currently have equal values" is
+# relaxed then have to do some code changes??
+
+    ret.ff@loglikelihood <-
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M <- if (is.matrix(y)) ncol(y) else 1
+ n <- if (is.matrix(y)) nrow(y) else length(y)
+
+# Orig:
+# wz <- VGAM.weights.function(w = w, M = M, n = n)
+# Now:
+ wz <- extra$wz
+
+ temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
+# Each row of wz is the same (or should be!!)
+ onewz <- if (length(extra$invSigma.mat))
+ extra$invSigma.mat else
+ (m2adefault(wz[1, , drop = FALSE], M = M))[,, 1] # M x M
+#print("onewz")
+#print( onewz )
+#print("extra$invSigma.mat - onewz")
+#print( extra$invSigma.mat - onewz )
+
+
+# 20130131; done: use det() or determinant():
+ logdet <- determinant(onewz)$modulus
+#print("logdet")
+#print( logdet )
+# logdet <- sum(log(eigen(onewz, symmetric = TRUE,
+# only.values = TRUE)$values))
+#print("logdet2")
+#print( logdet )
+ logretval <- -0.5 * temp1 + 0.5 * n * logdet -
+ n * (M / 2) * log(2*pi)
+# logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
+ logretval
+ }
+ }
+
+ ret.ff
+}
+
+
+
+
+# 20130125; Below here is old stuff... i will leave this alone
+# --------------------------------------------------------------------
+# 20110407; Below here is old stuff... i will leave this alone
+# --------------------------------------------------------------------
+# --------------------------------------------------------------------
+# --------------------------------------------------------------------
+
+
+
+# Sur <- function...
+
+
+
+
+
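A minimal sketch of how the new SUR() family is intended to be called
(illustrative only; the data frame 'mydat' and the variables y1, y2, x2 are
invented, and the maxit values follow the comments above):

    # Zellner's two-stage estimator: stop after one IRLS iteration.
    fit2 <- vglm(cbind(y1, y2) ~ x2, SUR(divisor = "n"),
                 data = mydat, maxit = 1)
    # Iterative GLS: let the algorithm iterate until convergence.
    fitI <- vglm(cbind(y1, y2) ~ x2, SUR(mle.normal = TRUE),
                 data = mydat, maxit = 111)
    coef(fit2, matrix = TRUE)
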
diff --git a/R/family.survival.R b/R/family.survival.R
index 5ea3d7a..4d88af8 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -39,36 +39,36 @@
"\n",
"Variance: sd^2"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}) , list( .zero = zero))),
initialize = eval(substitute(expression({
- predictors.names =
+ predictors.names <-
c(namesof("mu", .lmu, earg =.emu, tag = FALSE),
namesof("sd", .lsd, earg =.esd, tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or a one-column matrix")
+ stop("the response must be a vector or a one-column matrix")
if (length(w) != n ||
- !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
- stop("the argument 'weights' must be a vector ",
- "of positive integers")
+ !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
+ stop("the argument 'weights' must be a vector ",
+ "of positive integers")
- sumw = sum(w)
- extra$bign = sumw + .r1 + .r2 # Tot num of censored & uncensored obsns
+ sumw <- sum(w)
+ extra$bign <- sumw + .r1 + .r2 # Tot num of censored & uncensored obsns
if (!length(etastart)) {
- yyyy.est = if (length( .imu )) .imu else median(y)
- sd.y.est = if (length( .isd )) .isd else {
- junk = lm.wfit(x = x, y = y, w = w)
- 1.25 * sqrt( sum(w * junk$resid^2) / junk$df.residual )
- }
- yyyy.est = rep( yyyy.est , len = n)
- sd.y.est = rep( sd.y.est , len = n)
- etastart = cbind(mu = theta2eta(yyyy.est, .lmu, earg =.emu),
- sd = theta2eta(sd.y.est, .lsd, earg =.esd))
+ yyyy.est <- if (length( .imu )) .imu else median(y)
+ sd.y.est <- if (length( .isd )) .isd else {
+ junk <- lm.wfit(x = x, y = c(y), w = c(w))
+ 1.25 * sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ }
+ yyyy.est <- rep(yyyy.est , len = n)
+ sd.y.est <- rep(sd.y.est , len = n)
+ etastart <- cbind(mu = theta2eta(yyyy.est, .lmu , earg =.emu ),
+ sd = theta2eta(sd.y.est, .lsd , earg =.esd ))
}
}) , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
@@ -76,70 +76,73 @@
.r1 = r1, .r2 = r2 ))),
linkinv = function(eta, extra = NULL) eta[, 1],
last = eval(substitute(expression({
- misc$link = c(mu = .lmu , sd = .lsd )
- misc$earg = list(mu = .emu , sd = .esd )
- misc$expected = TRUE
- misc$r1 = .r1
- misc$r2 = .r2
+ misc$link <- c(mu = .lmu , sd = .lsd )
+
+ misc$earg <- list(mu = .emu , sd = .esd )
+
+ misc$multipleResponses <- FALSE
+ misc$expected <- TRUE
+ misc$r1 <- .r1
+ misc$r2 <- .r2
}) , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ sd <- eta2theta(eta[, 2], .lsd, earg = .esd )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * dnorm(y, m = mu, sd = sd, log = TRUE)) +
(if ( .r1 == 0) 0 else {
- z1 = min((y - mu) / sd); Fz1 = pnorm(z1); .r1 * log(Fz1)}) +
+ z1 <- min((y - mu) / sd); Fz1 = pnorm(z1); .r1 * log(Fz1)}) +
(if ( .r2 == 0) 0 else {
- z2 = max((y - mu) / sd); Fz2 = pnorm(z2); .r2 * log1p(-Fz2)})
+ z2 <- max((y - mu) / sd); Fz2 = pnorm(z2); .r2 * log1p(-Fz2)})
} , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
vfamily = c("dcennormal1"),
deriv = eval(substitute(expression({
- sd = eta2theta(eta[, 2], .lsd, earg =.esd)
-
- q1 = .r1 / extra$bign
- q2 = .r2 / extra$bign
- pee = 1 - q1 - q2 # 1 if r1==r2==0
- z1 = if ( .r1 == 0) - 100 else min((y - mu) / sd) # 100==Inf
- z2 = if ( .r2 == 0) + 100 else max((y - mu) / sd) # 100==Inf
- fz1 = if ( .r1 == 0) 0 else dnorm(z1)
- fz2 = if ( .r2 == 0) 0 else dnorm(z2)
- Fz1 = if ( .r1 == 0) 0.02 else pnorm(z1) # 0/0 undefined
- Fz2 = if ( .r2 == 0) 0.99 else pnorm(z2)
-
- dl.dmu = (y - mu) / sd^2 +
+ sd <- eta2theta(eta[, 2], .lsd, earg =.esd)
+
+ q1 <- .r1 / extra$bign
+ q2 <- .r2 / extra$bign
+ pee <- 1 - q1 - q2 # 1 if r1==r2==0
+ z1 <- if ( .r1 == 0) - 100 else min((y - mu) / sd) # 100==Inf
+ z2 <- if ( .r2 == 0) + 100 else max((y - mu) / sd) # 100==Inf
+ fz1 <- if ( .r1 == 0) 0 else dnorm(z1)
+ fz2 <- if ( .r2 == 0) 0 else dnorm(z2)
+ Fz1 <- if ( .r1 == 0) 0.02 else pnorm(z1) # 0/0 undefined
+ Fz2 <- if ( .r2 == 0) 0.99 else pnorm(z2)
+
+ dl.dmu <- (y - mu) / sd^2 +
((- .r1 * fz1/Fz1 + .r2 * fz2/(1-Fz2)) / sd) / (n*w)
- dl.dsd = -1/sd + (y-mu)^2 / sd^3 +
+ dl.dsd <- -1/sd + (y-mu)^2 / sd^3 +
((- .r1 * z1*fz1/Fz1 + .r2 * z2*fz2/(1-Fz2)) / sd) / (n*w)
- dmu.deta = dtheta.deta(mu, .lmu, earg =.emu)
- dsd.deta = dtheta.deta(sd, .lsd, earg =.esd)
+ dmu.deta <- dtheta.deta(mu, .lmu, earg =.emu)
+ dsd.deta <- dtheta.deta(sd, .lsd, earg =.esd)
c(w) * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
}) , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
weight=expression({
- wz = matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(as.numeric(NA), n, dimm(M))
- Q1 = ifelse(q1 == 0, 1, q1) # Saves division by 0 below; not elegant
- Q2 = ifelse(q2 == 0, 1, q2) # Saves division by 0 below; not elegant
+ Q1 <- ifelse(q1 == 0, 1, q1) # Saves division by 0 below; not elegant
+ Q2 <- ifelse(q2 == 0, 1, q2) # Saves division by 0 below; not elegant
- ed2l.dmu2 = 1 / (sd^2) +
- ((fz1*(z1+fz1/Q1) - fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
- ed2l.dmusd = ((fz1-fz2 + z1*fz1*(z1+fz1/Q1) -
+ ed2l.dmu2 <- 1 / (sd^2) +
+ ((fz1*(z1+fz1/Q1) - fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ed2l.dmusd <- ((fz1-fz2 + z1*fz1*(z1+fz1/Q1) -
z2*fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
- ed2l.dsd2 = 2 / (sd^2) +
- ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q1) -
- z2^2 *fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
+ ed2l.dsd2 <- 2 / (sd^2) +
+ ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q1) -
+ z2^2 *fz2*(z2-fz2/Q2)) / sd^2) / (pee*w)
- wz[,iam(1,1,M)] = w * ed2l.dmu2 * dmu.deta^2
- wz[,iam(2,2,M)] = w * ed2l.dsd2 * dsd.deta^2
- wz[,iam(1,2,M)] = w * ed2l.dmusd * dsd.deta * dmu.deta
+ wz[,iam(1,1,M)] <- w * ed2l.dmu2 * dmu.deta^2
+ wz[,iam(2,2,M)] <- w * ed2l.dsd2 * dsd.deta^2
+ wz[,iam(1,2,M)] <- w * ed2l.dmusd * dsd.deta * dmu.deta
wz
}))
}
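A minimal sketch of the double-censored normal family updated above
(illustrative only; the constructor name dcennormal1() is taken from the
vfamily slot, and 'cdat', 'y' and the integer weight vector 'w' are invented):

    # r1, r2 = numbers of observations censored below and above the data.
    fit <- vglm(y ~ 1, dcennormal1(r1 = 2, r2 = 3), weights = w, data = cdat)
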
@@ -155,66 +158,65 @@ dbisa <- function(x, shape, scale = 1, log = FALSE) {
rm(log)
- L = max(length(x), length(shape), length(scale))
- x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
- logdensity = rep(log(0), len=L)
- xok = (x > 0)
- xifun <- function(x) {temp <- sqrt(x); temp - 1/temp}
- logdensity[xok] = dnorm(xifun(x[xok]/scale[xok]) / shape[xok], log = TRUE) +
- log1p(scale[xok]/x[xok]) - log(2) - log(shape[xok]) -
- 0.5 * log(x[xok]) - 0.5 * log(scale[xok])
- logdensity[scale <= 0] = NaN
- logdensity[shape <= 0] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ L <- max(length(x), length(shape), length(scale))
+ x <- rep(x, len = L);
+ shape <- rep(shape, len = L);
+ scale <- rep(scale, len = L);
+ logdensity <- rep(log(0), len = L)
+ xok <- (x > 0)
+ xifun <- function(x) {temp <- sqrt(x); temp - 1/temp}
+ logdensity[xok] <-
+ dnorm(xifun(x[xok] / scale[xok]) / shape[xok], log = TRUE) +
+ log1p(scale[xok]/x[xok]) - log(2) - log(shape[xok]) -
+ 0.5 * log(x[xok]) - 0.5 * log(scale[xok])
+ logdensity[scale <= 0] <- NaN
+ logdensity[shape <= 0] <- NaN
+ if (log.arg) logdensity else exp(logdensity)
}
pbisa <- function(q, shape, scale=1) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(shape, positive = TRUE))
- stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("bad input for argument 'scale'")
- ans = pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
- ans[scale < 0 | shape < 0] = NA
- ans[q <= 0] = 0
- ans
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+ ans <- pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
+ ans[scale < 0 | shape < 0] <- NA
+ ans[q <= 0] <- 0
+ ans
}
qbisa <- function(p, shape, scale=1) {
- if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
- stop("argument 'p' must have values inside the interval (0,1)")
- if (!is.Numeric(shape, positive = TRUE))
- stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("bad input for argument 'scale'")
- A = qnorm(p)
- temp1 = A * shape * sqrt(4 + A^2 * shape^2)
- ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
- ans2 = (2 + A^2 * shape^2 - temp1) * scale / 2
- ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("argument 'p' must have values inside the interval (0,1)")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+ A <- qnorm(p)
+ temp1 <- A * shape * sqrt(4 + A^2 * shape^2)
+ ans1 <- (2 + A^2 * shape^2 + temp1) * scale / 2
+ ans2 <- (2 + A^2 * shape^2 - temp1) * scale / 2
+ ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
}
-rbisa <- function(n, shape, scale=1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+rbisa <- function(n, shape, scale = 1) {
- A = rnorm(use.n)
- temp1 = A * shape
- temp1 = temp1 * sqrt(4 + temp1^2)
- ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
- ans2 = (2 + A^2 * shape^2 - temp1) * scale / 2
+ A <- rnorm(n)
+ temp1 <- A * shape
+ temp1 <- temp1 * sqrt(4 + temp1^2)
+ ans1 <- (2 + A^2 * shape^2 + temp1) * scale / 2
+ ans2 <- (2 + A^2 * shape^2 - temp1) * scale / 2
- ans = ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
- ans[shape <= 0] = NaN
- ans[scale <= 0] = NaN
- ans
+ ans <- ifelse(A < 0, pmin(ans1, ans2), pmax(ans1, ans2))
+ ans[shape <= 0] <- NaN
+ ans[scale <= 0] <- NaN
+ ans
}
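A quick consistency check for the rewritten dbisa()/pbisa()/qbisa()/rbisa()
(illustrative only; the parameter values are arbitrary):

    pp <- c(0.1, 0.5, 0.9)
    max(abs(pbisa(qbisa(pp, shape = 0.7, scale = 2),
                  shape = 0.7, scale = 2) - pp))        # should be ~ 0
    integrate(dbisa, 0, Inf, shape = 0.7, scale = 2)    # should be ~ 1
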
@@ -247,88 +249,94 @@ rbisa <- function(n, shape, scale=1) {
stop("argument 'imethod' must be 1 or 2 or 3")
+ new("vglmff",
+ blurb = c("Birnbaum-Saunders distribution\n\n",
+ "Links: ",
+ namesof("shape", lshape, earg = eshape, tag = TRUE), "; ",
+ namesof("scale", lscale, earg = escale, tag = TRUE)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }) , list( .zero = zero))),
+ initialize = eval(substitute(expression({
+ if (ncol(y <- cbind(y)) != 1)
+ stop("the response must be a vector or a one-column matrix")
+
+ predictors.names <-
+ c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+
+ if (!length(etastart)) {
+ scale.init <- rep( .iscale, len = n)
+ shape.init <- if (is.Numeric( .ishape)) rep( .ishape, len = n) else {
+ if ( .imethod == 1) {
+ ybar <- rep(weighted.mean(y, w), len = n)
+ ybarr <- rep(1 / weighted.mean(1/y, w), len = n) # Reqrs y > 0
+ sqrt(ybar / scale.init + scale.init / ybarr - 2)
+ } else if ( .imethod == 2) {
+ sqrt(2*( pmax(y, scale.init+0.1) / scale.init - 1))
+ } else {
+ ybar <- rep(weighted.mean(y, w), len = n)
+ sqrt(2*(pmax(ybar, scale.init + 0.1) / scale.init - 1))
+ }
+ }
+ etastart <- cbind(theta2eta(shape.init, .lshape, earg = .eshape),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ }) , list( .lshape = lshape, .lscale = lscale,
+ .ishape = ishape, .iscale = iscale,
+ .eshape = eshape, .escale = escale,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ sh <- eta2theta(eta[, 1], .lshape, earg = .eshape)
+ sc <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ sc * (1 + sh^2 / 2)
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape = .lshape, scale = .lscale)
+
+ misc$earg <- list(shape = .eshape, scale = .escale)
+
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
+ }) , list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ sh <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dbisa(x = y, shape = sh, scale = sc, log = TRUE))
+ }
+ }, list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale ))),
+ vfamily = c("bisa"),
+ deriv = eval(substitute(expression({
+ sh <- eta2theta(eta[, 1], .lshape, earg = .eshape)
+ sc <- eta2theta(eta[, 2], .lscale, earg = .escale)
+
+ dl.dsh <- ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
+ dl.dsc <- -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
+ (sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
+ dsh.deta <- dtheta.deta(sh, .lshape, earg = .eshape)
+ dsc.deta <- dtheta.deta(sc, .lscale, earg = .escale)
- new("vglmff",
- blurb = c("Birnbaum-Saunders distribution\n\n",
- "Links: ",
- namesof("shape", lshape, earg = eshape, tag = TRUE), "; ",
- namesof("scale", lscale, earg = escale, tag = TRUE)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }) , list( .zero = zero))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
- namesof("scale", .lscale, tag = FALSE))
- if (!length(etastart)) {
- scale.init = rep( .iscale, len=n)
- shape.init = if (is.Numeric( .ishape)) rep( .ishape, len=n) else {
- if ( .imethod==1) {
- ybar = rep(weighted.mean(y, w), len=n)
- ybarr = rep(1 / weighted.mean(1/y, w), len=n) # Reqrs y > 0
- sqrt(ybar / scale.init + scale.init / ybarr - 2)
- } else if ( .imethod==2) {
- sqrt(2*( pmax(y, scale.init+0.1) / scale.init - 1))
- } else {
- ybar = rep(weighted.mean(y, w), len=n)
- sqrt(2*( pmax(ybar, scale.init+0.1) / scale.init - 1))
- }
- }
- etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
- theta2eta(scale.init, .lscale, earg = .escale))
- }
- }) , list( .lshape = lshape, .lscale = lscale,
- .ishape = ishape, .iscale = iscale,
- .eshape = eshape, .escale = escale,
- .imethod=imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
- sc = eta2theta(eta[, 2], .lscale, earg = .escale)
- sc * (1 + sh^2 / 2)
- }, list( .lshape = lshape, .lscale = lscale,
+ c(w) * cbind(dl.dsh * dsh.deta,
+ dl.dsc * dsc.deta)
+ }) , list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c(shape = .lshape, scale = .lscale)
- misc$earg = list(shape = .eshape, scale = .escale)
- misc$expected = TRUE
- }) , list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
- sc = eta2theta(eta[, 2], .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dbisa(x=y, shape=sh, scale=sc, log = TRUE))
- }
- } , list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- vfamily=c("bisa"),
- deriv = eval(substitute(expression({
- sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
- sc = eta2theta(eta[, 2], .lscale, earg = .escale)
- dl.dsh = ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
- dl.dsc = -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
- (sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
- dsh.deta = dtheta.deta(sh, .lshape, earg = .eshape)
- dsc.deta = dtheta.deta(sc, .lscale, earg = .escale)
- c(w) * cbind(dl.dsh * dsh.deta,
- dl.dsc * dsc.deta)
- }) , list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, M) # Diagonal!!
- wz[,iam(1,1,M)] = 2 * dsh.deta^2 / sh^2
- hfunction <- function(alpha)
- alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
- pnorm(2/alpha, lower.tail = FALSE)
- wz[,iam(2,2,M)] = dsc.deta^2 * (sh * hfunction(sh) / sqrt(2*pi) +
- 1) / (sh*sc)^2
- c(w) * wz
- }), list( .zero = zero ))))
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, M) # Diagonal!!
+ wz[,iam(1,1,M)] <- 2 * dsh.deta^2 / sh^2
+ hfunction <- function(alpha)
+ alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
+ pnorm(2/alpha, lower.tail = FALSE)
+ wz[,iam(2,2,M)] <- dsc.deta^2 *
+ (sh * hfunction(sh) / sqrt(2*pi) + 1) / (sh*sc)^2
+ c(w) * wz
+ }), list( .zero = zero ))))
}
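A minimal sketch of fitting the bisa() family reformatted above
(illustrative only; the response 'y' and data frame 'bdat' are invented):

    fit <- vglm(y ~ 1, bisa(), data = bdat, trace = TRUE)
    Coef(fit)   # shape and scale back-transformed to their natural scales
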
diff --git a/R/family.ts.R b/R/family.ts.R
index 43cd1d4..40b3b2b 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,269 +9,270 @@
- rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
- index <- cumsum(c(aa, MM*Ranks.))
- ans <- matrix(coeffs[(index[i]+1):index[i+1]],
- Ranks.[i], MM, byrow = TRUE)
- t(ans)
- }
- rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
- ptr <- 0
- Ak1 <- diag(MM)
- for(jay in 1:MM) {
- for(i in 1:MM) {
- if (i > jay && (MM+1)-(Ranks.[jay]-1) <= i) {
- ptr <- ptr + 1
- Ak1[i,jay] <- coeffs[ptr]
- }
- }
- }
- if (aa > 0 && ptr != aa) stop("something wrong")
- Ak1
- }
-
-
- rrar.Di <- function(i, Ranks.) {
- if (Ranks.[1] == Ranks.[i]) diag(Ranks.[i]) else
- rbind(diag(Ranks.[i]),
- matrix(0, Ranks.[1] - Ranks.[i], Ranks.[i]))
- }
-
-
- rrar.Mi <- function(i, MM, Ranks., ki) {
- if (Ranks.[ki[i]] == MM)
- return(NULL)
- hi <- Ranks.[ki[i]] - Ranks.[ki[i+1]]
- Ji <- matrix(0, hi, Ranks.[1])
- for(j in 1:hi) {
- Ji[j,j+Ranks.[ki[i+1]]] <- 1
- }
- Mi <- matrix(0, MM-Ranks.[ki[i]], MM) # dim(Oi) == dim(Ji)
- for(j in 1:(MM-Ranks.[ki[i]])) {
- Mi[j,j+Ranks.[ki[i ]]] <- 1
- }
- kronecker(Mi, Ji)
- }
-
- rrar.Mmat <- function(MM, uu, Ranks., ki) {
- Mmat <- NULL
- for(ii in uu:1) {
- Mmat <- rbind(Mmat, rrar.Mi(ii, MM, Ranks., ki))
- }
- Mmat
- }
-
-
- block.diag <- function(A, B) {
- if (is.null(A) && is.null(B))
- return(NULL)
- if (!is.null(A) && is.null(B))
- return(A)
- if (is.null(A) && !is.null(B))
- return(B)
-
- A <- as.matrix(A)
- B <- as.matrix(B)
- temp <- cbind(A, matrix(0, nrow(A), ncol(B)))
- rbind(temp, cbind(matrix(0, nrow(B), ncol(A)), B))
- }
-
-
- rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
- Htop <- Hbot <- NULL
- Mmat <- rrar.Mmat(MM, uu, Ranks., ki) # NULL if full rank
- Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
-
- if (!is.null(Mmat))
- for(i in 1:plag) {
- Di <- rrar.Di(i, Ranks.)
- Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
- temp <- Di %*% t(Ci)
- Htop <- cbind(Htop, Mmat %*% kronecker(diag(MM), temp))
- }
-
- for(i in 1:plag) {
- Di <- rrar.Di(i, Ranks.)
- temp <- kronecker(t(Di) %*% t(Ak1), diag(MM))
- Hbot <- block.diag(Hbot, temp)
- }
- rbind(Htop, Hbot)
- }
-
-
- rrar.Ut <- function(y, tt, plag, MM) {
- Ut <- NULL
- if (plag>1)
- for(i in 1:plag) {
- Ut <- rbind(Ut, kronecker(diag(MM), cbind(y[tt-i,])))
- }
- Ut
- }
-
-
- rrar.UU <- function(y, plag, MM, n) {
- UU <- NULL
- for(i in (plag+1):n) {
- UU <- rbind(UU, t(rrar.Ut(y, i, plag, MM)))
- }
- UU
- }
-
-
- rrar.Wmat <- function(y, Ranks., MM, ki, plag, aa, uu, n, coeffs) {
- temp1 <- rrar.UU(y, plag, MM, n)
- temp2 <- t(rrar.Ht(plag, MM, Ranks., coeffs, aa, uu, ki))
- list(UU=temp1, Ht=temp2)
- }
+rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
+ index <- cumsum(c(aa, MM*Ranks.))
+ ans <- matrix(coeffs[(index[i]+1):index[i+1]],
+ Ranks.[i], MM, byrow = TRUE)
+ t(ans)
+}
+
+
+rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
+ ptr <- 0
+ Ak1 <- diag(MM)
+ for(jay in 1:MM) {
+ for(i in 1:MM) {
+ if (i > jay && (MM+1)-(Ranks.[jay]-1) <= i) {
+ ptr <- ptr + 1
+ Ak1[i,jay] <- coeffs[ptr]
+ }
+ }
+ }
+ if (aa > 0 && ptr != aa) stop("something wrong")
+ Ak1
+}
+
+
+rrar.Di <- function(i, Ranks.) {
+ if (Ranks.[1] == Ranks.[i]) diag(Ranks.[i]) else
+ rbind(diag(Ranks.[i]),
+ matrix(0, Ranks.[1] - Ranks.[i], Ranks.[i]))
+}
+
+
+rrar.Mi <- function(i, MM, Ranks., ki) {
+ if (Ranks.[ki[i]] == MM)
+ return(NULL)
+ hi <- Ranks.[ki[i]] - Ranks.[ki[i+1]]
+ Ji <- matrix(0, hi, Ranks.[1])
+ for(j in 1:hi) {
+ Ji[j,j+Ranks.[ki[i+1]]] <- 1
+ }
+ Mi <- matrix(0, MM-Ranks.[ki[i]], MM) # dim(Oi) == dim(Ji)
+ for(j in 1:(MM-Ranks.[ki[i]])) {
+ Mi[j,j+Ranks.[ki[i ]]] <- 1
+ }
+ kronecker(Mi, Ji)
+}
+
+
+rrar.Mmat <- function(MM, uu, Ranks., ki) {
+ Mmat <- NULL
+ for(ii in uu:1) {
+ Mmat <- rbind(Mmat, rrar.Mi(ii, MM, Ranks., ki))
+ }
+ Mmat
+}
+
+
+block.diag <- function(A, B) {
+ if (is.null(A) && is.null(B))
+ return(NULL)
+ if (!is.null(A) && is.null(B))
+ return(A)
+ if (is.null(A) && !is.null(B))
+ return(B)
+
+ A <- as.matrix(A)
+ B <- as.matrix(B)
+ temp <- cbind(A, matrix(0, nrow(A), ncol(B)))
+ rbind(temp, cbind(matrix(0, nrow(B), ncol(A)), B))
+}
+
+
+rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
+ Htop <- Hbot <- NULL
+ Mmat <- rrar.Mmat(MM, uu, Ranks., ki) # NULL if full rank
+ Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
+
+ if (!is.null(Mmat))
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
+ temp <- Di %*% t(Ci)
+ Htop <- cbind(Htop, Mmat %*% kronecker(diag(MM), temp))
+ }
+
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ temp <- kronecker(t(Di) %*% t(Ak1), diag(MM))
+ Hbot <- block.diag(Hbot, temp)
+ }
+ rbind(Htop, Hbot)
+}
+
+
+rrar.Ut <- function(y, tt, plag, MM) {
+ Ut <- NULL
+ if (plag>1)
+ for(i in 1:plag) {
+ Ut <- rbind(Ut, kronecker(diag(MM), cbind(y[tt-i,])))
+ }
+ Ut
+}
+
+
+rrar.UU <- function(y, plag, MM, n) {
+ UU <- NULL
+ for(i in (plag+1):n) {
+ UU <- rbind(UU, t(rrar.Ut(y, i, plag, MM)))
+ }
+ UU
+}
+
+
+rrar.Wmat <- function(y, Ranks., MM, ki, plag, aa, uu, n, coeffs) {
+ temp1 <- rrar.UU(y, plag, MM, n)
+ temp2 <- t(rrar.Ht(plag, MM, Ranks., coeffs, aa, uu, ki))
+ list(UU = temp1, Ht = temp2)
+}
rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
- if (stepsize <= 0 || stepsize > 1) {
- warning("bad value of stepsize; using 0.5 instead")
- stepsize <- 0.5
- }
- list(stepsize = stepsize,
- save.weight = as.logical(save.weight)[1])
+ if (stepsize <= 0 || stepsize > 1) {
+ warning("bad value of stepsize; using 0.5 instead")
+ stepsize <- 0.5
+ }
+ list(stepsize = stepsize,
+ save.weight = as.logical(save.weight)[1])
}
- rrar <- function(Ranks = 1, coefstart = NULL)
-{
- lag.p <- length(Ranks)
-
- new("vglmff",
- blurb = c("Nested reduced-rank vector autoregressive model AR(",
- lag.p, ")\n\n",
- "Link: ",
- namesof("mu_t", "identity"),
- ", t = ", paste(paste(1:lag.p, coll = ",", sep = "")) ,
- ""),
- initialize = eval(substitute(expression({
- Ranks. <- .Ranks
- plag <- length(Ranks.)
- nn <- nrow(x) # original n
- indices <- 1:plag
-
- copy_X_vlm <- TRUE # X_vlm_save matrix changes at each iteration
-
- dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
- if (any(dsrank != Ranks.))
- stop("Ranks must be a non-increasing sequence")
- if (!is.matrix(y) || ncol(y) == 1) {
- stop("response must be a matrix with more than one column")
- } else {
- MM <- ncol(y)
- ki <- udsrank <- unique(dsrank)
- uu <- length(udsrank)
- for(i in 1:uu)
- ki[i] <- max((1:plag)[dsrank == udsrank[i]])
- ki <- c(ki, plag+1) # For computing a
- Ranks. <- c(Ranks., 0) # For computing a
- aa <- sum( (MM-Ranks.[ki[1:uu]]) * (Ranks.[ki[1:uu]]-Ranks.[ki[-1]]) )
- }
- if (!intercept.only)
- warning("ignoring explanatory variables")
-
- if (any(MM < Ranks.))
- stop("'max(Ranks)' can only be ", MM, " or less")
- y.save <- y # Save the original
- if (any(w != 1))
- stop("all weights should be 1")
-
- new.coeffs <- .coefstart # Needed for iter = 1 of $weight
- new.coeffs <- if (length(new.coeffs))
- rep(new.coeffs, len = aa+sum(Ranks.)*MM) else
- runif(aa+sum(Ranks.)*MM)
- temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
- aa, uu, nn, new.coeffs)
- X_vlm_save <- temp8$UU %*% temp8$Ht
-
- if (!length(etastart)) {
- etastart <- X_vlm_save %*% new.coeffs
- etastart <- matrix(etastart, ncol = ncol(y), byrow = TRUE)
- }
-
- extra$Ranks. <- Ranks.; extra$aa <- aa
- extra$plag <- plag; extra$nn <- nn
- extra$MM <- MM; extra$coeffs <- new.coeffs;
- extra$y.save <- y.save
-
- keep.assign <- attr(x, "assign")
- x <- x[-indices, , drop = FALSE]
- if (is.R())
- attr(x, "assign") <- keep.assign
- y <- y[-indices, , drop = FALSE]
- w <- w[-indices]
- n.save <- n <- nn - plag
- }), list( .Ranks = Ranks, .coefstart = coefstart ))),
-
- linkinv = function(eta, extra = NULL) {
- aa <- extra$aa
- coeffs <- extra$coeffs
- MM <- extra$MM
- nn <- extra$nn
- plag <- extra$plag
- Ranks. <- extra$Ranks.
- y.save <- extra$y.save
-
- tt <- (1+plag):nn
- mu <- matrix(0, nn-plag, MM)
- Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
- for(i in 1:plag) {
- Di <- rrar.Di(i, Ranks.)
- Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
- mu <- mu + y.save[tt-i, , drop = FALSE] %*%
- t(Ak1 %*% Di %*% t(Ci))
- }
- mu
- },
- last = expression({
- misc$plag <- plag
- misc$Ranks <- Ranks.
- misc$Ak1 <- Ak1
- misc$omegahat <- omegahat
- misc$Cmatrices <- Cmatrices
- misc$Dmatrices <- Dmatrices
- misc$Hmatrix <- temp8$Ht
- misc$Phimatrices <- vector("list", plag)
- for(ii in 1:plag) {
- misc$Phimatrices[[ii]] = Ak1 %*% Dmatrices[[ii]] %*%
- t(Cmatrices[[ii]])
+ rrar <- function(Ranks = 1, coefstart = NULL) {
+ lag.p <- length(Ranks)
+
+ new("vglmff",
+ blurb = c("Nested reduced-rank vector autoregressive model AR(",
+ lag.p, ")\n\n",
+ "Link: ",
+ namesof("mu_t", "identity"),
+ ", t = ", paste(paste(1:lag.p, coll = ",", sep = ""))),
+ initialize = eval(substitute(expression({
+ Ranks. <- .Ranks
+ plag <- length(Ranks.)
+ nn <- nrow(x) # original n
+ indices <- 1:plag
+
+ copy_X_vlm <- TRUE # X_vlm_save matrix changes at each iteration
+
+ dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
+ if (any(dsrank != Ranks.))
+ stop("Ranks must be a non-increasing sequence")
+ if (!is.matrix(y) || ncol(y) == 1) {
+ stop("response must be a matrix with more than one column")
+ } else {
+ MM <- ncol(y)
+ ki <- udsrank <- unique(dsrank)
+ uu <- length(udsrank)
+ for(i in 1:uu)
+ ki[i] <- max((1:plag)[dsrank == udsrank[i]])
+ ki <- c(ki, plag+1) # For computing a
+ Ranks. <- c(Ranks., 0) # For computing a
+ aa <- sum( (MM-Ranks.[ki[1:uu]]) * (Ranks.[ki[1:uu]]-Ranks.[ki[-1]]) )
}
- misc$Z <- y.save %*% t(solve(Ak1))
- }),
- vfamily = "rrar",
- deriv = expression({
- temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
- X_vlm_save <- temp8$UU %*% temp8$Ht
-
- extra$coeffs <- new.coeffs
-
- resmat <- y
- tt <- (1+plag):nn
- Ak1 <- rrar.Ak1(MM, new.coeffs, Ranks., aa)
- Cmatrices <- Dmatrices <- vector("list", plag)
- for(ii in 1:plag) {
- Dmatrices[[ii]] <- Di <- rrar.Di(ii, Ranks.)
- Cmatrices[[ii]] <- Ci <- rrar.Ci(ii, new.coeffs, aa, Ranks., MM)
- resmat <- resmat - y.save[tt - ii, , drop = FALSE] %*%
- t(Ak1 %*% Di %*% t(Ci))
- }
- omegahat <- (t(resmat) %*% resmat) / n # MM x MM
- omegainv <- solve(omegahat)
-
- omegainv <- solve(omegahat)
- ind1 <- iam(NA,NA,MM,both = TRUE)
-
- wz = matrix(omegainv[cbind(ind1$row, ind1$col)],
- nn-plag, length(ind1$row), byrow = TRUE)
- mux22(t(wz), y-mu, M = extra$MM, as.matrix = TRUE)
- }),
- weight = expression({
- wz
- }))
+ if (!intercept.only)
+ warning("ignoring explanatory variables")
+
+ if (any(MM < Ranks.))
+ stop("'max(Ranks)' can only be ", MM, " or less")
+ y.save <- y # Save the original
+ if (any(w != 1))
+ stop("all weights should be 1")
+
+ new.coeffs <- .coefstart # Needed for iter = 1 of $weight
+ new.coeffs <- if (length(new.coeffs))
+ rep(new.coeffs, len = aa+sum(Ranks.)*MM) else
+ runif(aa+sum(Ranks.)*MM)
+ temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
+ aa, uu, nn, new.coeffs)
+ X_vlm_save <- temp8$UU %*% temp8$Ht
+
+ if (!length(etastart)) {
+ etastart <- X_vlm_save %*% new.coeffs
+ etastart <- matrix(etastart, ncol = ncol(y), byrow = TRUE)
+ }
+
+ extra$Ranks. <- Ranks.; extra$aa <- aa
+ extra$plag <- plag; extra$nn <- nn
+ extra$MM <- MM; extra$coeffs <- new.coeffs;
+ extra$y.save <- y.save
+
+ keep.assign <- attr(x, "assign")
+ x <- x[-indices, , drop = FALSE]
+ if (is.R())
+ attr(x, "assign") <- keep.assign
+ y <- y[-indices, , drop = FALSE]
+ w <- w[-indices]
+ n.save <- n <- nn - plag
+ }), list( .Ranks = Ranks, .coefstart = coefstart ))),
+
+ linkinv = function(eta, extra = NULL) {
+ aa <- extra$aa
+ coeffs <- extra$coeffs
+ MM <- extra$MM
+ nn <- extra$nn
+ plag <- extra$plag
+ Ranks. <- extra$Ranks.
+ y.save <- extra$y.save
+
+ tt <- (1+plag):nn
+ mu <- matrix(0, nn-plag, MM)
+ Ak1 <- rrar.Ak1(MM, coeffs, Ranks., aa)
+ for(i in 1:plag) {
+ Di <- rrar.Di(i, Ranks.)
+ Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
+ mu <- mu + y.save[tt-i, , drop = FALSE] %*%
+ t(Ak1 %*% Di %*% t(Ci))
+ }
+ mu
+ },
+ last = expression({
+ misc$plag <- plag
+ misc$Ranks <- Ranks.
+ misc$Ak1 <- Ak1
+ misc$omegahat <- omegahat
+ misc$Cmatrices <- Cmatrices
+ misc$Dmatrices <- Dmatrices
+ misc$Hmatrix <- temp8$Ht
+ misc$Phimatrices <- vector("list", plag)
+ for(ii in 1:plag) {
+ misc$Phimatrices[[ii]] <- Ak1 %*% Dmatrices[[ii]] %*%
+ t(Cmatrices[[ii]])
+ }
+ misc$Z <- y.save %*% t(solve(Ak1))
+ }),
+ vfamily = "rrar",
+ deriv = expression({
+ temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
+ X_vlm_save <- temp8$UU %*% temp8$Ht
+
+ extra$coeffs <- new.coeffs
+
+ resmat <- y
+ tt <- (1+plag):nn
+ Ak1 <- rrar.Ak1(MM, new.coeffs, Ranks., aa)
+ Cmatrices <- Dmatrices <- vector("list", plag)
+ for(ii in 1:plag) {
+ Dmatrices[[ii]] <- Di <- rrar.Di(ii, Ranks.)
+ Cmatrices[[ii]] <- Ci <- rrar.Ci(ii, new.coeffs, aa, Ranks., MM)
+ resmat <- resmat - y.save[tt - ii, , drop = FALSE] %*%
+ t(Ak1 %*% Di %*% t(Ci))
+ }
+ omegahat <- (t(resmat) %*% resmat) / n # MM x MM
+ omegainv <- solve(omegahat)
+
+ omegainv <- solve(omegahat)
+ ind1 <- iam(NA, NA, MM, both = TRUE)
+
+ wz <- matrix(omegainv[cbind(ind1$row, ind1$col)],
+ nn-plag, length(ind1$row), byrow = TRUE)
+ mux22(t(wz), y-mu, M = extra$MM, as.matrix = TRUE)
+ }),
+ weight = expression({
+ wz
+ }))
}
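A minimal sketch of calling the reformatted rrar() family (illustrative only;
the data frame 'tsdat' with columns y1, y2, y3 is invented). The response must
be a multi-column matrix, Ranks must be a non-increasing sequence with one
entry per lag, and explanatory variables other than the intercept are ignored:

    fit <- vglm(cbind(y1, y2, y3) ~ 1, rrar(Ranks = c(3, 1)),
                data = tsdat, trace = TRUE)
    summary(fit)
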
@@ -312,7 +313,8 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
", t = ", paste(paste(1:p.ar.lag, coll = ",", sep = ""))),
initialize = eval(substitute(expression({
plag <- .p.ar.lag
- predictors.names = namesof("mu", .link , earg = .earg , tag = FALSE)
+ predictors.names <- namesof("mu", .link , earg = .earg , tag = FALSE)
+
indices <- 1:plag
tt.index <- (1 + plag):nrow(x)
p_lm <- ncol(x)
@@ -348,7 +350,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
x <- x[-indices, , drop = FALSE]
- class(x) = "matrix"
+ class(x) <- "matrix"
y <- y[-indices]
w <- w[-indices]
n.save <- n <- n - plag
@@ -365,7 +367,9 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- c(mu = .link )
+
misc$earg <- list(mu = .earg )
+
misc$plag <- plag
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
@@ -413,7 +417,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
x[, 1:p_lm] <- x.save[tt.index, 1:p_lm] # Reinstate
for(ii in 1:plag) {
- temp = theta2eta(y.save[tt.index-ii], .link , earg = .earg )
+ temp <- theta2eta(y.save[tt.index-ii], .link , earg = .earg )
x[, 1:p_lm] <- x[, 1:p_lm] -
@@ -421,14 +425,14 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
x[, p_lm+ii] <- temp - x.save[tt.index-ii, 1:p_lm, drop = FALSE] %*%
new.coeffs[1:p_lm]
}
- class(x) = "matrix" # Added 27/2/02; 26/2/04
+ class(x) <- "matrix" # Added 27/2/02; 26/2/04
if (iter == 1)
old.coeffs <- new.coeffs
X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
- vary = switch( .link ,
+ vary <- switch( .link ,
identity = 1,
loge = mu,
reciprocal = mu^2,
@@ -444,8 +448,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
- if (FALSE)
-{
+ if (FALSE) {
setClass(Class = "Coef.rrar", representation(
"plag" = "integer",
"Ranks" = "integer",
@@ -482,16 +485,15 @@ show.Coef.rrar <- function(object) {
setMethod("Coef", "rrar",
- function(object, ...)
- Coef(object, ...))
+ function(object, ...)
+ Coef(object, ...))
setMethod("show", "Coef.rrar",
function(object)
- show.Coef.rrar(object))
-
+ show.Coef.rrar(object))
}
diff --git a/R/family.univariate.R b/R/family.univariate.R
index db1217e..de20f9f 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -57,8 +57,7 @@
mccullagh89 <- function(ltheta = "rhobit",
lnu = logoff(offset = 0.5),
itheta = NULL, inu = NULL,
- zero = NULL)
-{
+ zero = NULL) {
@@ -79,22 +78,21 @@
new("vglmff",
blurb = c("McCullagh (1989)'s distribution \n",
- "f(y) = (1-2*theta*y+theta^2)^(-nu) * [1 - y^2]^(nu-1/2) /\n",
- " Beta[nu+1/2, 1/2], ",
- " -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
- "Links: ",
- namesof("theta", ltheta, earg = etheta), ", ",
- namesof("nu", lnu, earg = enu),
- "\n",
- "\n",
- "Mean: nu*theta/(1+nu)"),
+ "f(y) = (1-2*theta*y+theta^2)^(-nu) * [1 - y^2]^(nu-1/2) /\n",
+ " Beta[nu+1/2, 1/2], ",
+ " -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
+ "Links: ",
+ namesof("theta", ltheta, earg = etheta), ", ",
+ namesof("nu", lnu, earg = enu),
+ "\n", "\n",
+ "Mean: nu*theta/(1+nu)"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w, y)
- y = as.numeric(y)
+ y <- as.numeric(y)
if (any(y <= -1 | y >= 1))
stop("all y values must be in (-1, 1)")
@@ -103,22 +101,22 @@
namesof("nu", .lnu , earg = .enu, tag = FALSE))
if (!length(etastart)) {
- theta.init = if (length( .itheta ))
+ theta.init <- if (length( .itheta ))
rep( .itheta, length = n) else {
mccullagh89.aux <- function(thetaval, y, x, w, extraargs)
mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
- theta.grid = seq(-0.9, 0.9, by=0.05)
- try.this = getMaxMin(theta.grid, objfun = mccullagh89.aux,
+ theta.grid <- seq(-0.9, 0.9, by=0.05)
+ try.this <- getMaxMin(theta.grid, objfun = mccullagh89.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- try.this = rep(try.this, length.out = n)
+ try.this <- rep(try.this, length.out = n)
try.this
}
- tmp = y / (theta.init - y)
- tmp[tmp < -0.4] = -0.4
- tmp[tmp > 10.0] = 10.0
- nu.init = rep(if (length( .inu)) .inu else tmp, length = n)
- nu.init[!is.finite(nu.init)] = 0.4
+ tmp <- y / (theta.init - y)
+ tmp[tmp < -0.4] <- -0.4
+ tmp[tmp > 10.0] <- 10.0
+ nu.init <- rep(if (length( .inu)) .inu else tmp, length = n)
+ nu.init[!is.finite(nu.init)] <- 0.4
etastart <-
cbind(theta2eta(theta.init, .ltheta , earg = .etheta ),
theta2eta(nu.init, .lnu, earg = .enu ))
@@ -126,19 +124,21 @@
}), list( .ltheta = ltheta, .lnu = lnu, .inu = inu, .itheta = itheta,
.etheta = etheta, .enu = enu ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+ Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu <- eta2theta(eta[, 2], .lnu, earg = .enu )
nu * Theta / (1 + nu)
}, list( .ltheta = ltheta, .lnu = lnu,
.etheta = etheta, .enu = enu ))),
last = eval(substitute(expression({
- misc$link = c("theta" = .ltheta , "nu" = .lnu)
- misc$earg = list("theta" = .etheta, "nu" = .enu )
+ misc$link <- c("theta" = .ltheta , "nu" = .lnu )
+
+ misc$earg <- list("theta" = .etheta , "nu" = .enu )
+
}), list( .ltheta = ltheta, .lnu = lnu, .etheta = etheta, .enu = enu ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+ Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu <- eta2theta(eta[, 2], .lnu, earg = .enu )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * ((nu-0.5)*log1p(-y^2) -
@@ -148,14 +148,14 @@
.etheta = etheta, .enu = enu ))),
vfamily = c("mccullagh89"),
deriv = eval(substitute(expression({
- Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+ Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu <- eta2theta(eta[, 2], .lnu, earg = .enu )
- dTheta.deta = dtheta.deta(Theta, .ltheta , earg = .etheta )
- dnu.deta = dtheta.deta(nu, .lnu, earg = .enu )
+ dTheta.deta <- dtheta.deta(Theta, .ltheta , earg = .etheta )
+ dnu.deta <- dtheta.deta(nu, .lnu, earg = .enu )
- dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
- dl.dnu = log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
+ dl.dTheta <- 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
+ dl.dnu <- log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
digamma(nu + 0.5) + digamma(nu + 1)
c(w) * cbind(dl.dTheta * dTheta.deta,
@@ -163,12 +163,12 @@
}), list( .ltheta = ltheta, .lnu = lnu,
.etheta = etheta, .enu = enu ))),
weight = eval(substitute(expression({
- d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
- d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
+ d2l.dTheta2 <- (2 * nu^2 / (1+nu)) / (1-Theta^2)
+ d2l.dnu2 <- trigamma(nu+0.5) - trigamma(nu+1)
- wz = matrix(as.numeric(NA), n, M) # diagonal matrix
- wz[, iam(1, 1, M)] = d2l.dTheta2 * dTheta.deta^2
- wz[, iam(2, 2, M)] = d2l.dnu2 * dnu.deta^2
+ wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[, iam(1, 1, M)] <- d2l.dTheta2 * dTheta.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dnu2 * dnu.deta^2
c(w) * wz
}), list( .ltheta = ltheta, .lnu = lnu ))))
@@ -177,8 +177,7 @@
-hzeta.control <- function(save.weight = TRUE, ...)
-{
+hzeta.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -199,14 +198,14 @@ hzeta.control <- function(save.weight = TRUE, ...)
new("vglmff",
- blurb = c(
- "Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),\n",
- " alpha>0, y = 1, 2,....\n\n",
- "Link: ",
- namesof("alpha", link, earg = earg), "\n\n",
- "Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
- "\n",
- "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
+ blurb = c("Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - ",
+ "(2y+1)^(-alpha),\n",
+ " alpha>0, y = 1, 2,....\n\n",
+ "Link: ",
+ namesof("alpha", link, earg = earg), "\n\n",
+ "Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
+ "\n",
+ "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y)
@@ -218,28 +217,31 @@ hzeta.control <- function(save.weight = TRUE, ...)
namesof("alpha", .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- a.init = if (length( .ialpha)) .ialpha else {
+ a.init <- if (length( .ialpha)) .ialpha else {
if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
if (meany < 2.5) 1.4 else 1.1
}
- a.init = rep(a.init, length = n)
+ a.init <- rep(a.init, length = n)
etastart <- theta2eta(a.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .ialpha = ialpha ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .link , earg = .earg )
- mu = (1-2^(-alpha)) * zeta(alpha)
- mu[alpha <= 1] = Inf
+ alpha <- eta2theta(eta, .link , earg = .earg )
+ mu <- (1-2^(-alpha)) * zeta(alpha)
+ mu[alpha <= 1] <- Inf
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(alpha = .link)
- misc$earg = list(alpha = .earg )
- misc$nsimEIM = .nsimEIM
+ misc$link <- c(alpha = .link)
+
+ misc$earg <- list(alpha = .earg )
+
+ misc$nsimEIM <- .nsimEIM
+
}), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .link , earg = .earg )
+ alpha <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dhzeta(x = y, alpha = alpha, log = TRUE))
@@ -247,35 +249,35 @@ hzeta.control <- function(save.weight = TRUE, ...)
}, list( .link = link, .earg = earg ))),
vfamily = c("hzeta"),
deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .link , earg = .earg )
+ alpha <- eta2theta(eta, .link , earg = .earg )
- dalpha.deta = dtheta.deta(alpha, .link , earg = .earg )
+ dalpha.deta <- dtheta.deta(alpha, .link , earg = .earg )
- d3 = deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
+ d3 <- deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
"alpha", hessian = FALSE)
- eval.d3 = eval(d3)
+ eval.d3 <- eval(d3)
- dl.dalpha = attr(eval.d3, "gradient")
+ dl.dalpha <- attr(eval.d3, "gradient")
c(w) * dl.dalpha * dalpha.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- sd3 = deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
+ sd3 <- deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
"alpha", hessian = FALSE)
- run.var = 0
+ run.var <- 0
for(ii in 1:( .nsimEIM )) {
- ysim = rhzeta(n, alpha=alpha)
- eval.sd3 = eval(sd3)
- dl.dalpha = attr(eval.d3, "gradient")
+ ysim <- rhzeta(n, alpha=alpha)
+ eval.sd3 <- eval(sd3)
+ dl.dalpha <- attr(eval.d3, "gradient")
rm(ysim)
- temp3 = dl.dalpha
- run.var = ((ii-1) * run.var + temp3^2) / ii
+ temp3 <- dl.dalpha
+ run.var <- ((ii-1) * run.var + temp3^2) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(cbind(run.var)),
n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * dalpha.deta^2
+ wz <- wz * dalpha.deta^2
c(w) * wz
}), list( .nsimEIM = nsimEIM ))))
}
@@ -290,14 +292,16 @@ dhzeta <- function(x, alpha, log = FALSE) {
if (!is.Numeric(alpha, positive = TRUE))
stop("'alpha' must be numeric and have positive values")
- nn = max(length(x), length(alpha))
- x = rep(x, length.out = nn);
- alpha = rep(alpha, length.out = nn)
- ox = !is.finite(x)
- zero = ox | round(x) != x | x < 1
- ans = rep(0, length.out = nn)
- ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) -
- (2*x[!zero]+1)^(-alpha[!zero])
+
+ nn <- max(length(x), length(alpha))
+ x <- rep(x, length.out = nn);
+ alpha <- rep(alpha, length.out = nn)
+
+ ox <- !is.finite(x)
+ zero <- ox | round(x) != x | x < 1
+ ans <- rep(0, length.out = nn)
+ ans[!zero] <- (2*x[!zero]-1)^(-alpha[!zero]) -
+ (2*x[!zero]+1)^(-alpha[!zero])
if (log.arg) log(ans) else ans
}
@@ -305,16 +309,16 @@ dhzeta <- function(x, alpha, log = FALSE) {
phzeta <- function(q, alpha) {
- nn = max(length(q), length(alpha))
- q = rep(q, length.out = nn)
- alpha = rep(alpha, length.out = nn)
- oq = !is.finite(q)
- zero = oq | q < 1
- q = floor(q)
- ans = 0 * q
- ans[!zero] = 1 - (2*q[!zero]+1)^(-alpha[!zero])
+ nn <- max(length(q), length(alpha))
+ q <- rep(q, length.out = nn)
+ alpha <- rep(alpha, length.out = nn)
+ oq <- !is.finite(q)
+ zero <- oq | q < 1
+ q <- floor(q)
+ ans <- 0 * q
+ ans[!zero] <- 1 - (2*q[!zero]+1)^(-alpha[!zero])
- ans[alpha <= 0] = NaN
+ ans[alpha <= 0] <- NaN
ans
}
@@ -326,11 +330,11 @@ qhzeta <- function(p, alpha) {
any(p >= 1))
stop("argument 'p' must have values inside the interval (0,1)")
- nn = max(length(p), length(alpha))
- p = rep(p, length.out = nn)
- alpha = rep(alpha, length.out = nn)
- ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
- ans[alpha <= 0] = NaN
+ nn <- max(length(p), length(alpha))
+ p <- rep(p, length.out = nn)
+ alpha <- rep(alpha, length.out = nn)
+ ans <- (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
+ ans[alpha <= 0] <- NaN
floor(ans + 1)
}
@@ -338,8 +342,8 @@ qhzeta <- function(p, alpha) {
rhzeta <- function(n, alpha) {
- ans = (runif(n)^(-1/alpha) - 1) / 2
- ans[alpha <= 0] = NaN
+ ans <- (runif(n)^(-1/alpha) - 1) / 2
+ ans[alpha <= 0] <- NaN
floor(ans + 1)
}
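An illustrative check of the d/p/q/r helpers above, a minimal sketch assuming VGAM (with dhzeta, qhzeta, rhzeta and zeta exported) is attached: rhzeta() draws by the same inverse-CDF formula as qhzeta(), so both sample means should sit near the blurb's mean (1 - 2^(-alpha)) * zeta(alpha) when alpha > 1.

  library(VGAM)          # assumes dhzeta/qhzeta/rhzeta and zeta are exported
  set.seed(1)
  alpha <- 3                                  # mean and variance finite for alpha > 2
  y1 <- rhzeta(10000, alpha = alpha)
  y2 <- qhzeta(runif(10000), alpha = alpha)   # same inverse-CDF construction
  c(mean(y1), mean(y2), (1 - 2^(-alpha)) * zeta(alpha))   # all approximately equal
  sum(dhzeta(1:1000, alpha = alpha))          # telescoping pmf sums to about 1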
@@ -390,17 +394,17 @@ rhzeta <- function(n, alpha) {
mycmatrix <- if (M == 1) diag(1) else diag(M)
}
constraints <- cm.vgam(mycmatrix, x, .PARALLEL ,
- constraints, intercept.apply = TRUE)
+ constraints, apply.int = TRUE)
constraints <- cm.zero.vgam(constraints, x, .ZERO , M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
if (length(mustart.orig))
- mustart = mustart.orig
+ mustart <- mustart.orig
y <- as.matrix(y)
ycount <- as.matrix(y * c(w))
@@ -442,8 +446,11 @@ rhzeta <- function(n, alpha) {
prop.table(temp, 1)
}, list( .ephi = ephi, .lphi = lphi ))),
last = eval(substitute(expression({
- misc$link <- c(rep("noLinkFunction", length = M-1), .lphi)
- names(misc$link) <- c(paste("prob", 1:(M-1), sep = ""), "phi")
+
+ misc$link <- c(rep("loge", length = M-1), .lphi )
+ names(misc$link) <- c(
+ paste("prob[,", 1:(M-1), "]/prob[,", M, "])", sep = ""),
+ "phi")
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -454,12 +461,12 @@ rhzeta <- function(n, alpha) {
misc$expected <- TRUE
if (intercept.only) {
- misc$shape = probs[1,] * (1/phi[1]-1) # phi & probs computed in @deriv
+ misc$shape <- probs[1,] * (1/phi[1]-1) # phi & probs computed in @deriv
}
}), list( .ephi = ephi, .lphi = lphi ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M = if (is.matrix(eta)) ncol(eta) else 1
+ M <- if (is.matrix(eta)) ncol(eta) else 1
probs <- cbind(exp(eta[, -M]), 1)
probs <- prop.table(probs, 1)
phi <- eta2theta(eta[, M], .lphi , earg = .ephi )
@@ -474,20 +481,20 @@ rhzeta <- function(n, alpha) {
omega <- extra$n2
for(jay in 1:M) {
maxyj <- max(ycount[, jay])
- loopOveri <- n < maxyj
+ loopOveri <- (n < maxyj)
if (loopOveri) {
for(iii in 1:n) {
rrr <- 1:ycount[iii, jay] # a vector
if (ycount[iii, jay] > 0)
- ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
- probs[iii, jay] + (rrr-1)*phi[iii]))
+ ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
+ probs[iii, jay] + (rrr-1)*phi[iii]))
}
} else {
for(rrr in 1:maxyj) {
index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
if (any(index))
ans[index] <- ans[index] + log((1-phi[index]) *
- probs[index, jay] + (rrr-1)*phi[index])
+ probs[index, jay] + (rrr-1) * phi[index])
}
}
} # end of jay loop
@@ -497,12 +504,12 @@ rhzeta <- function(n, alpha) {
if (loopOveri) {
for(iii in 1:n) {
rrr <- 1:omega[iii]
- ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1)*phi[iii]))
+ ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1) * phi[iii]))
}
} else {
for(rrr in 1:maxomega) {
ind8 <- rrr <= omega
- ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1)*phi[ind8])
+ ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1) * phi[ind8])
}
}
sum(ans)
@@ -524,41 +531,41 @@ rhzeta <- function(n, alpha) {
ycount <- round(ycount)
for(jay in 1:M) {
- maxyj <- max(ycount[, jay])
- loopOveri <- n < maxyj
- if (loopOveri) {
- for(iii in 1:n) {
- rrr <- 1:ycount[iii, jay]
- if (ycount[iii, jay] > 0) {
- PHI <- phi[iii]
- dl.dphi[iii] <- dl.dphi[iii] +
+ maxyj <- max(ycount[, jay])
+ loopOveri <- n < maxyj
+ if (loopOveri) {
+ for(iii in 1:n) {
+ rrr <- 1:ycount[iii, jay]
+ if (ycount[iii, jay] > 0) {
+ PHI <- phi[iii]
+ dl.dphi[iii] <- dl.dphi[iii] +
sum((rrr-1-probs[iii, jay]) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI))
- tmp9 <- (1-PHI) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)
- if (jay < M) {
- dl.dprobs[iii, jay] <- dl.dprobs[iii, jay] + sum(tmp9)
- } else {
- for(jay2 in 1:(M-1))
- dl.dprobs[iii, jay2]<-dl.dprobs[iii, jay2]-sum(tmp9)
- }
- }
- }
- } else {
- for(rrr in 1:maxyj) {
- index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
- PHI <- phi[index]
- dl.dphi[index] <- dl.dphi[index] +
- (rrr-1-probs[index, jay]) / ((1-PHI)*probs[index, jay] +
- (rrr-1)*PHI)
- tmp9 <- (1-PHI) / ((1-PHI)*probs[index, jay] + (rrr-1)*PHI)
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)
if (jay < M) {
- dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
+ dl.dprobs[iii, jay] <- dl.dprobs[iii, jay] + sum(tmp9)
} else {
- for(jay2 in 1:(M-1))
- dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
+ for(jay2 in 1:(M-1))
+ dl.dprobs[iii, jay2]<-dl.dprobs[iii, jay2]-sum(tmp9)
}
}
}
+ } else {
+ for(rrr in 1:maxyj) {
+ index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
+ PHI <- phi[index]
+ dl.dphi[index] <- dl.dphi[index] +
+ (rrr-1-probs[index, jay]) / ((1-PHI)*probs[index, jay] +
+ (rrr-1)*PHI)
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[index, jay] + (rrr-1)*PHI)
+ if (jay < M) {
+ dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
+ } else {
+ for(jay2 in 1:(M-1))
+ dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
+ }
+ }
+ }
} # end of jay loop
maxomega <- max(omega)
loopOveri <- n < maxomega
@@ -670,8 +677,7 @@ rhzeta <- function(n, alpha) {
dirmul.old <- function(link = "loge", init.alpha = 0.01,
- parallel = FALSE, zero = NULL)
-{
+ parallel = FALSE, zero = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -694,51 +700,51 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
"Posterior mean: (n_j + shape_j)/(2*sum(n_j) + ",
"sum(shape_j))\n"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel ,
- constraints, intercept.apply = TRUE)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+ constraints, apply.int = TRUE)
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
- y = as.matrix(y)
- M = ncol(y)
+ y <- as.matrix(y)
+ M <- ncol(y)
if (any(y != round(y )))
stop("all y values must be integer-valued")
predictors.names <- namesof(paste("shape", 1:M, sep = ""),
.link , earg = .earg , short = TRUE)
- extra$n2 = rowSums(y) # Nb. don't multiply by 2
- extra$y = y
+ extra$n2 <- rowSums(y) # Nb. don't multiply by 2
+ extra$y <- y
if (!length(etastart)) {
- yy = if (is.numeric( .init.alpha))
+ yy <- if (is.numeric( .init.alpha))
matrix( .init.alpha, n, M, byrow = TRUE) else
matrix(runif(n*M), n, M)
etastart <- theta2eta(yy, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .init.alpha = init.alpha ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .link , earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, length.out = M))
+ shape <- eta2theta(eta, .link , earg = .earg )
+ M <- if (is.matrix(eta)) ncol(eta) else 1
+ sumshape <- as.vector(shape %*% rep(1, length.out = M))
(extra$y + shape) / (extra$n2 + sumshape)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
- names(misc$link) = paste("shape", 1:M, sep = "")
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- paste("shape", 1:M, sep = "")
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:M)
- misc$earg[[ii]] = .earg
+ misc$earg[[ii]] <- .earg
- misc$pooled.weight = pooled.weight
+ misc$pooled.weight <- pooled.weight
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .link , earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, length.out = M))
+ shape <- eta2theta(eta, .link , earg = .earg )
+ M <- if (is.matrix(eta)) ncol(eta) else 1
+ sumshape <- as.vector(shape %*% rep(1, length.out = M))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
@@ -746,32 +752,32 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
}, list( .link = link, .earg = earg ))),
vfamily = c("dirmul.old"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta, .link , earg = .earg )
+ shape <- eta2theta(eta, .link , earg = .earg )
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
+ sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ dl.dsh <- digamma(sumshape) - digamma(extra$n2 + sumshape) +
digamma(y + shape) - digamma(shape)
- dsh.deta = dtheta.deta(shape, .link , earg = .earg )
+ dsh.deta <- dtheta.deta(shape, .link , earg = .earg )
c(w) * dl.dsh * dsh.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- index = iam(NA, NA, M, both = TRUE, diag = TRUE)
- wz = matrix(trigamma(sumshape) - trigamma(extra$n2 + sumshape),
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz <- matrix(trigamma(sumshape) - trigamma(extra$n2 + sumshape),
nrow = n, ncol = dimm(M))
- wz[, 1:M] = wz[, 1:M] + trigamma(y + shape) - trigamma(shape)
- wz = -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+ wz[, 1:M] <- wz[, 1:M] + trigamma(y + shape) - trigamma(shape)
+ wz <- -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
if (TRUE && intercept.only) {
- sumw = sum(w)
+ sumw <- sum(w)
for(ii in 1:ncol(wz))
- wz[, ii] = sum(wz[, ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else
- pooled.weight = FALSE
+ pooled.weight <- FALSE
wz
}), list( .link = link, .earg = earg ))))
@@ -784,21 +790,21 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
rdiric <- function(n, shape, dimension = NULL) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (!is.numeric(dimension))
- dimension = length(shape)
- shape = rep(shape, length.out = dimension)
+ dimension <- length(shape)
+ shape <- rep(shape, length.out = dimension)
- ans = rgamma(use.n * dimension,
- rep(shape, rep(use.n, dimension)))
- dim(ans) = c(use.n, dimension)
+ ans <- rgamma(use.n * dimension,
+ rep(shape, rep(use.n, dimension)))
+ dim(ans) <- c(use.n, dimension)
- ans = ans / rowSums(ans)
+ ans <- ans / rowSums(ans)
ans
}
@@ -826,7 +832,7 @@ rdiric <- function(n, shape, dimension = NULL) {
"Mean: shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
- constraints, int= TRUE)
+ constraints, apply.int = TRUE)
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -907,7 +913,7 @@ rdiric <- function(n, shape, dimension = NULL) {
- deriv.arg = deriv
+ deriv.arg <- deriv
rm(deriv)
if (!is.Numeric(deriv.arg, allowable.length = 1,
integer.valued = TRUE))
@@ -934,9 +940,9 @@ rdiric <- function(n, shape, dimension = NULL) {
special2 <- Re(x) < 0
if (any(special2)) {
- x2 = x[special2]
- cx = 1-x2
- ans[special2] = 2^(x2) * pi^(x2-1) * sin(pi*x2/2) *
+ x2 <- x[special2]
+ cx <- 1-x2
+ ans[special2] <- 2^(x2) * pi^(x2-1) * sin(pi*x2/2) *
gamma(cx) * Recall(cx)
}
@@ -946,27 +952,26 @@ rdiric <- function(n, shape, dimension = NULL) {
return(ans)
}
- a = 12; k = 8
- B = c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
- ans = 0
+ a <- 12; k <- 8
+ B <- c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
+ ans <- 0
for(ii in 1:(a-1))
- ans = ans + 1.0 / ii^x
- ans = ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
+ ans <- ans + 1.0 / ii^x
+ ans <- ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
- term = (x/2) / a^(x+1)
- ans = ans + term * B[1]
+ term <- (x/2) / a^(x+1)
+ ans <- ans + term * B[1]
for(mm in 2:k) {
- term = term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
- ans = ans + term * B[mm]
+ term <- term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
+ ans <- ans + term * B[mm]
}
ans
}
- Zeta.derivative <- function(x, deriv.arg = 0)
-{
+ Zeta.derivative <- function(x, deriv.arg = 0) {
if (!is.Numeric(deriv.arg, allowable.length = 1,
@@ -980,25 +985,24 @@ rdiric <- function(n, shape, dimension = NULL) {
if (any(x < 0))
stop("Sorry, currently cannot handle x < 0")
- ok = is.finite(x) & x > 0 & x != 1 # Handles NAs
- ans = rep(as.numeric(NA), length(x))
- nn = sum(ok) # Effective length (excludes x < 0 and x = 1 values)
+ ok <- is.finite(x) & x > 0 & x != 1 # Handles NAs
+ ans <- rep(as.numeric(NA), length(x))
+ nn <- sum(ok) # Effective length (excludes x < 0 and x = 1 values)
if (nn)
- ans[ok] = dotC(name = "vzetawr", as.double(x[ok]), ans = double(nn),
+ ans[ok] <- dotC(name = "vzetawr", as.double(x[ok]), ans = double(nn),
as.integer(deriv.arg), as.integer(nn))$ans
if (deriv.arg == 0)
- ans[is.finite(x) & abs(x) < 1.0e-12] = -0.5
+ ans[is.finite(x) & abs(x) < 1.0e-12] <- -0.5
ans
}
-dzeta <- function(x, p, log = FALSE)
-{
+dzeta <- function(x, p, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1006,28 +1010,27 @@ dzeta <- function(x, p, log = FALSE)
if (!is.Numeric(p, positive = TRUE)) # || min(p) <= 1
stop("'p' must be numeric and > 0")
- LLL = max(length(p), length(x))
- x = rep(x, length.out = LLL);
- p = rep(p, length.out = LLL)
+ LLL <- max(length(p), length(x))
+ x <- rep(x, length.out = LLL);
+ p <- rep(p, length.out = LLL)
- ox = !is.finite(x)
- zero = ox | round(x) != x | x < 1
+ ox <- !is.finite(x)
+ zero <- ox | round(x) != x | x < 1
if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
- ans = rep(if (log.arg) log(0) else 0, length.out = LLL)
+ ans <- rep(if (log.arg) log(0) else 0, length.out = LLL)
if (any(!zero)) {
if (log.arg) {
- ans[!zero] = (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
+ ans[!zero] <- (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
} else {
- ans[!zero] = x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
+ ans[!zero] <- x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
}
}
- if (any(ox)) ans[ox] = NA
+ if (any(ox)) ans[ox] <- NA
ans
}
- zetaff <- function(link = "loge", init.p = NULL, zero = NULL)
-{
+ zetaff <- function(link = "loge", init.p = NULL, zero = NULL) {
if (length(init.p) && !is.Numeric(init.p, positive = TRUE))
@@ -1126,7 +1129,7 @@ dzeta <- function(x, p, log = FALSE)
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- pp = eta2theta(eta, .link , earg = .earg )
+ pp <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzeta(x = y, p = pp, log = TRUE))
@@ -1134,13 +1137,13 @@ dzeta <- function(x, p, log = FALSE)
}, list( .link = link, .earg = earg ))),
vfamily = c("zetaff"),
deriv = eval(substitute(expression({
- pp = eta2theta(eta, .link , earg = .earg )
+ pp <- eta2theta(eta, .link , earg = .earg )
- fred1 = zeta(pp+1)
- fred2 = zeta(pp+1, deriv = 1)
- dl.dpp = -log(y) - fred2 / fred1
+ fred1 <- zeta(pp+1)
+ fred2 <- zeta(pp+1, deriv = 1)
+ dl.dpp <- -log(y) - fred2 / fred1
- dpp.deta = dtheta.deta(pp, .link , earg = .earg )
+ dpp.deta <- dtheta.deta(pp, .link , earg = .earg )
c(w) * dl.dpp * dpp.deta
}), list( .link = link, .earg = earg ))),
@@ -1164,22 +1167,32 @@ gharmonic <- function(n, s = 1, lognexponent = 0) {
if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
sum((1:n)^(-s))
} else {
- LEN = max(length(n), length(s))
- n = rep(n, length.out = LEN)
- ans = s = rep(s, length.out = LEN)
+ LEN <- max(length(n), length(s))
+ n <- rep(n, length.out = LEN)
+ ans <- s <- rep(s, length.out = LEN)
if (lognexponent != 0) {
for(ii in 1:LEN)
- ans[ii] = sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
+ ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
} else
for(ii in 1:LEN)
- ans[ii] = sum((1:n[ii])^(-s[ii]))
+ ans[ii] <- sum((1:n[ii])^(-s[ii]))
ans
}
}
-dzipf <- function(x, N, s, log = FALSE)
-{
+
+rzipf <- function(n, N, s) {
+ r <- runif(n)
+ sapply(r, function(x) {min(which(pzipf(1:N, N, s) > x))})
+}
+
+
+
+
+
+
+dzipf <- function(x, N, s, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1191,19 +1204,19 @@ dzipf <- function(x, N, s, log = FALSE)
stop("bad input for argument 'N'")
if (!is.Numeric(s, positive = TRUE))
stop("bad input for argument 's'")
- nn = max(length(x), length(N), length(s))
- x = rep(x, length.out = nn);
- N = rep(N, length.out = nn);
- s = rep(s, length.out = nn);
- ox = !is.finite(x)
- zero = ox | round(x) != x | x < 1 | x > N
- ans = (if (log.arg) log(0) else 0) * x
+ nn <- max(length(x), length(N), length(s))
+ x <- rep(x, length.out = nn);
+ N <- rep(N, length.out = nn);
+ s <- rep(s, length.out = nn);
+ ox <- !is.finite(x)
+ zero <- ox | round(x) != x | x < 1 | x > N
+ ans <- (if (log.arg) log(0) else 0) * x
if (any(!zero))
if (log.arg) {
- ans[!zero] = (-s[!zero]) * log(x[!zero]) -
+ ans[!zero] <- (-s[!zero]) * log(x[!zero]) -
log(gharmonic(N[!zero], s[!zero]))
} else {
- ans[!zero] = x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
+ ans[!zero] <- x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
}
ans
}
@@ -1218,17 +1231,17 @@ pzipf <- function(q, N, s) {
if (!is.Numeric(s, positive = TRUE))
stop("bad input for argument 's'")
- nn = max(length(q), length(N), length(s))
- q = rep(q, length.out = nn);
- N = rep(N, length.out = nn);
- s = rep(s, length.out = nn);
- oq = !is.finite(q)
- zeroOR1 = oq | q < 1 | q >= N
- floorq = floor(q)
- ans = 0 * floorq
- ans[oq | q >= N] = 1
+ nn <- max(length(q), length(N), length(s))
+ q <- rep(q, length.out = nn);
+ N <- rep(N, length.out = nn);
+ s <- rep(s, length.out = nn);
+ oq <- !is.finite(q)
+ zeroOR1 <- oq | q < 1 | q >= N
+ floorq <- floor(q)
+ ans <- 0 * floorq
+ ans[oq | q >= N] <- 1
if (any(!zeroOR1))
- ans[!zeroOR1] = gharmonic(floorq[!zeroOR1], s[!zeroOR1]) /
+ ans[!zeroOR1] <- gharmonic(floorq[!zeroOR1], s[!zeroOR1]) /
gharmonic(N[!zeroOR1], s[!zeroOR1])
ans
}
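A quick sanity sketch for the Zipf helpers above, assuming VGAM 0.9-1 is attached and the new rzipf() is exported alongside dzipf() and pzipf(): rzipf() inverts pzipf() numerically, so cumulative sums of dzipf() should reproduce pzipf(), and the sample mean should be near sum(x * dzipf(x, N, s)).

  library(VGAM)          # rzipf() is assumed to be exported in 0.9-1
  set.seed(2)
  N <- 30; s <- 1.5
  y <- rzipf(2000, N = N, s = s)
  c(mean(y), sum((1:N) * dzipf(1:N, N = N, s = s)))       # close to each other
  max(abs(cumsum(dzipf(1:N, N = N, s = s)) -
          pzipf(1:N, N = N, s = s)))                      # essentially zero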
@@ -1240,7 +1253,7 @@ pzipf <- function(q, N, s) {
integer.valued = TRUE, allowable.length = 1) ||
N <= 1))
stop("bad input for argument 'N'")
- enteredN = length(N)
+ enteredN <- length(N)
if (length(init.s) && !is.Numeric(init.s, positive = TRUE))
stop("argument 'init.s' must be > 0")
@@ -1267,30 +1280,30 @@ pzipf <- function(q, N, s) {
predictors.names <- namesof("s", .link , earg = .earg , tag = FALSE)
- NN = .N
+ NN <- .N
if (!is.Numeric(NN, allowable.length = 1,
positive = TRUE, integer.valued = TRUE))
- NN = max(y)
+ NN <- max(y)
if (max(y) > NN)
stop("maximum of the response is greater than argument 'N'")
if (any(y < 1))
stop("all response values must be in 1, 2, 3,...,N( = ", NN,")")
- extra$N = NN
+ extra$N <- NN
if (!length(etastart)) {
llfun <- function(ss, y, N, w) {
sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
}
- ss.init = if (length( .init.s )) .init.s else
+ ss.init <- if (length( .init.s )) .init.s else
getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
llfun=llfun,
y = y, N=extra$N, w = w)
- ss.init = rep(ss.init, length = length(y))
+ ss.init <- rep(ss.init, length = length(y))
if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
etastart <- theta2eta(ss.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .init.s = init.s, .N = N ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- ss = eta2theta(eta, .link , earg = .earg )
+ ss <- eta2theta(eta, .link , earg = .earg )
gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
@@ -1301,7 +1314,7 @@ pzipf <- function(q, N, s) {
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ss = eta2theta(eta, .link , earg = .earg )
+ ss <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
@@ -1309,25 +1322,24 @@ pzipf <- function(q, N, s) {
}, list( .link = link, .earg = earg ))),
vfamily = c("zipf"),
deriv = eval(substitute(expression({
- ss = eta2theta(eta, .link , earg = .earg )
- fred1 = gharmonic(extra$N, ss)
- fred2 = gharmonic(extra$N, ss, lognexp = 1)
- dl.dss = -log(y) + fred2 / fred1
- dss.deta = dtheta.deta(ss, .link , earg = .earg )
- d2ss.deta2 = d2theta.deta2(ss, .link , earg = .earg )
+ ss <- eta2theta(eta, .link , earg = .earg )
+ fred1 <- gharmonic(extra$N, ss)
+ fred2 <- gharmonic(extra$N, ss, lognexp = 1)
+ dl.dss <- -log(y) + fred2 / fred1
+ dss.deta <- dtheta.deta(ss, .link , earg = .earg )
+ d2ss.deta2 <- d2theta.deta2(ss, .link , earg = .earg )
c(w) * dl.dss * dss.deta
}), list( .link = link, .earg = earg ))),
weight = expression({
- d2l.dss = gharmonic(extra$N, ss, lognexp = 2) / fred1 - (fred2/fred1)^2
- wz = c(w) * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
+ d2l.dss <- gharmonic(extra$N, ss, lognexp = 2) / fred1 - (fred2/fred1)^2
+ wz <- c(w) * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
wz
}))
}
-cauchy.control <- function(save.weight = TRUE, ...)
-{
+cauchy.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -1335,8 +1347,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
cauchy <- function(llocation = "identity", lscale = "loge",
ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by=0.2),
- imethod = 1, nsimEIM = NULL, zero = 2)
-{
+ imethod = 1, nsimEIM = NULL, zero = 2) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -1391,41 +1402,41 @@ cauchy.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- loc.init = if (length( .ilocat)) .ilocat else {
+ loc.init <- if (length( .ilocat)) .ilocat else {
if ( .imethod == 2) median(rep(y, w)) else
if ( .imethod == 3) y else {
cauchy2.Loglikfun <- function(loc, y, x, w, extraargs) {
- iprobs = .iprobs
- qy = quantile(rep(y, w), probs = iprobs)
- ztry = tan(pi*(iprobs-0.5))
- btry = (qy - loc) / ztry
- scal = median(btry, na.rm = TRUE)
- if (scal <= 0) scal = 0.1
+ iprobs <- .iprobs
+ qy <- quantile(rep(y, w), probs = iprobs)
+ ztry <- tan(pi*(iprobs-0.5))
+ btry <- (qy - loc) / ztry
+ scal <- median(btry, na.rm = TRUE)
+ if (scal <= 0) scal <- 0.1
sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
log = TRUE))
}
- loc.grid = c(quantile(y, probs = seq(0.1, 0.9, by=0.05)))
- try.this = getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
+ loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by=0.05)))
+ try.this <- getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
y = y, x = x, w = w)
- try.this = rep(c(try.this), length.out = n)
+ try.this <- rep(c(try.this), length.out = n)
try.this
}
}
- loc.init = rep(c(loc.init), length.out = n)
+ loc.init <- rep(c(loc.init), length.out = n)
- sca.init = if (length( .iscale )) .iscale else {
- iprobs = .iprobs
- qy = quantile(rep(y, w), probs = iprobs)
- ztry = tan(pi*(iprobs-0.5))
- btry = (qy - loc.init[1]) / ztry
- sca.init = median(btry, na.rm = TRUE)
- if (sca.init <= 0) sca.init = 0.01
+ sca.init <- if (length( .iscale )) .iscale else {
+ iprobs <- .iprobs
+ qy <- quantile(rep(y, w), probs = iprobs)
+ ztry <- tan(pi*(iprobs-0.5))
+ btry <- (qy - loc.init[1]) / ztry
+ sca.init <- median(btry, na.rm = TRUE)
+ if (sca.init <= 0) sca.init <- 0.01
sca.init
}
- sca.init = rep(c(sca.init), length.out = n)
- if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
+ sca.init <- rep(c(sca.init), length.out = n)
+ if ( .llocat == "loge") loc.init <- abs(loc.init)+0.01
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
theta2eta(sca.init, .lscale , earg = .escale ))
@@ -1439,7 +1450,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
}, list( .llocat = llocat,
.elocat = elocat ))),
last = eval(substitute(expression({
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$link <- c("location" = .llocat , "scale" =.lscale)
misc$earg <- list("location" = .elocat , "scale" = .escale )
misc$imethod <- .imethod
@@ -1448,8 +1459,8 @@ cauchy.control <- function(save.weight = TRUE, ...)
.llocat = llocat, .lscale = lscale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
- myscale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dcauchy(x = y, loc=locat, sc=myscale, log = TRUE))
@@ -1458,44 +1469,44 @@ cauchy.control <- function(save.weight = TRUE, ...)
.elocat = elocat, .llocat = llocat ))),
vfamily = c("cauchy"),
deriv = eval(substitute(expression({
- location = eta2theta(eta[, 1], .llocat , earg = .elocat )
- myscale = eta2theta(eta[, 2], .lscale , earg = .escale )
- dlocation.deta = dtheta.deta(location, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(myscale, .lscale , earg = .escale )
- Z = (y-location) / myscale
- dl.dlocation = 2 * Z / ((1 + Z^2) * myscale)
- dl.dscale = (Z^2 - 1) / ((1 + Z^2) * myscale)
+ location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ dlocation.deta <- dtheta.deta(location, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(myscale, .lscale , earg = .escale )
+ Z <- (y-location) / myscale
+ dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
+ dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
c(w) * cbind(dl.dlocation * dlocation.deta,
dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
dthetas.detas = cbind(dlocation.deta, dscale.deta)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rcauchy(n, loc = location, scale = myscale)
- Z = (ysim-location) / myscale
- dl.dlocation = 2 * Z / ((1 + Z^2) * myscale)
- dl.dscale = (Z^2 - 1) / ((1 + Z^2) * myscale)
+ ysim <- rcauchy(n, loc = location, scale = myscale)
+ Z <- (ysim-location) / myscale
+ dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
+ dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
rm(ysim)
- temp3 = matrix(c(dl.dlocation, dl.dscale), n, 2)
- run.varcov = ((ii-1) * run.varcov +
+ temp3 <- matrix(c(dl.dlocation, dl.dscale), n, 2)
+ run.varcov <- ((ii-1) * run.varcov +
temp3[, ind1$row.index] *
temp3[, ind1$col.index]) / ii
}
- wz = if (intercept.only)
+ wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[, ind1$row] *
+ wz <- wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
- wz = c(w) * matrix(wz, n, dimm(M))
+ wz <- c(w) * matrix(wz, n, dimm(M))
} else {
- wz = cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
+ wz <- cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
- wz = c(w) * wz[, 1:M] # diagonal wz
+ wz <- c(w) * wz[, 1:M] # diagonal wz
}
wz
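A small usage sketch for the two-parameter cauchy() family above (the data frame and seed are only illustrative): with the default "identity" and "loge" links, an intercept-only vglm() fit returns the location directly and the scale on the log scale.

  library(VGAM)
  set.seed(3)
  cdata <- data.frame(y = rcauchy(500, location = 2, scale = 1.5))
  fit <- vglm(y ~ 1, cauchy(), data = cdata)
  coef(fit, matrix = TRUE)   # column 1 near 2 (location); exp(column 2) near 1.5 (scale)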
@@ -1510,8 +1521,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
cauchy1 <- function(scale.arg = 1, llocation = "identity",
- ilocation = NULL, imethod = 1)
-{
+ ilocation = NULL, imethod = 1) {
llocat <- as.list(substitute(llocation))
@@ -1547,25 +1557,25 @@ cauchy.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- loc.init = if (length( .ilocat)) .ilocat else {
+ loc.init <- if (length( .ilocat)) .ilocat else {
if ( .imethod == 2) median(rep(y, w)) else
if ( .imethod == 3) y else {
cauchy1.Loglikfun <- function(loc, y, x, w, extraargs) {
- scal = extraargs
+ scal <- extraargs
sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
log = TRUE))
}
- loc.grid = quantile(y, probs = seq(0.1, 0.9,
+ loc.grid <- quantile(y, probs = seq(0.1, 0.9,
by = 0.05))
- try.this = getMaxMin(loc.grid,
+ try.this <- getMaxMin(loc.grid,
objfun = cauchy1.Loglikfun,
y = y, x = x, w = w,
extraargs = .scale.arg )
- try.this = rep(try.this, length.out = n)
+ try.this <- rep(try.this, length.out = n)
try.this
}
}
- loc.init = rep(loc.init, length.out = n)
+ loc.init <- rep(loc.init, length.out = n)
if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
etastart <-
theta2eta(loc.init, .llocat , earg = .elocat )
@@ -1581,13 +1591,13 @@ cauchy.control <- function(save.weight = TRUE, ...)
misc$link <- c("location" = .llocat)
misc$earg <- list("location" = .elocat )
- misc$expected = TRUE
- misc$scale.arg = .scale.arg
+ misc$expected <- TRUE
+ misc$scale.arg <- .scale.arg
}), list( .scale.arg = scale.arg, .elocat = elocat,
.llocat = llocat ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat = eta2theta(eta, .llocat , earg = .elocat )
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dcauchy(x = y, loc=locat, scale = .scale.arg,
@@ -1597,17 +1607,17 @@ cauchy.control <- function(save.weight = TRUE, ...)
.llocat = llocat ))),
vfamily = c("cauchy1"),
deriv = eval(substitute(expression({
- locat = eta2theta(eta, .llocat , earg = .elocat )
- temp = (y-locat)/.scale.arg
- dl.dlocat = 2 * temp / ((1 + temp^2) * .scale.arg)
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ temp <- (y-locat)/.scale.arg
+ dl.dlocat <- 2 * temp / ((1 + temp^2) * .scale.arg)
- dlocation.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+ dlocation.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
c(w) * dl.dlocat * dlocation.deta
}), list( .scale.arg = scale.arg, .elocat = elocat,
.llocat = llocat ))),
weight = eval(substitute(expression({
- wz = c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
+ wz <- c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
wz
}), list( .scale.arg = scale.arg, .elocat = elocat,
.llocat = llocat ))))
@@ -1619,8 +1629,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
logistic1 <- function(llocation = "identity",
- scale.arg = 1, imethod = 1)
-{
+ scale.arg = 1, imethod = 1) {
if (!is.Numeric(scale.arg, allowable.length = 1, positive = TRUE))
stop("'scale.arg' must be a single positive number")
if (!is.Numeric(imethod, allowable.length = 1,
@@ -1637,11 +1646,11 @@ cauchy.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("One-parameter logistic distribution ",
- "(location unknown, scale known)\n\n",
- "Link: ",
- namesof("location", llocat, earg = elocat), "\n\n",
- "Mean: location", "\n",
- "Variance: (pi*scale)^2 / 3"),
+ "(location unknown, scale known)\n\n",
+ "Link: ",
+ namesof("location", llocat, earg = elocat), "\n\n",
+ "Mean: location", "\n",
+ "Variance: (pi*scale)^2 / 3"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y)
@@ -1652,11 +1661,11 @@ cauchy.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- locat.init = if ( .imethod == 1) y else median(rep(y, w))
- locat.init = rep(locat.init, length.out = n)
+ locat.init <- if ( .imethod == 1) y else median(rep(y, w))
+ locat.init <- rep(locat.init, length.out = n)
if ( .llocat == "loge")
- locat.init = abs(locat.init) + 0.001
- etastart =
+ locat.init <- abs(locat.init) + 0.001
+ etastart <-
theta2eta(locat.init, .llocat , earg = .elocat )
}
}), list( .imethod = imethod, .llocat = llocat,
@@ -1666,16 +1675,16 @@ cauchy.control <- function(save.weight = TRUE, ...)
}, list( .llocat = llocat,
.elocat = elocat ))),
last = eval(substitute(expression({
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$link <- c(location = .llocat)
misc$earg <- list(location = .elocat )
- misc$scale.arg = .scale.arg
+ misc$scale.arg <- .scale.arg
}), list( .llocat = llocat,
.elocat = elocat, .scale.arg = scale.arg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- locat = eta2theta(eta, .llocat , earg = .elocat )
- zedd = (y-locat)/.scale.arg
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
+ zedd <- (y-locat)/.scale.arg
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dlogis(x = y, locat = locat,
@@ -1685,18 +1694,18 @@ cauchy.control <- function(save.weight = TRUE, ...)
.elocat = elocat, .scale.arg = scale.arg ))),
vfamily = c("logistic1"),
deriv = eval(substitute(expression({
- locat = eta2theta(eta, .llocat , earg = .elocat )
+ locat <- eta2theta(eta, .llocat , earg = .elocat )
- ezedd = exp(-(y-locat) / .scale.arg )
- dl.dlocat = (1 - ezedd) / ((1 + ezedd) * .scale.arg)
- dlocat.deta = dtheta.deta(locat, .llocat ,
+ ezedd <- exp(-(y-locat) / .scale.arg )
+ dl.dlocat <- (1 - ezedd) / ((1 + ezedd) * .scale.arg)
+ dlocat.deta <- dtheta.deta(locat, .llocat ,
earg = .elocat )
c(w) * dl.dlocat * dlocat.deta
}), list( .llocat = llocat,
.elocat = elocat, .scale.arg = scale.arg ))),
weight = eval(substitute(expression({
- wz = c(w) * dlocat.deta^2 / ( .scale.arg^2 * 3)
+ wz <- c(w) * dlocat.deta^2 / ( .scale.arg^2 * 3)
wz
}), list( .scale.arg = scale.arg ))))
}
@@ -1731,9 +1740,9 @@ cauchy.control <- function(save.weight = TRUE, ...)
new("vglmff",
blurb = c("Erlang distribution\n\n",
- "Link: ", namesof("scale", link, earg = earg), "\n", "\n",
- "Mean: shape * scale", "\n",
- "Variance: shape * scale^2"),
+ "Link: ", namesof("scale", link, earg = earg), "\n", "\n",
+ "Mean: shape * scale", "\n",
+ "Variance: shape * scale^2"),
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 1
@@ -1774,17 +1783,17 @@ cauchy.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
if ( .imethod == 1) {
- sc.init = y / .shape.arg
+ sc.init <- y / .shape.arg
}
if ( .imethod == 2) {
- sc.init = (colSums(y * w) / colSums(w))/ .shape.arg
+ sc.init <- (colSums(y * w) / colSums(w))/ .shape.arg
}
if ( .imethod == 3) {
- sc.init = median(y) / .shape.arg
+ sc.init <- median(y) / .shape.arg
}
if ( !is.matrix(sc.init))
- sc.init = matrix(sc.init, n, M, byrow = TRUE)
+ sc.init <- matrix(sc.init, n, M, byrow = TRUE)
etastart <-
@@ -1793,7 +1802,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
}), list( .link = link, .earg = earg,
.shape.arg = shape.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- sc = eta2theta(eta, .link , earg = .earg )
+ sc <- eta2theta(eta, .link , earg = .earg )
.shape.arg * sc
}, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
last = eval(substitute(expression({
@@ -1815,7 +1824,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sc = eta2theta(eta, .link , earg = .earg )
+ sc <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * (( .shape.arg - 1) * log(y) - y / sc -
@@ -1824,14 +1833,14 @@ cauchy.control <- function(save.weight = TRUE, ...)
}, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
vfamily = c("erlang"),
deriv = eval(substitute(expression({
- sc = eta2theta(eta, .link , earg = .earg )
- dl.dsc = (y / sc - .shape.arg) / sc
- dsc.deta = dtheta.deta(sc, .link , earg = .earg )
+ sc <- eta2theta(eta, .link , earg = .earg )
+ dl.dsc <- (y / sc - .shape.arg) / sc
+ dsc.deta <- dtheta.deta(sc, .link , earg = .earg )
c(w) * dl.dsc * dsc.deta
}), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
weight = eval(substitute(expression({
- ed2l.dsc2 = .shape.arg / sc^2
- wz = c(w) * dsc.deta^2 * ed2l.dsc2
+ ned2l.dsc2 <- .shape.arg / sc^2
+ wz <- c(w) * dsc.deta^2 * ned2l.dsc2
wz
}), list( .earg = earg, .shape.arg = shape.arg ))))
}
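A hedged usage sketch for the erlang() family above; the full argument list is not shown in this hunk, so it assumes the shape is supplied as shape.arg (as the substitute() lists suggest) and the default "loge" link for the scale.

  library(VGAM)
  set.seed(5)
  edata <- data.frame(y = rgamma(500, shape = 3, scale = 2))  # Erlang = gamma with integer shape
  fit <- vglm(y ~ 1, erlang(shape.arg = 3), data = edata)
  exp(coef(fit))          # estimated scale, near 2
  3 * exp(coef(fit))      # fitted mean shape * scale; compare with mean(edata$y)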
@@ -1852,18 +1861,18 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
stop("bad input for argument 'Qsize'")
if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
stop("bad input for argument 'a'")
- N = max(length(x), length(Qsize), length(a))
- x = rep(x, length.out = N);
- Qsize = rep(Qsize, length.out = N);
- a = rep(a, length.out = N);
-
- xok = (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
- ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
- ans[xok] = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
+ N <- max(length(x), length(Qsize), length(a))
+ x <- rep(x, length.out = N);
+ Qsize <- rep(Qsize, length.out = N);
+ a <- rep(a, length.out = N);
+
+ xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
+ ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans[xok] <- lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
(x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
(x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
if (!log.arg) {
- ans[xok] = exp(ans[xok])
+ ans[xok] <- exp(ans[xok])
}
ans
}
@@ -1871,9 +1880,9 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
rbort <- function(n, Qsize = 1, a = 0.5) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (!is.Numeric(Qsize, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
@@ -1882,17 +1891,17 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
max(a) >= 1)
stop("bad input for argument 'a'")
- N = use.n
- qsize = rep(Qsize, length.out = N);
- a = rep(a, length.out = N)
- totqsize = qsize
- fini = (qsize < 1)
+ N <- use.n
+ qsize <- rep(Qsize, length.out = N);
+ a <- rep(a, length.out = N)
+ totqsize <- qsize
+ fini <- (qsize < 1)
while(any(!fini)) {
- additions = rpois(sum(!fini), a[!fini])
- qsize[!fini] = qsize[!fini] + additions
- totqsize[!fini] = totqsize[!fini] + additions
- qsize = qsize - 1
- fini = fini | (qsize < 1)
+ additions <- rpois(sum(!fini), a[!fini])
+ qsize[!fini] <- qsize[!fini] + additions
+ totqsize[!fini] <- totqsize[!fini] + additions
+ qsize <- qsize - 1
+ fini <- fini | (qsize < 1)
}
totqsize
}
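A minimal check for dbort() and rbort() above, assuming VGAM is attached: the simulated queue totals from rbort() should average close to Qsize/(1 - a), the mean quoted in the borel.tanner() blurb, and dbort() should sum to about 1 over its support x = Qsize, Qsize + 1, ...

  library(VGAM)
  set.seed(4)
  Qsize <- 2; a <- 0.3
  y <- rbort(2000, Qsize = Qsize, a = a)
  c(mean(y), Qsize / (1 - a))                     # both near 2.86
  sum(dbort(Qsize:500, Qsize = Qsize, a = a))     # about 1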
@@ -1900,8 +1909,7 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
borel.tanner <- function(Qsize = 1, link = "logit",
- imethod = 1)
-{
+ imethod = 1) {
if (!is.Numeric(Qsize, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'Qsize'")
@@ -1921,11 +1929,11 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
new("vglmff",
blurb = c("Borel-Tanner distribution\n\n",
- "Link: ",
- namesof("a", link, earg = earg), "\n\n",
- "Mean: Qsize/(1-a)",
- "\n",
- "Variance: Qsize*a / (1-a)^3"),
+ "Link: ",
+ namesof("a", link, earg = earg), "\n\n",
+ "Mean: Qsize/(1-a)",
+ "\n",
+ "Variance: Qsize*a / (1-a)^3"),
initialize = eval(substitute(expression({
if (any(y < .Qsize ))
stop("all y values must be >= ", .Qsize )
@@ -1939,31 +1947,31 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
predictors.names <- namesof("a", .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- a.init = switch(as.character( .imethod ),
+ a.init <- switch(as.character( .imethod ),
"1" = 1 - .Qsize / (y+1/8),
"2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
"3" = rep(1 - .Qsize / median(y), length.out = n),
"4" = rep(0.5, length.out = n))
- etastart =
+ etastart <-
theta2eta(a.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .Qsize = Qsize,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta, .link , earg = .earg )
+ aa <- eta2theta(eta, .link , earg = .earg )
.Qsize / (1 - aa)
}, list( .link = link, .earg = earg, .Qsize = Qsize ))),
last = eval(substitute(expression({
- misc$link = c(a = .link)
+ misc$link <- c(a = .link)
misc$earg <- list(a = .earg )
- misc$expected = TRUE
- misc$Qsize = .Qsize
+ misc$expected <- TRUE
+ misc$Qsize <- .Qsize
}), list( .link = link, .earg = earg, .Qsize = Qsize ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta, .link , earg = .earg )
+ aa <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dbort(x = y, Qsize = .Qsize, a = aa, log = TRUE))
@@ -1971,14 +1979,14 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
}, list( .link = link, .earg = earg, .Qsize = Qsize ))),
vfamily = c("borel.tanner"),
deriv = eval(substitute(expression({
- aa = eta2theta(eta, .link , earg = .earg )
- dl.da = (y - .Qsize) / aa - y
- da.deta = dtheta.deta(aa, .link , earg = .earg )
+ aa <- eta2theta(eta, .link , earg = .earg )
+ dl.da <- (y - .Qsize) / aa - y
+ da.deta <- dtheta.deta(aa, .link , earg = .earg )
c(w) * dl.da * da.deta
}), list( .link = link, .earg = earg, .Qsize = Qsize ))),
weight = eval(substitute(expression({
- ned2l.da2 = .Qsize / (aa * (1 - aa))
- wz = c(w) * ned2l.da2 * da.deta^2
+ ned2l.da2 <- .Qsize / (aa * (1 - aa))
+ wz <- c(w) * ned2l.da2 * da.deta^2
wz
}), list( .Qsize = Qsize ))))
}
@@ -1996,16 +2004,16 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
stop("bad input for argument 'x'")
if (!is.Numeric(a, positive = TRUE))
stop("bad input for argument 'a'")
- N = max(length(x), length(a))
- x = rep(x, length.out = N);
- a = rep(a, length.out = N);
+ N <- max(length(x), length(a))
+ x <- rep(x, length.out = N);
+ a <- rep(a, length.out = N);
- xok = (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
- ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
- ans[xok] = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
+ xok <- (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
+ ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans[xok] <- ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
if (!log.arg) {
- ans[xok] = exp(ans[xok])
+ ans[xok] <- exp(ans[xok])
}
ans
}
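Similarly for dfelix() above, a brief sketch assuming VGAM is attached: the pmf is supported on the odd integers 1, 3, 5, ... and, per the felix() blurb below, has mean 1/(1 - 2*a) for 0 < a < 0.5.

  library(VGAM)
  a <- 0.25
  xx <- seq(1, 999, by = 2)                  # odd support
  c(sum(dfelix(xx, a = a)),                  # about 1
    sum(xx * dfelix(xx, a = a)),             # about 1/(1 - 2*a) = 2
    1 / (1 - 2 * a))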
@@ -2027,9 +2035,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
new("vglmff",
blurb = c("Felix distribution\n\n",
- "Link: ",
- namesof("a", link, earg = earg), "\n\n",
- "Mean: 1/(1-2*a)"),
+ "Link: ",
+ namesof("a", link, earg = earg), "\n\n",
+ "Mean: 1/(1-2*a)"),
initialize = eval(substitute(expression({
if (any(y < 1) ||
any((y+1)/2 != round((y+1)/2)))
@@ -2140,18 +2148,18 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
new("vglmff",
blurb = c("Beta distribution parameterized by mu and a ",
"precision parameter\n",
- if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
- "/ beta(mu*phi,(1-mu)*phi),\n",
- " 0<y<1, 0<mu<1, phi>0\n\n") else
- paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
- "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
- B, "-", A, ")^(phi-1)),\n",
- A," < y < ",B, ", ", A," < mu < ",B,
- ", mu = ", A, " + ", (B-A), " * mu1",
- ", phi > 0\n\n", sep = ""),
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("phi", lphi, earg = ephi)),
+ if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
+ "/ beta(mu*phi,(1-mu)*phi),\n",
+ " 0<y<1, 0<mu<1, phi>0\n\n") else
+ paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
+ "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
+ B, "-", A, ")^(phi-1)),\n",
+ A," < y < ",B, ", ", A," < mu < ",B,
+ ", mu = ", A, " + ", (B-A), " * mu1",
+ ", phi > 0\n\n", sep = ""),
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("phi", lphi, earg = ephi)),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -2166,22 +2174,22 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
predictors.names <- c(namesof("mu", .lmu , .emu , short = TRUE),
namesof("phi", .lphi , .ephi, short = TRUE))
if (!length(etastart)) {
- mu.init = if (is.Numeric( .imu )) .imu else
+ mu.init <- if (is.Numeric( .imu )) .imu else
{if ( .imethod == 1) weighted.mean(y, w) else
median(rep(y, w))}
- mu1.init = (mu.init - .A) / ( .B - .A) # In (0,1)
- phi.init = if (is.Numeric( .iphi)) .iphi else
+ mu1.init <- (mu.init - .A) / ( .B - .A) # In (0,1)
+ phi.init <- if (is.Numeric( .iphi)) .iphi else
max(0.01, -1 + ( .B - .A)^2 * mu1.init*(1-mu1.init)/var(y))
etastart <- matrix(0, n, 2)
- etastart[, 1] = theta2eta(mu.init, .lmu , earg = .emu )
- etastart[, 2] = theta2eta(phi.init, .lphi , earg = .ephi )
+ etastart[, 1] <- theta2eta(mu.init, .lmu , earg = .emu )
+ etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
}
}), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
.A = A, .B = B, .emu = emu, .ephi = ephi,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta[, 1], .lmu , .emu )
+ mu <- eta2theta(eta[, 1], .lmu , .emu )
mu
}, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
last = eval(substitute(expression({
@@ -2194,14 +2202,14 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.stdbeta = stdbeta ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- mu = eta2theta(eta[, 1], .lmu , .emu )
- m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- phi = eta2theta(eta[, 2], .lphi , .ephi )
+ mu <- eta2theta(eta[, 1], .lmu , .emu )
+ m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
+ phi <- eta2theta(eta[, 2], .lphi , .ephi )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- shape1 = phi * m1u
- shape2 = (1 - m1u) * phi
- zedd = (y - .A) / ( .B - .A)
+ shape1 <- phi * m1u
+ shape2 <- (1 - m1u) * phi
+ zedd <- (y - .A) / ( .B - .A)
sum(c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
log = TRUE) -
log( abs( .B - .A ))))
@@ -2211,22 +2219,22 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.stdbeta = stdbeta ))),
vfamily = "betaff",
deriv = eval(substitute(expression({
- mu = eta2theta(eta[, 1], .lmu , .emu )
- phi = eta2theta(eta[, 2], .lphi , .ephi )
- m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- dmu.deta = dtheta.deta(mu, .lmu , .emu )
- dmu1.dmu = 1 / ( .B - .A)
- dphi.deta = dtheta.deta(phi, .lphi , .ephi )
- temp1 = m1u*phi
- temp2 = (1-m1u)*phi
+ mu <- eta2theta(eta[, 1], .lmu , .emu )
+ phi <- eta2theta(eta[, 2], .lphi , .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
+ dmu.deta <- dtheta.deta(mu, .lmu , .emu )
+ dmu1.dmu <- 1 / ( .B - .A)
+ dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
+ temp1 <- m1u*phi
+ temp2 <- (1-m1u)*phi
if ( .stdbeta ) {
- dl.dmu1 = phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
- dl.dphi = digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
+ dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
+ dl.dphi <- digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
mu*log(y) + (1-mu)*log1p(-y)
} else {
- dl.dmu1 = phi*(digamma(temp2) - digamma(temp1) +
+ dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) +
log(y-.A) - log( .B-y))
- dl.dphi = digamma(phi) - m1u*digamma(temp1) -
+ dl.dphi <- digamma(phi) - m1u*digamma(temp1) -
(1-m1u)*digamma(temp2) +
m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
}
@@ -2237,14 +2245,14 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.A = A, .B = B,
.stdbeta = stdbeta ))),
weight = eval(substitute(expression({
- d2l.dmu12 = phi^2 * (trigamma(temp1) + trigamma(temp2))
- d2l.dphi2 = -trigamma(phi) + trigamma(temp1) * m1u^2 +
+ d2l.dmu12 <- phi^2 * (trigamma(temp1) + trigamma(temp2))
+ d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
trigamma(temp2) * (1-m1u)^2
- d2l.dmu1phi = temp1*trigamma(temp1) - temp2*trigamma(temp2)
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[, iam(1, 1, M)] = d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
- wz[, iam(2, 2, M)] = d2l.dphi2 * dphi.deta^2
- wz[, iam(1, 2, M)] = d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
+ d2l.dmu1phi <- temp1*trigamma(temp1) - temp2*trigamma(temp2)
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
+ wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
c(w) * wz
}), list( .A = A, .B = B ))))
}
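Illustrative sketch, not part of the patch: in the standard case (A = 0, B = 1) the (mu, phi) parameterisation used by betaff() maps to the usual beta shapes as shape1 = mu*phi and shape2 = (1 - mu)*phi, which is exactly how the loglikelihood slot above calls dbeta().

mu <- 0.3; phi <- 5; y <- 0.42
dbeta(y, shape1 = mu * phi, shape2 = (1 - mu) * phi, log = TRUE)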
@@ -2255,8 +2263,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
beta.ab <- function(lshape1 = "loge", lshape2 = "loge",
i1 = NULL, i2 = NULL, trim = 0.05,
- A = 0, B = 1, parallel = FALSE, zero = NULL)
-{
+ A = 0, B = 1, parallel = FALSE, zero = NULL) {
lshape1 <- as.list(substitute(lshape1))
eshape1 <- link2list(lshape1)
@@ -2298,8 +2305,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
namesof("shape1", lshape1, earg = eshape1), ", ",
namesof("shape2", lshape2, earg = eshape2)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel ,
- constraints, int = TRUE)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+ constraints, apply.int = TRUE)
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -2319,24 +2326,24 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
namesof("shape2", .lshape2 , earg = .eshape2 , short = TRUE))
if (!length(etastart)) {
- mu1d = mean(y, trim = .trim)
- uu = (mu1d - .A) / ( .B - .A)
- DD = ( .B - .A)^2
- pinit = max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
- qinit = max(0.01, pinit * (1 - uu) / uu)
+ mu1d <- mean(y, trim = .trim)
+ uu <- (mu1d - .A) / ( .B - .A)
+ DD <- ( .B - .A)^2
+ pinit <- max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
+ qinit <- max(0.01, pinit * (1 - uu) / uu)
etastart <- matrix(0, n, 2)
- etastart[, 1] = theta2eta( pinit, .lshape1 , earg = .eshape1 )
- etastart[, 2] = theta2eta( qinit, .lshape2 , earg = .eshape2 )
+ etastart[, 1] <- theta2eta( pinit, .lshape1 , earg = .eshape1 )
+ etastart[, 2] <- theta2eta( qinit, .lshape2 , earg = .eshape2 )
}
if (is.Numeric( .i1 ))
- etastart[, 1] = theta2eta( .i1, .lshape1 , earg = .eshape1 )
+ etastart[, 1] <- theta2eta( .i1, .lshape1 , earg = .eshape1 )
if (is.Numeric( .i2 ))
- etastart[, 2] = theta2eta( .i2, .lshape2 , earg = .eshape2 )
+ etastart[, 2] <- theta2eta( .i2, .lshape2 , earg = .eshape2 )
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
.A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
@@ -2349,11 +2356,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2 ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- zedd = (y - .A) / ( .B - .A)
+ zedd <- (y - .A) / ( .B - .A)
sum(c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
shape2 = shapes[, 2],
log = TRUE) - log( abs( .B - .A ))))
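Illustrative sketch, not part of the patch: beta.ab() supports a beta density on (A, B) through the substitution z = (y - A)/(B - A), so that f(y) = dbeta(z; shape1, shape2)/(B - A); that change of variables is the '- log(abs(.B - .A))' term in the log-likelihood above.

A <- 1; B <- 3; shape1 <- 2; shape2 <- 5; y <- 1.8
dbeta((y - A) / (B - A), shape1, shape2, log = TRUE) - log(B - A)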
@@ -2362,29 +2369,29 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = "beta.ab",
deriv = eval(substitute(expression({
- shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
dshapes.deta <-
cbind(dtheta.deta(shapes[, 1], .lshape1 , earg = .eshape1),
dtheta.deta(shapes[, 2], .lshape2 , earg = .eshape2))
- dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
+ dl.dshapes <- cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
c(w) * dl.dshapes * dshapes.deta
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = expression({
- temp2 = trigamma(shapes[, 1]+shapes[, 2])
- d2l.dshape12 = temp2 - trigamma(shapes[, 1])
- d2l.dshape22 = temp2 - trigamma(shapes[, 2])
- d2l.dshape1shape2 = temp2
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = d2l.dshape12 * dshapes.deta[, 1]^2
- wz[, iam(2, 2, M)] = d2l.dshape22 * dshapes.deta[, 2]^2
- wz[, iam(1, 2, M)] = d2l.dshape1shape2 * dshapes.deta[, 1] *
+ temp2 <- trigamma(shapes[, 1]+shapes[, 2])
+ d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
+ d2l.dshape1shape2 <- temp2
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] <- d2l.dshape1shape2 * dshapes.deta[, 1] *
dshapes.deta[, 2]
-c(w) * wz
@@ -2459,8 +2466,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
"Link: ",
namesof("rate", link, earg, tag = TRUE), "\n",
"Mean: ", "mu = ",
- if (location == 0) "1 / rate" else
- paste(location, "+ 1 / rate"), "\n",
+ if (location == 0) "1 / rate" else
+ paste(location, "+ 1 / rate"), "\n",
"Variance: ",
if (location == 0) "Exponential: mu^2" else
paste("(mu - ", location, ")^2", sep = "")),
@@ -2569,8 +2576,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- gamma1 <- function(link = "loge", zero = NULL)
-{
+ gamma1 <- function(link = "loge", zero = NULL) {
link <- as.list(substitute(link))
@@ -2586,10 +2592,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
new("vglmff",
blurb = c("1-parameter Gamma distribution\n",
- "Link: ",
- namesof("shape", link, earg = earg, tag = TRUE), "\n",
- "Mean: mu (=shape)\n",
- "Variance: mu (=shape)"),
+ "Link: ",
+ namesof("shape", link, earg = earg, tag = TRUE), "\n",
+ "Mean: mu (=shape)\n",
+ "Variance: mu (=shape)"),
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 1
@@ -2699,11 +2705,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
new("vglmff",
blurb = c("2-parameter Gamma distribution\n",
- "Links: ",
- namesof("rate", lrate, earg = erate), ", ",
- namesof("shape", lshape, earg = eshape), "\n",
- "Mean: mu = shape/rate\n",
- "Variance: (mu^2)/shape = shape/rate^2"),
+ "Links: ",
+ namesof("rate", lrate, earg = erate), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: mu = shape/rate\n",
+ "Variance: (mu^2)/shape = shape/rate^2"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -2719,14 +2725,14 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (!length(etastart)) {
mymu = y + 0.167 * (y == 0)
- junk = lsfit(x, y, wt = w, intercept = FALSE)
- var.y.est = sum(c(w) * junk$resid^2) / (nrow(x) - length(junk$coef))
- init.shape = if (length( .ishape )) .ishape else mymu^2 / var.y.est
- init.rate = if (length( .irate)) .irate else init.shape / mymu
- init.rate = rep(init.rate, length.out = n)
- init.shape = rep(init.shape, length.out = n)
+ junk <- lsfit(x, y, wt = w, intercept = FALSE)
+ var.y.est <- sum(c(w) * junk$resid^2) / (nrow(x) - length(junk$coef))
+ init.shape <- if (length( .ishape )) .ishape else mymu^2 / var.y.est
+ init.rate <- if (length( .irate)) .irate else init.shape / mymu
+ init.rate <- rep(init.rate, length.out = n)
+ init.shape <- rep(init.shape, length.out = n)
if ( .lshape == "loglog")
- init.shape[init.shape <= 1] = 3.1 # Hopefully value is big enough
+ init.shape[init.shape <= 1] <- 3.1 # Hopefully value is big enough
etastart <-
cbind(theta2eta(init.rate, .lrate , earg = .erate ),
theta2eta(init.shape, .lshape , earg = .eshape ))
@@ -2746,8 +2752,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.erate = erate, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rate = eta2theta(eta[, 1], .lrate , earg = .erate )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dgamma(x = y, shape = shape, rate=rate, log = TRUE))
@@ -2756,29 +2762,29 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.erate = erate, .eshape = eshape))),
vfamily = c("gamma2.ab"),
deriv = eval(substitute(expression({
- rate = eta2theta(eta[, 1], .lrate , earg = .erate )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
- dl.drate = mu - y
- dl.dshape = log(y*rate) - digamma(shape)
- dratedeta = dtheta.deta(rate, .lrate , earg = .erate )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ dl.drate <- mu - y
+ dl.dshape <- log(y*rate) - digamma(shape)
+ dratedeta <- dtheta.deta(rate, .lrate , earg = .erate )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.drate * dratedeta,
dl.dshape * dshape.deta)
}), list( .lrate = lrate, .lshape = lshape,
.erate = erate, .eshape = eshape))),
weight = eval(substitute(expression({
- d2l.dshape2 = -trigamma(shape)
- d2l.drate2 = -shape/(rate^2)
- d2l.drateshape = 1/rate
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = -d2l.drate2 * dratedeta^2
- wz[, iam(2, 2, M)] = -d2l.dshape2 * dshape.deta^2
- wz[, iam(1, 2, M)] = -d2l.drateshape * dratedeta * dshape.deta
+ d2l.dshape2 <- -trigamma(shape)
+ d2l.drate2 <- -shape/(rate^2)
+ d2l.drateshape <- 1/rate
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- -d2l.drate2 * dratedeta^2
+ wz[, iam(2, 2, M)] <- -d2l.dshape2 * dshape.deta^2
+ wz[, iam(1, 2, M)] <- -d2l.drateshape * dratedeta * dshape.deta
if (! .expected) {
- d2ratedeta2 = d2theta.deta2(rate, .lrate , earg = .erate )
- d2shapedeta2 = d2theta.deta2(shape, .lshape , earg = .eshape )
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.drate * d2ratedeta2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dshape * d2shapedeta2
+ d2ratedeta2 <- d2theta.deta2(rate, .lrate , earg = .erate )
+ d2shapedeta2 <- d2theta.deta2(shape, .lshape , earg = .eshape )
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.drate * d2ratedeta2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dshape * d2shapedeta2
}
c(w) * wz
}), list( .lrate = lrate, .lshape = lshape,
@@ -2790,7 +2796,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
gamma2 <-
function(lmu = "loge", lshape = "loge",
imethod = 1, ishape = NULL,
- parallel = FALSE, intercept.apply = FALSE,
+ parallel = FALSE, apply.parint = FALSE,
deviance.arg = FALSE, zero = -2)
{
@@ -2817,35 +2823,35 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
stop("argument 'imethod' must be 1 or 2")
- if (!is.logical(intercept.apply) ||
- length(intercept.apply) != 1)
- stop("argument 'intercept.apply' must be a single logical")
+ if (!is.logical(apply.parint) ||
+ length(apply.parint) != 1)
+ stop("argument 'apply.parint' must be a single logical")
if (is.logical(parallel) && parallel && length(zero))
stop("set 'zero = NULL' if 'parallel = TRUE'")
- ans =
+ ans <-
new("vglmff",
blurb = c("2-parameter Gamma distribution",
- " (McCullagh and Nelder 1989 parameterization)\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("shape", lshape, earg = eshape), "\n",
- "Mean: mu\n",
- "Variance: (mu^2)/shape"),
+ " (McCullagh and Nelder 1989 parameterization)\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: mu\n",
+ "Variance: (mu^2)/shape"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
- intercept.apply = .intercept.apply )
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ apply.int = .apply.parint )
dotzero <- .zero
Musual <- 2
eval(negzero.expression)
constraints <- cm.zero.vgam(constraints, x, z_Index, M)
}), list( .zero = zero,
- .parallel = parallel, .intercept.apply = intercept.apply ))),
+ .parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
@@ -2874,8 +2880,8 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
is.Numeric( .zero , allowable.length = 1) && .zero != -2)
stop("argument zero = -2 is required")
- M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
+ M <- Musual * ncol(y)
+ NOS <- ncoly <- ncol(y) # Number of species
temp1.names =
@@ -2897,20 +2903,20 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (!length(etastart)) {
- init.shape = matrix(1.0, n, NOS)
- mymu = y # + 0.167 * (y == 0) # imethod == 1 (the default)
+ init.shape <- matrix(1.0, n, NOS)
+ mymu <- y # + 0.167 * (y == 0) # imethod == 1 (the default)
if ( .imethod == 2) {
for(ii in 1:ncol(y)) {
- mymu[, ii] = weighted.mean(y[, ii], w = w[, ii])
+ mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
}
}
for(spp in 1:NOS) {
- junk = lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
- var.y.est = sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
- init.shape[, spp] = if (length( .ishape )) .ishape else
+ junk <- lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
+ var.y.est <- sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
+ init.shape[, spp] <- if (length( .ishape )) .ishape else
mymu[, spp]^2 / var.y.est
if ( .lshape == "loglog")
- init.shape[init.shape[, spp] <= 1,spp] = 3.1
+ init.shape[init.shape[, spp] <= 1,spp] <- 3.1
}
etastart <-
cbind(theta2eta(mymu, .lmu , earg = .emu ),
@@ -2920,11 +2926,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
}
}), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
.emu = emu, .eshape = eshape,
- .parallel = parallel, .intercept.apply = intercept.apply,
+ .parallel = parallel, .apply.parint = apply.parint,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- 2
- NOS = ncol(eta) / Musual
+ NOS <- ncol(eta) / Musual
eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
@@ -2932,40 +2938,40 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
- tmp34 = c(rep( .lmu , length = NOS),
+ tmp34 <- c(rep( .lmu , length = NOS),
rep( .lshape , length = NOS))
names(tmp34) =
c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
- tmp34 = tmp34[interleave.VGAM(M, M = 2)]
- misc$link = tmp34 # Already named
+ tmp34 <- tmp34[interleave.VGAM(M, M = 2)]
+ misc$link <- tmp34 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = .emu
- misc$earg[[Musual*ii ]] = .eshape
+ misc$earg[[Musual*ii-1]] <- .emu
+ misc$earg[[Musual*ii ]] <- .eshape
}
misc$Musual <- Musual
misc$expected <- TRUE
misc$multipleResponses <- TRUE
misc$parallel <- .parallel
- misc$intercept.apply <- .intercept.apply
+ misc$apply.parint <- .apply.parint
}), list( .lmu = lmu, .lshape = lshape,
.emu = emu, .eshape = eshape,
- .parallel = parallel, .intercept.apply = intercept.apply ))),
+ .parallel = parallel, .apply.parint = apply.parint ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- temp = theta2eta(mu, .lmu , earg = .emu )
- temp = cbind(temp, NA * temp)
+ temp <- theta2eta(mu, .lmu , earg = .emu )
+ temp <- cbind(temp, NA * temp)
temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
}, list( .lmu = lmu, .emu = emu ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 2
- NOS = ncol(eta) / Musual
- mymu = mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
- shapemat = eta2theta(eta[, Musual * (1:NOS), drop = FALSE],
+ NOS <- ncol(eta) / Musual
+ mymu <- mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
+ shapemat <- eta2theta(eta[, Musual * (1:NOS), drop = FALSE],
.lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
@@ -2979,32 +2985,32 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
vfamily = c("gamma2"),
deriv = eval(substitute(expression({
Musual <- 2
- NOS = ncol(eta) / Musual
+ NOS <- ncol(eta) / Musual
- mymu = eta2theta(eta[, Musual * (1:NOS) - 1],
+ mymu <- eta2theta(eta[, Musual * (1:NOS) - 1],
.lmu , earg = .emu )
- shape = eta2theta(eta[, Musual * (1:NOS)],
+ shape <- eta2theta(eta[, Musual * (1:NOS)],
.lshape , earg = .eshape )
- dl.dmu = shape * (y / mymu - 1) / mymu
- dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
+ dl.dmu <- shape * (y / mymu - 1) / mymu
+ dl.dshape <- log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
y / mymu
- dmu.deta = dtheta.deta(mymu, .lmu , earg = .emu )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dmu.deta <- dtheta.deta(mymu, .lmu , earg = .emu )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
- myderiv = c(w) * cbind(dl.dmu * dmu.deta,
+ myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dshape * dshape.deta)
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lmu = lmu, .lshape = lshape,
.emu = emu, .eshape = eshape))),
weight = eval(substitute(expression({
- ned2l.dmu2 = shape / (mymu^2)
- ned2l.dshape2 = trigamma(shape) - 1 / shape
- wz = matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
+ ned2l.dmu2 <- shape / (mymu^2)
+ ned2l.dshape2 <- trigamma(shape) - 1 / shape
+ wz <- matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
- wz[, Musual*(1:NOS)-1] = ned2l.dmu2 * dmu.deta^2
- wz[, Musual*(1:NOS) ] = ned2l.dshape2 * dshape.deta^2
+ wz[, Musual*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
+ wz[, Musual*(1:NOS) ] <- ned2l.dshape2 * dshape.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
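Illustrative sketch, not part of the patch: gamma2() uses the McCullagh and Nelder (mu, shape) parameterisation, so its likelihood is that of dgamma() with rate = shape/mu, giving mean mu and variance mu^2/shape as stated in the blurb.

mu <- 3; shape <- 2; y <- 2.5
dgamma(y, shape = shape, rate = shape / mu, log = TRUE)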
@@ -3013,7 +3019,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- if (deviance.arg) ans@deviance = eval(substitute(
+ if (deviance.arg) ans@deviance <- eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -3022,10 +3028,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
Musual <- 2
- NOS = ncol(eta) / 2
- temp300 = eta[, 2*(1:NOS), drop = FALSE]
- shape = eta2theta(temp300, .lshape , earg = .eshape )
- devi = -2 * (log(y/mu) - y/mu + 1)
+ NOS <- ncol(eta) / 2
+ temp300 <- eta[, 2*(1:NOS), drop = FALSE]
+ shape <- eta2theta(temp300, .lshape , earg = .eshape )
+ devi <- -2 * (log(y/mu) - y/mu + 1)
if (residuals) {
warning("not 100% sure about these deviance residuals!")
sign(y - mu) * sqrt(abs(devi) * w)
@@ -3038,8 +3044,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
geometric <- function(link = "logit", expected = TRUE,
- imethod = 1, iprob = NULL, zero = NULL)
-{
+ imethod = 1, iprob = NULL, zero = NULL) {
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
@@ -3110,7 +3115,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (!length(etastart)) {
- prob.init = if ( .imethod == 2)
+ prob.init <- if ( .imethod == 2)
1 / (1 + y + 1/16) else
if ( .imethod == 3)
1 / (1 + apply(y, 2, median) + 1/16) else
@@ -3121,7 +3126,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (length( .iprob ))
- prob.init = matrix( .iprob , n, M, byrow = TRUE)
+ prob.init <- matrix( .iprob , n, M, byrow = TRUE)
etastart <- theta2eta(prob.init, .link , earg = .earg )
@@ -3129,7 +3134,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
}), list( .link = link, .earg = earg,
.imethod = imethod, .iprob = iprob ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta, .link , earg = .earg )
+ prob <- eta2theta(eta, .link , earg = .earg )
(1 - prob) / prob
}, list( .link = link, .earg = earg ))),
@@ -3155,7 +3160,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
.expected = expected, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta, .link , earg = .earg )
+ prob <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dgeom(x = y, prob = prob, log = TRUE))
@@ -3163,22 +3168,22 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
}, list( .link = link, .earg = earg ))),
vfamily = c("geometric"),
deriv = eval(substitute(expression({
- prob = eta2theta(eta, .link , earg = .earg )
+ prob <- eta2theta(eta, .link , earg = .earg )
- dl.dprob = -y / (1 - prob) + 1 / prob
+ dl.dprob <- -y / (1 - prob) + 1 / prob
- dprobdeta = dtheta.deta(prob, .link , earg = .earg )
+ dprobdeta <- dtheta.deta(prob, .link , earg = .earg )
c(w) * cbind(dl.dprob * dprobdeta)
}), list( .link = link, .earg = earg, .expected = expected ))),
weight = eval(substitute(expression({
- ned2l.dprob2 = if ( .expected ) {
+ ned2l.dprob2 <- if ( .expected ) {
1 / (prob^2 * (1 - prob))
} else {
y / (1 - prob)^2 + 1 / prob^2
}
- wz = ned2l.dprob2 * dprobdeta^2
+ wz <- ned2l.dprob2 * dprobdeta^2
if ( !( .expected ))
- wz = wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
+ wz <- wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
c(w) * wz
}), list( .link = link, .earg = earg,
.expected = expected ))))
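Illustrative sketch, not part of the patch: the linkinv slot above returns (1 - prob)/prob, the mean of the geometric distribution in the "number of failures" parameterisation used by dgeom().

prob <- 0.3
(1 - prob) / prob                            # model mean, here 7/3
sum((0:2000) * dgeom(0:2000, prob = prob))   # numerically the same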
@@ -3198,14 +3203,14 @@ dbetageom <- function(x, shape1, shape2, log = FALSE) {
stop("bad input for argument 'shape1'")
if (!is.Numeric(shape2, positive = TRUE))
stop("bad input for argument 'shape2'")
- N = max(length(x), length(shape1), length(shape2))
- x = rep(x, length.out = N);
- shape1 = rep(shape1, length.out = N);
- shape2 = rep(shape2, length.out = N)
-
- loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
- xok = (x == round(x) & x >= 0)
- loglik[!xok] = log(0)
+ N <- max(length(x), length(shape1), length(shape2))
+ x <- rep(x, length.out = N);
+ shape1 <- rep(shape1, length.out = N);
+ shape2 <- rep(shape2, length.out = N)
+
+ loglik <- lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
+ xok <- (x == round(x) & x >= 0)
+ loglik[!xok] <- log(0)
if (log.arg) {
loglik
} else {
@@ -3221,25 +3226,25 @@ pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
stop("bad input for argument 'shape1'")
if (!is.Numeric(shape2, positive = TRUE))
stop("bad input for argument 'shape2'")
- N = max(length(q), length(shape1), length(shape2))
- q = rep(q, length.out = N);
- shape1 = rep(shape1, length.out = N);
- shape2 = rep(shape2, length.out = N)
- ans = q * 0 # Retains names(q)
+ N <- max(length(q), length(shape1), length(shape2))
+ q <- rep(q, length.out = N);
+ shape1 <- rep(shape1, length.out = N);
+ shape2 <- rep(shape2, length.out = N)
+ ans <- q * 0 # Retains names(q)
if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
max(abs(shape2-shape2[1])) < 1.0e-08) {
- qstar = floor(q)
- temp = if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
+ qstar <- floor(q)
+ temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
- unq = unique(qstar)
+ unq <- unique(qstar)
for(ii in unq) {
index <- (qstar == ii)
- ans[index] = if (ii >= 0) sum(temp[1:(1+ii)]) else 0
+ ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
}
} else
for(ii in 1:N) {
- qstar = floor(q[ii])
- ans[ii] = if (qstar >= 0) sum(dbetageom(x = 0:qstar,
+ qstar <- floor(q[ii])
+ ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
shape1 = shape1[ii], shape2 = shape2[ii])) else 0
}
if (log.p) log(ans) else ans
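Illustrative sketch, not part of the patch: the pmf coded in dbetageom() is the beta mixture of geometrics, P(X = x) = B(shape1 + 1, shape2 + x)/B(shape1, shape2), which can be cross-checked by direct numerical integration.

shape1 <- 2; shape2 <- 3; x <- 4
exp(lbeta(1 + shape1, shape2 + x) - lbeta(shape1, shape2))
integrate(function(p) dgeom(x, p) * dbeta(p, shape1, shape2), 0, 1)$value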
@@ -3330,7 +3335,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- ans =
+ ans <-
new("vglmff",
@@ -3351,7 +3356,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if ( .parallel && ncol(cbind(y)) > 1)
stop("univariate responses needed if 'parallel = TRUE'")
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
@@ -3364,7 +3369,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
temp5 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1, maximize = TRUE)
w <- temp5$w
@@ -3389,8 +3395,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
stop("number of columns of prior-'weights' is greater than ",
"the number of responses")
- M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
+ M <- Musual * ncol(y)
+ NOS <- ncoly <- ncol(y) # Number of species
predictors.names <-
c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
.lmuuu, earg = .emuuu, tag = FALSE),
@@ -3407,9 +3413,9 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if (!length(etastart)) {
- mu.init = y
+ mu.init <- y
for(iii in 1:ncol(y)) {
- use.this = if ( .imethod == 1) {
+ use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
c(quantile(y[, iii], probs = .probs.y ) + 1/16)
@@ -3418,29 +3424,29 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
}
if (is.numeric( .mu.init )) {
- mu.init[, iii] = MU.INIT[, iii]
+ mu.init[, iii] <- MU.INIT[, iii]
} else {
- medabsres = median(abs(y[, iii] - use.this)) + 1/32
+ medabsres <- median(abs(y[, iii] - use.this)) + 1/32
allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
- mu.init[, iii] = use.this + (1 - .sinit ) *
+ mu.init[, iii] <- use.this + (1 - .sinit ) *
allowfun(y[, iii] - use.this, maxtol = medabsres)
- mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
+ mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
}
} # of for(iii)
if ( is.Numeric( .k.init )) {
- kay.init = matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
+ kay.init <- matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
} else {
negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- mu = extraargs
+ mu <- extraargs
sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
- k.grid = 2^((-7):7)
- k.grid = 2^(seq(-8, 8, length = 40))
- kay.init = matrix(0, nrow = n, ncol = NOS)
+ k.grid <- 2^((-7):7)
+ k.grid <- 2^(seq(-8, 8, length = 40))
+ kay.init <- matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kay.init[, spp.] = getMaxMin(k.grid,
+ kay.init[, spp.] <- getMaxMin(k.grid,
objfun = negbinomial.Loglikfun,
y = y[, spp.], x = x, w = w[, spp.],
extraargs = mu.init[, spp.])
@@ -3472,9 +3478,9 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- 2
- NOS = ncol(eta) / Musual
- eta.k = eta[, Musual * (1:NOS) , drop = FALSE]
- kmat = eta2theta(eta.k, .lsize , earg = .esize )
+ NOS <- ncol(eta) / Musual
+ eta.k <- eta[, Musual * (1:NOS) , drop = FALSE]
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -3495,25 +3501,25 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
- temp0303 = c(rep( .lmuuu, length = NOS),
+ temp0303 <- c(rep( .lmuuu, length = NOS),
rep( .lsize , length = NOS))
names(temp0303) =
c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = 2)]
- misc$link = temp0303 # Already named
+ temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
+ misc$link <- temp0303 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = newemu
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-1]] <- newemu
+ misc$earg[[Musual*ii ]] <- .esize
}
- misc$cutoff = .cutoff
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
+ misc$cutoff <- .cutoff
+ misc$imethod <- .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
misc$shrinkage.init <- .sinit
misc$multipleResponses <- TRUE
}), list( .lmuuu = lmuuu, .lsize = lsize,
@@ -3526,24 +3532,24 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
linkfun = eval(substitute(function(mu, extra = NULL) {
Musual <- 2
+ newemu <- .emuuu
- eta.temp = theta2eta(mu, .lmuuu , earg = newemu)
- eta.kayy = theta2eta(if (is.numeric( .isize )) .isize else 1.0,
+ eta.temp <- theta2eta(mu, .lmuuu , earg = newemu)
+ eta.kayy <- theta2eta(if (is.numeric( .isize )) .isize else 1.0,
.lsize , earg = .esize )
- eta.kayy = 0 * eta.temp + eta.kayy # Right dimension now.
+ eta.kayy <- 0 * eta.temp + eta.kayy # Right dimension now.
- newemu <- .emuuu
if ( .lmuuu == "nbcanlink") {
newemu$size <- eta2theta(eta.kayy, .lsize , earg = .esize )
}
- eta.temp = cbind(eta.temp, eta.kayy)
+ eta.temp <- cbind(eta.temp, eta.kayy)
eta.temp[, interleave.VGAM(ncol(eta.temp), M = Musual), drop = FALSE]
}, list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
@@ -3552,15 +3558,15 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 2
- NOS = ncol(eta) / Musual
+ NOS <- ncol(eta) / Musual
- eta.k = eta[, Musual*(1:NOS), drop = FALSE]
+ eta.k <- eta[, Musual*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
- bigval = 68
- eta.k = ifelse(eta.k > bigval, bigval, eta.k)
- eta.k = ifelse(eta.k < -bigval, -bigval, eta.k)
+ bigval <- 68
+ eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
+ eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
}
- kmat = eta2theta(eta.k, .lsize , earg = .esize )
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -3585,15 +3591,15 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
deriv = eval(substitute(expression({
Musual <- 2
- NOS = ncol(eta) / Musual
- M = ncol(eta)
- eta.k = eta[, Musual*(1:NOS) , drop = FALSE]
+ NOS <- ncol(eta) / Musual
+ M <- ncol(eta)
+ eta.k <- eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
- bigval = 68
- eta.k = ifelse(eta.k > bigval, bigval, eta.k)
- eta.k = ifelse(eta.k < -bigval, -bigval, eta.k)
+ bigval <- 68
+ eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
+ eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
}
- kmat = eta2theta(eta.k, .lsize , earg = .esize )
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -3605,21 +3611,21 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
- dl.dmu = y / mu - (y + kmat) / (mu + kmat)
- dl.dk = digamma(y + kmat) - digamma(kmat) -
+ dl.dmu <- y / mu - (y + kmat) / (mu + kmat)
+ dl.dk <- digamma(y + kmat) - digamma(kmat) -
(y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
if ( .lmuuu == "nbcanlink")
newemu$wrt.eta <- 1
- dmu.deta = dtheta.deta(mu, .lmuuu , earg = newemu) # eta1
+ dmu.deta <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta1
if ( .lmuuu == "nbcanlink")
newemu$wrt.eta <- 2
- dk.deta1 = dtheta.deta(mu, .lmuuu , earg = newemu) # eta2
+ dk.deta1 <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta2
- dk.deta2 = dtheta.deta(kmat, .lsize , earg = .esize)
+ dk.deta2 <- dtheta.deta(kmat, .lsize , earg = .esize)
- myderiv = c(w) * cbind(dl.dmu * dmu.deta,
+ myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dk * dk.deta2)
@@ -3636,56 +3642,56 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
.emuuu = emuuu, .esize = esize))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, M)
+ wz <- matrix(as.numeric(NA), n, M)
if (is.null( .nsimEIM )) {
- fred2 = dotFortran(name = "enbin9", ans = double(n*NOS),
+ fred2 <- dotFortran(name = "enbin9", ans = double(n*NOS),
as.double(kmat), as.double(mu), as.double( .cutoff ),
as.integer(n), ok = as.integer(1), as.integer(NOS),
sumpdf = double(1), as.double( .Machine$double.eps ),
as.integer( .Maxiter ))
if (fred2$ok != 1)
stop("error in Fortran subroutine exnbin9")
- dim(fred2$ans) = c(n, NOS)
- ed2l.dk2 = -fred2$ans - 1/kmat + 1/(kmat+mu)
- wz[, Musual*(1:NOS)] = dk.deta2^2 * ed2l.dk2
+ dim(fred2$ans) <- c(n, NOS)
+ ned2l.dk2 <- -fred2$ans - 1/kmat + 1/(kmat+mu)
+ wz[, Musual*(1:NOS)] <- dk.deta2^2 * ned2l.dk2
} else {
- run.varcov = matrix(0, n, NOS)
+ run.varcov <- matrix(0, n, NOS)
for(ii in 1:( .nsimEIM )) {
- ysim = rnbinom(n = n*NOS, mu = c(mu), size = c(kmat))
+ ysim <- rnbinom(n = n*NOS, mu = c(mu), size = c(kmat))
if (NOS > 1) dim(ysim) = c(n, NOS)
- dl.dk = digamma(ysim + kmat) - digamma(kmat) -
+ dl.dk <- digamma(ysim + kmat) - digamma(kmat) -
(ysim + kmat) / (mu + kmat) +
1 + log(kmat / (kmat + mu))
- run.varcov = run.varcov + dl.dk^2
+ run.varcov <- run.varcov + dl.dk^2
} # end of for loop
- run.varcov = cbind(run.varcov / .nsimEIM )
- ed2l.dk2 = if (intercept.only)
+ run.varcov <- cbind(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz[, Musual*(1:NOS)] = ed2l.dk2 * dk.deta2^2
+ wz[, Musual*(1:NOS)] <- ned2l.dk2 * dk.deta2^2
} # end of else
- ed2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
- wz[, Musual*(1:NOS) - 1] = ed2l.dmu2 * dmu.deta^2
+ ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
+ wz[, Musual*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
if ( .lmuuu == "nbcanlink") {
wz[, Musual*(1:NOS)-1] <-
- wz[, Musual*(1:NOS)-1] + ed2l.dk2 * dk.deta1^2
+ wz[, Musual*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2
- wz = cbind(wz,
- kronecker(ed2l.dk2 * dk.deta1 * dk.deta2,
+ wz <- cbind(wz,
+ kronecker(ned2l.dk2 * dk.deta1 * dk.deta2,
if (NOS > 1) cbind(1, 0) else 1))
}
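Illustrative sketch, not part of the patch: the nsimEIM branch above estimates the expected information for the size parameter by Monte Carlo, averaging the squared score over responses simulated at the current (mu, k). In miniature, for a single observation:

mu <- 2; k <- 1.5; nsim <- 10000
ysim <- rnbinom(nsim, mu = mu, size = k)
dl.dk <- digamma(ysim + k) - digamma(k) - (ysim + k) / (mu + k) +
         1 + log(k / (k + mu))
mean(dl.dk^2)   # approximates the expected information element for k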
@@ -3702,8 +3708,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
if (deviance.arg) ans@deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual = 2
- NOS = ncol(eta) / Musual
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
@@ -3711,14 +3717,14 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
stop("cannot handle matrix 'w' yet")
- temp300 = eta[, Musual*(1:NOS), drop = FALSE]
+ temp300 <- eta[, Musual*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
- bigval = 68
- temp300[temp300 > bigval] = bigval
- temp300[temp300 < -bigval] = -bigval
+ bigval <- 68
+ temp300[temp300 > bigval] <- bigval
+ temp300[temp300 < -bigval] <- -bigval
} else stop("can only handle the 'loge' link")
- kmat = eta2theta(temp300, .lsize , earg = .esize )
- devi = 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+ kmat <- eta2theta(temp300, .lsize , earg = .esize )
+ devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
(y + kmat) * log((mu + kmat) / (kmat + y)))
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
@@ -3737,8 +3743,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
-polya.control <- function(save.weight = TRUE, ...)
-{
+polya.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -3750,8 +3755,7 @@ polya.control <- function(save.weight = TRUE, ...)
probs.y = 0.75,
nsimEIM = 100,
deviance.arg = FALSE, imethod = 1,
- shrinkage.init = 0.95, zero = -2)
-{
+ shrinkage.init = 0.95, zero = -2) {
@@ -3809,7 +3813,7 @@ polya.control <- function(save.weight = TRUE, ...)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- Musual = 2
+ Musual <- 2
if (any(function.name == c("cqo", "cao")))
stop("polya() does not work with cqo() or cao(). ",
"Try negbinomial()")
@@ -3818,7 +3822,8 @@ polya.control <- function(save.weight = TRUE, ...)
temp5 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1, maximize = TRUE)
w <- temp5$w
@@ -3826,8 +3831,8 @@ polya.control <- function(save.weight = TRUE, ...)
- M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
+ M <- Musual * ncol(y)
+ NOS <- ncoly <- ncol(y) # Number of species
predictors.names <-
c(namesof(if (NOS == 1) "prob" else
@@ -3850,49 +3855,49 @@ polya.control <- function(save.weight = TRUE, ...)
}
if (!length(etastart)) {
- mu.init = y
+ mu.init <- y
for(iii in 1:ncol(y)) {
- use.this = if ( .imethod == 1) {
+ use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .probs.y) + 1/16)
+ c(quantile(y[, iii], probs = .probs.y) + 1/16)
} else {
median(y[, iii]) + 1/16
}
if (FALSE) {
- mu.init[, iii] = MU.INIT[, iii]
+ mu.init[, iii] <- MU.INIT[, iii]
} else {
- medabsres = median(abs(y[, iii] - use.this)) + 1/32
+ medabsres <- median(abs(y[, iii] - use.this)) + 1/32
allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
- mu.init[, iii] = use.this + (1 - .sinit) * allowfun(y[, iii] -
+ mu.init[, iii] <- use.this + (1 - .sinit) * allowfun(y[, iii] -
use.this, maxtol = medabsres)
- mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
+ mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
}
}
if ( is.Numeric( .kinit )) {
- kayy.init = matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
+ kayy.init <- matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
} else {
negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- mu = extraargs
+ mu <- extraargs
sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
- k.grid = 2^((-7):7)
- k.grid = 2^(seq(-8, 8, length = 40))
- kayy.init = matrix(0, nrow = n, ncol = NOS)
+ k.grid <- 2^((-7):7)
+ k.grid <- 2^(seq(-8, 8, length = 40))
+ kayy.init <- matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kayy.init[, spp.] = getMaxMin(k.grid,
+ kayy.init[, spp.] <- getMaxMin(k.grid,
objfun = negbinomial.Loglikfun,
y = y[, spp.], x = x, w = w,
extraargs = mu.init[, spp.])
}
}
- prob.init = if (length(PROB.INIT)) PROB.INIT else
+ prob.init <- if (length(PROB.INIT)) PROB.INIT else
kayy.init / (kayy.init + mu.init)
@@ -3909,37 +3914,37 @@ polya.control <- function(save.weight = TRUE, ...)
.sinit = shrinkage.init, .nsimEIM = nsimEIM, .zero = zero,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual = 2
- NOS = ncol(eta) / Musual
- pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
+ pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
.lprob , earg = .eprob)
- kmat = eta2theta(eta[, Musual*(1:NOS)- 0, drop = FALSE],
+ kmat <- eta2theta(eta[, Musual*(1:NOS)- 0, drop = FALSE],
.lsize , earg = .esize)
kmat / (kmat + pmat)
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
- temp0303 = c(rep( .lprob , length = NOS),
+ temp0303 <- c(rep( .lprob , length = NOS),
rep( .lsize , length = NOS))
names(temp0303) =
c(if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""),
if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = 2)]
- misc$link = temp0303 # Already named
+ temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
+ misc$link <- temp0303 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = .eprob
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-1]] <- .eprob
+ misc$earg[[Musual*ii ]] <- .esize
}
- misc$isize = .isize
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- misc$shrinkage.init = .sinit
- misc$Musual = 2
+ misc$isize <- .isize
+ misc$imethod <- .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
+ misc$shrinkage.init <- .sinit
+ misc$Musual <- 2
misc$multipleResponses <- TRUE
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
@@ -3950,17 +3955,17 @@ polya.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual = 2
- NOS = ncol(eta) / Musual
- pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
+ pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
.lprob , earg = .eprob)
- temp300 = eta[, Musual*(1:NOS) , drop = FALSE]
+ temp300 <- eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
- bigval = 68
- temp300 = ifelse(temp300 > bigval, bigval, temp300)
- temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
+ bigval <- 68
+ temp300 <- ifelse(temp300 > bigval, bigval, temp300)
+ temp300 <- ifelse(temp300 < -bigval, -bigval, temp300)
}
- kmat = eta2theta(temp300, .lsize , earg = .esize)
+ kmat <- eta2theta(temp300, .lsize , earg = .esize)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE))
@@ -3968,70 +3973,70 @@ polya.control <- function(save.weight = TRUE, ...)
.esize = esize, .eprob = eprob ))),
vfamily = c("polya"),
deriv = eval(substitute(expression({
- Musual = 2
- NOS = ncol(eta) / Musual
- M = ncol(eta)
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
+ M <- ncol(eta)
- pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
+ pmat <- eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
.lprob , earg = .eprob)
- temp3 = eta[, Musual*(1:NOS) , drop = FALSE]
+ temp3 <- eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
- bigval = 68
- temp3 = ifelse(temp3 > bigval, bigval, temp3)
- temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
+ bigval <- 68
+ temp3 <- ifelse(temp3 > bigval, bigval, temp3)
+ temp3 <- ifelse(temp3 < -bigval, -bigval, temp3)
}
- kmat = eta2theta(temp3, .lsize , earg = .esize)
+ kmat <- eta2theta(temp3, .lsize , earg = .esize)
- dl.dprob = kmat / pmat - y / (1.0 - pmat)
- dl.dkayy = digamma(y + kmat) - digamma(kmat) + log(pmat)
+ dl.dprob <- kmat / pmat - y / (1.0 - pmat)
+ dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
- dprob.deta = dtheta.deta(pmat, .lprob , earg = .eprob)
- dkayy.deta = dtheta.deta(kmat, .lsize , earg = .esize)
- dthetas.detas = cbind(dprob.deta, dkayy.deta)
- dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
- myderiv = c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
+ dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
+ dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
+ dthetas.detas <- cbind(dprob.deta, dkayy.deta)
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+ myderiv <- c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
- mumat = as.matrix(mu)
+ ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ mumat <- as.matrix(mu)
for(spp. in 1:NOS) {
- run.varcov = 0
- kvec = kmat[, spp.]
- pvec = pmat[, spp.]
+ run.varcov <- 0
+ kvec <- kmat[, spp.]
+ pvec <- pmat[, spp.]
for(ii in 1:( .nsimEIM )) {
- ysim = rnbinom(n = n, prob = pvec, size = kvec)
+ ysim <- rnbinom(n = n, prob = pvec, size = kvec)
- dl.dprob = kvec / pvec - ysim / (1.0 - pvec)
- dl.dkayy = digamma(ysim + kvec) - digamma(kvec) + log(pvec)
- temp3 = cbind(dl.dprob, dl.dkayy)
- run.varcov = run.varcov +
+ dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
+ dl.dkayy <- digamma(ysim + kvec) - digamma(kvec) + log(pvec)
+ temp3 <- cbind(dl.dprob, dl.dkayy)
+ run.varcov <- run.varcov +
temp3[, ind1$row.index] *
temp3[, ind1$col.index]
}
- run.varcov = cbind(run.varcov / .nsimEIM)
+ run.varcov <- cbind(run.varcov / .nsimEIM)
- wz1 = if (intercept.only)
+ wz1 <- if (intercept.only)
matrix(colMeans(run.varcov),
nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
run.varcov
- wz1 = wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+ wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
dThetas.detas[, Musual * (spp. - 1) + ind1$col]
for(jay in 1:Musual)
for(kay in jay:Musual) {
- cptr = iam((spp. - 1) * Musual + jay,
+ cptr <- iam((spp. - 1) * Musual + jay,
(spp. - 1) * Musual + kay,
M = M)
- wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
+ wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
}
} # End of for(spp.) loop
@@ -4045,9 +4050,9 @@ polya.control <- function(save.weight = TRUE, ...)
if (deviance.arg) ans@deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual = 2
- NOS = ncol(eta) / Musual
- temp300 = eta[, Musual*(1:NOS), drop = FALSE]
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
+ temp300 <- eta[, Musual*(1:NOS), drop = FALSE]
@@ -4058,14 +4063,14 @@ polya.control <- function(save.weight = TRUE, ...)
if ( .lsize == "loge") {
- bigval = 68
- temp300[temp300 > bigval] = bigval
- temp300[temp300 < -bigval] = -bigval
+ bigval <- 68
+ temp300[temp300 > bigval] <- bigval
+ temp300[temp300 < -bigval] <- -bigval
} else {
stop("can only handle the 'loge' link")
}
- kayy = eta2theta(temp300, .lsize , earg = .esize)
- devi = 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+ kayy <- eta2theta(temp300, .lsize , earg = .esize)
+ devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
(y + kayy) * log((mu + kayy) / (kayy + y)))
if (residuals)
sign(y - mu) * sqrt(abs(devi) * w) else
@@ -4079,13 +4084,12 @@ polya.control <- function(save.weight = TRUE, ...)
- simple.poisson <- function()
-{
+ simple.poisson <- function() {
new("vglmff",
blurb = c("Poisson distribution\n\n",
- "Link: log(lambda)",
- "\n",
- "Variance: lambda"),
+ "Link: log(lambda)",
+ "\n",
+ "Variance: lambda"),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
nz <- y > 0
devi <- - (y - mu)
@@ -4183,9 +4187,9 @@ polya.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
init.df <- if (length( .idof )) .idof else {
- VarY = var(y)
- MadY = mad(y)
- if (VarY <= (1 + .tol1 )) VarY = 1.12
+ VarY <- var(y)
+ MadY <- mad(y)
+ if (VarY <= (1 + .tol1 )) VarY <- 1.12
if ( .imethod == 1) {
2 * VarY / (VarY - 1)
} else if ( .imethod == 2) {
@@ -4237,10 +4241,10 @@ polya.control <- function(save.weight = TRUE, ...)
}), list( .ldof = ldof, .edof = edof ))),
weight = eval(substitute(expression({
- const2 = (Dof + 0) / (Dof + 3)
+ const2 <- (Dof + 0) / (Dof + 3)
const2[!is.finite(Dof)] <- 1 # Handles Inf
- tmp6 = DDS(Dof)
+ tmp6 <- DDS(Dof)
nedl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
wz <- c(w) * nedl2.dnu2 * ddf.deta^2
@@ -4256,16 +4260,16 @@ polya.control <- function(save.weight = TRUE, ...)
Kayfun.studentt <- function(df, bigno = .Machine$double.eps^(-0.46)) {
ind1 <- is.finite(df)
- const4 = dnorm(0)
+ const4 <- dnorm(0)
ans <- df
if (any(ind1))
ans[ind1] <- exp(lgamma((df[ind1] + 1) / 2) -
lgamma( df[ind1] / 2)) / sqrt(pi * df[ind1])
- ans[df <= 0] = NaN
+ ans[df <= 0] <- NaN
ind2 <- (df >= bigno)
if (any(ind2)) {
- dff = df[ind2]
+ dff <- df[ind2]
ans[ind2] <- const4 # 1 / const3 # for handling df = Inf
}
ans[!ind1] <- const4 # 1 / const3 # for handling df = Inf
@@ -4281,8 +4285,7 @@ polya.control <- function(save.weight = TRUE, ...)
ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
imethod = 1,
- zero = -(2:3))
-{
+ zero = -(2:3)) {
@@ -4345,7 +4348,8 @@ polya.control <- function(save.weight = TRUE, ...)
temp5 <-
w.y.check(w = w, y = y,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
maximize = TRUE)
w <- temp5$w
@@ -4379,10 +4383,10 @@ polya.control <- function(save.weight = TRUE, ...)
sdvec / 2.3
sdvec <- rep(sdvec,
- length.out = max(length(sdvec),
+ length.out = max(length(sdvec),
length(init.sca)))
init.sca <- rep(init.sca,
- length.out = max(length(sdvec),
+ length.out = max(length(sdvec),
length(init.sca)))
ind9 <- (sdvec / init.sca <= (1 + 0.12))
sdvec[ind9] <- sqrt(1.12) * init.sca[ind9]
@@ -4434,7 +4438,7 @@ polya.control <- function(save.weight = TRUE, ...)
misc$Musual <- Musual
misc$imethod <- .imethod
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -4486,47 +4490,62 @@ polya.control <- function(save.weight = TRUE, ...)
.ldof = ldof, .edof = edof ))),
weight = eval(substitute(expression({
- const1 = (Dof + 1) / (Dof + 3)
- const2 = (Dof + 0) / (Dof + 3)
+ const1 <- (Dof + 1) / (Dof + 3)
+ const2 <- (Dof + 0) / (Dof + 3)
const1[!is.finite(Dof)] <- 1 # Handles Inf
const2[!is.finite(Dof)] <- 1 # Handles Inf
- const4 = dnorm(0)
- ed2l.dlocat2 = const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
- ed2l.dscale2 = 2 * const2 / Sca^2
+ const4 <- dnorm(0)
+ ned2l.dlocat2 <- const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
+ ned2l.dscale2 <- 2 * const2 / Sca^2
DDS <- function(df) digamma((df + 1) / 2) - digamma(df / 2)
DDSp <- function(df) 0.5 * (trigamma((df + 1) / 2) - trigamma(df / 2))
- tmp6 = DDS(Dof)
+ tmp6 <- DDS(Dof)
edl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
- ed2l.dshape2 <- cbind(edl2.dnu2) # cosmetic name change
+ ned2l.dshape2 <- cbind(edl2.dnu2) # cosmetic name change
+
+ ned2l.dshape.dlocat <- cbind(0 * Sca)
+ ned2l.dshape.dscale <- cbind((-1 / (Dof + 1.0) + const2 * DDS(Dof)) / Sca)
+
+
+
+ wz <- array(c(c(w) * ned2l.dlocat2 * dloc.deta^2,
+ c(w) * ned2l.dscale2 * dsca.deta^2,
+ c(w) * ned2l.dshape2 * ddof.deta^2,
+ c(w) * ned2l.dshape2 * 0,
+ c(w) * ned2l.dshape.dscale * dsca.deta * ddof.deta,
+ c(w) * ned2l.dshape.dlocat * dloc.deta * ddof.deta),
+ dim = c(n, M / Musual, 6))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
- ed2l.dshape.dlocat = cbind(0 * Sca)
- ed2l.dshape.dscale = cbind((-1 / (Dof + 1.0) + const2 * DDS(Dof)) / Sca)
- wz = matrix(0.0, n, dimm(M))
- wz[, Musual*(1:NOS) - 2] = ed2l.dlocat2 * dloc.deta^2
- wz[, Musual*(1:NOS) - 1] = ed2l.dscale2 * dsca.deta^2
- wz[, Musual*(1:NOS) - 0] = ed2l.dshape2 * ddof.deta^2
+
+ if (FALSE) {
+ wz <- matrix(0.0, n, dimm(M))
+ wz[, Musual*(1:NOS) - 2] <- ned2l.dlocat2 * dloc.deta^2
+ wz[, Musual*(1:NOS) - 1] <- ned2l.dscale2 * dsca.deta^2
+ wz[, Musual*(1:NOS) - 0] <- ned2l.dshape2 * ddof.deta^2
for (ii in ((1:NOS) - 1)) {
- ind3 = 1 + ii
+ ind3 <- 1 + ii
wz[, iam(ii*Musual + 1, ii*Musual + 3, M = M)] <-
- ed2l.dshape.dlocat[, ind3] *
+ ned2l.dshape.dlocat[, ind3] *
dloc.deta[, ind3] * ddof.deta[, ind3]
wz[, iam(ii*Musual + 2, ii*Musual + 3, M = M)] <-
- ed2l.dshape.dscale[, ind3] *
+ ned2l.dshape.dscale[, ind3] *
dsca.deta[, ind3] * ddof.deta[, ind3]
}
while (all(wz[, ncol(wz)] == 0))
wz <- wz[, -ncol(wz)]
+ }
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+ wz
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.ldof = ldof, .edof = edof ))))
@@ -4541,8 +4560,7 @@ polya.control <- function(save.weight = TRUE, ...)
lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
- zero = -2)
-{
+ zero = -2) {
lloc <- as.list(substitute(llocation))
eloc <- link2list(lloc)
@@ -4597,7 +4615,7 @@ polya.control <- function(save.weight = TRUE, ...)
temp5 <-
w.y.check(w = w, y = y,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
out.wy = TRUE,
maximize = TRUE)
w <- temp5$w
@@ -4717,19 +4735,19 @@ polya.control <- function(save.weight = TRUE, ...)
.doff = doff ))),
weight = eval(substitute(expression({
- const1 = (Dof + 1) / (Dof + 3)
- const2 = (Dof + 0) / (Dof + 3)
+ const1 <- (Dof + 1) / (Dof + 3)
+ const2 <- (Dof + 0) / (Dof + 3)
const1[!is.finite( Dof )] <- 1 # Handles Inf
const2[!is.finite( Dof )] <- 1 # Handles Inf
- const4 = dnorm(0)
- ed2l.dlocat2 = const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
+ const4 <- dnorm(0)
+ ned2l.dlocat2 <- const1 / (Sca * (Kayfun.studentt(Dof) / const4))^2
- ed2l.dscale2 = 2.0 * const2 / Sca^2 # 2.0 seems to work
+ ned2l.dscale2 <- 2.0 * const2 / Sca^2 # 2.0 seems to work
- wz = matrix(as.numeric(NA), n, M) #2=M; diagonal!
- wz[, Musual*(1:NOS) - 1] = ed2l.dlocat2 * dlocat.deta^2
- wz[, Musual*(1:NOS) ] = ed2l.dscale2 * dscale.deta^2
+ wz <- matrix(as.numeric(NA), n, M) #2=M; diagonal!
+ wz[, Musual*(1:NOS) - 1] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, Musual*(1:NOS) ] <- ned2l.dscale2 * dscale.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lloc = lloc, .eloc = eloc,
@@ -4776,7 +4794,8 @@ polya.control <- function(save.weight = TRUE, ...)
temp5 <-
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -4854,66 +4873,66 @@ dsimplex <- function(x, mu = 0.5, dispersion = 1, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- sigma = dispersion
+ sigma <- dispersion
deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- logpdf = (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
+ logpdf <- (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
1.5 * log1p(-x) - 0.5 * deeFun(x, mu) / sigma^2)
- logpdf[x <= 0.0] = -Inf # log(0.0)
- logpdf[x >= 1.0] = -Inf # log(0.0)
- logpdf[mu <= 0.0] = NaN
- logpdf[mu >= 1.0] = NaN
- logpdf[sigma <= 0.0] = NaN
+ logpdf[x <= 0.0] <- -Inf # log(0.0)
+ logpdf[x >= 1.0] <- -Inf # log(0.0)
+ logpdf[mu <= 0.0] <- NaN
+ logpdf[mu >= 1.0] <- NaN
+ logpdf[sigma <= 0.0] <- NaN
if (log.arg) logpdf else exp(logpdf)
}
rsimplex <- function(n, mu = 0.5, dispersion = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
oneval <- (length(mu) == 1 && length(dispersion) == 1)
- answer = rep(0.0, length.out = use.n)
- mu = rep(mu, length.out = use.n);
- dispersion = rep(dispersion, length.out = use.n)
- Kay1 = 3 * (dispersion * mu * (1-mu))^2
+ answer <- rep(0.0, length.out = use.n)
+ mu <- rep(mu, length.out = use.n);
+ dispersion <- rep(dispersion, length.out = use.n)
+ Kay1 <- 3 * (dispersion * mu * (1-mu))^2
if (oneval) {
- Kay1 = Kay1[1] # Since oneval means there is only one unique value
- mymu = mu[1]
- myroots = polyroot(c(-mymu^2, Kay1+2*mymu^2, -3*Kay1+1-2*mymu, 2*Kay1))
- myroots = myroots[abs(Im(myroots)) < 0.00001]
- myroots = Re(myroots)
- myroots = myroots[myroots >= 0.0]
- myroots = myroots[myroots <= 1.0]
- pdfmax = dsimplex(myroots, mymu, dispersion[1])
- pdfmax = rep(max(pdfmax), length.out = use.n) # For multiple peaks
+ Kay1 <- Kay1[1] # Since oneval means there is only one unique value
+ mymu <- mu[1]
+ myroots <- polyroot(c(-mymu^2, Kay1+2*mymu^2, -3*Kay1+1-2*mymu, 2*Kay1))
+ myroots <- myroots[abs(Im(myroots)) < 0.00001]
+ myroots <- Re(myroots)
+ myroots <- myroots[myroots >= 0.0]
+ myroots <- myroots[myroots <= 1.0]
+ pdfmax <- dsimplex(myroots, mymu, dispersion[1])
+ pdfmax <- rep(max(pdfmax), length.out = use.n) # For multiple peaks
} else {
- pdfmax = numeric(use.n)
+ pdfmax <- numeric(use.n)
for (ii in 1:use.n) {
- myroots = polyroot(c(-mu[ii]^2, Kay1[ii]+2*mu[ii]^2,
+ myroots <- polyroot(c(-mu[ii]^2, Kay1[ii]+2*mu[ii]^2,
-3*Kay1[ii]+1-2*mu[ii], 2*Kay1[ii]))
- myroots = myroots[abs(Im(myroots)) < 0.00001]
- myroots = Re(myroots)
- myroots = myroots[myroots >= 0.0]
- myroots = myroots[myroots <= 1.0]
- pdfmax[ii] = max(dsimplex(myroots, mu[ii], dispersion[ii]))
+ myroots <- myroots[abs(Im(myroots)) < 0.00001]
+ myroots <- Re(myroots)
+ myroots <- myroots[myroots >= 0.0]
+ myroots <- myroots[myroots <= 1.0]
+ pdfmax[ii] <- max(dsimplex(myroots, mu[ii], dispersion[ii]))
}
}
- index = 1:use.n
- nleft = length(index)
+ index <- 1:use.n
+ nleft <- length(index)
while (nleft > 0) {
- xx = runif(nleft) # , 0, 1
- yy = runif(nleft, max = pdfmax[index])
- newindex = (1:nleft)[yy < dsimplex(xx, mu[index], dispersion[index])]
+ xx <- runif(nleft) # , 0, 1
+ yy <- runif(nleft, max = pdfmax[index])
+ newindex <- (1:nleft)[yy < dsimplex(xx, mu[index], dispersion[index])]
if (length(newindex)) {
- answer[index[newindex]] = xx[newindex]
- index = setdiff(index, index[newindex])
- nleft = nleft - length(newindex)
+ answer[index[newindex]] <- xx[newindex]
+ index <- setdiff(index, index[newindex])
+ nleft <- nleft - length(newindex)
}
}
answer
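
The loop above is a plain accept-reject sampler: uniform candidates on (0, 1) are kept whenever a uniform draw under the density peak pdfmax falls below dsimplex(). A minimal sketch of the pair in use, assuming VGAM 0.9-1 is attached and exports dsimplex() and rsimplex():

    library(VGAM)                  # assumed to export dsimplex() and rsimplex()
    set.seed(123)
    yy <- rsimplex(5000, mu = 0.4, dispersion = 2)    # accept-reject draws
    hist(yy, breaks = 50, probability = TRUE)
    curve(dsimplex(x, mu = 0.4, dispersion = 2),
          from = 0.01, to = 0.99, add = TRUE, col = "blue")  # overlay the density
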
@@ -4960,15 +4979,15 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Univariate simplex distribution\n\n",
- "f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
- " exp[-0.5*(y-mu)^2 / (sigma^2 * y * ",
- "(1-y) * mu^2 * (1-mu)^2)],\n",
- " 0 < y < 1, 0 < mu < 1, sigma > 0\n\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("sigma", lsigma, earg = esigma), "\n\n",
- "Mean: mu\n",
- "Variance function: V(mu) = mu^3 * (1 - mu)^3"),
+ "f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
+ " exp[-0.5*(y-mu)^2 / (sigma^2 * y * ",
+ "(1-y) * mu^2 * (1-mu)^2)],\n",
+ " 0 < y < 1, 0 < mu < 1, sigma > 0\n\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("sigma", lsigma, earg = esigma), "\n\n",
+ "Mean: mu\n",
+ "Variance function: V(mu) = mu^3 * (1 - mu)^3"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -4981,7 +5000,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
Is.positive.y = TRUE)
- predictors.names = c(
+ predictors.names <- c(
namesof("mu", .lmu , earg = .emu , tag = FALSE),
namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
@@ -4996,10 +5015,10 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
mean(y, trim = 0.1)
- init.mu = (1 - .sinit) * y + .sinit * use.this
- mu.init = rep(if (length( .imu )) .imu else init.mu, length = n)
- sigma.init = if (length( .isigma )) rep( .isigma, leng = n) else {
- use.this = deeFun(y, mu=init.mu)
+ init.mu <- (1 - .sinit) * y + .sinit * use.this
+ mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
+ sigma.init <- if (length( .isigma )) rep( .isigma, leng = n) else {
+ use.this <- deeFun(y, mu=init.mu)
rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
if ( .imethod == 1) median(use.this) else
mean(use.this, trim = 0.1)),
@@ -5031,7 +5050,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.sinit = shrinkage.init, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
+ sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE))
@@ -5042,23 +5061,23 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
deriv = eval(substitute(expression({
deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
+ sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+ dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
- dl.dmu = (y - mu) * (deeFun(y, mu) +
+ dl.dmu <- (y - mu) * (deeFun(y, mu) +
1 / (mu * (1 - mu))^2) / (mu * (1 - mu) * sigma^2)
- dl.dsigma = (deeFun(y, mu) / sigma^2 - 1) / sigma
+ dl.dsigma <- (deeFun(y, mu) / sigma^2 - 1) / sigma
cbind(dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta)
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, M) # Diagonal!!
- eim11 = 3 / (mu * (1 - mu)) + 1 / (sigma^2 * (mu * (1 - mu))^3)
- wz[, iam(1, 1, M)] = eim11 * dmu.deta^2
- wz[, iam(2, 2, M)] = (2 / sigma^2) * dsigma.deta^2
+ wz <- matrix(0.0, n, M) # Diagonal!!
+ eim11 <- 3 / (mu * (1 - mu)) + 1 / (sigma^2 * (mu * (1 - mu))^3)
+ wz[, iam(1, 1, M)] <- eim11 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- (2 / sigma^2) * dsigma.deta^2
c(w) * wz
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma ))))
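
The weight slot uses the diagonal expected information eim11 = 3/(mu*(1-mu)) + 1/(sigma^2*(mu*(1-mu))^3) for mu and 2/sigma^2 for sigma. A hedged end-to-end sketch, assuming this constructor is exported as simplex() and that rsimplex() is available:

    library(VGAM)
    set.seed(1)
    sdat <- data.frame(y = rsimplex(1000, mu = 0.4, dispersion = 2))
    fit  <- vglm(y ~ 1, simplex(), data = sdat, trace = TRUE)
    Coef(fit)     # back-transformed estimates; roughly mu = 0.4, sigma = 2
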
@@ -5072,8 +5091,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
rig <- function(lmu = "identity", llambda = "loge",
- imu = NULL, ilambda = 1)
-{
+ imu = NULL, ilambda = 1) {
if (!is.Numeric(ilambda, positive = TRUE))
@@ -5091,29 +5109,30 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Reciprocal inverse Gaussian distribution \n",
- "f(y) = [lambda/(2*pi*y)]^(0.5) * \n",
- " exp[-0.5*(lambda/y) * (y-mu)^2], ",
- " 0 < y,\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("lambda", llambda, earg = elambda), "\n\n",
- "Mean: mu"),
+ "f(y) = [lambda/(2*pi*y)]^(0.5) * \n",
+ " exp[-0.5*(lambda/y) * (y-mu)^2], ",
+ " 0 < y,\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
+ "Mean: mu"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
- predictors.names =
+ predictors.names <-
c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
if (!length(etastart)) {
- mu.init = rep(if (length( .imu )) .imu else
+ mu.init <- rep(if (length( .imu )) .imu else
median(y), length = n)
- lambda.init = rep(if (length( .ilambda )) .ilambda else
+ lambda.init <- rep(if (length( .ilambda )) .ilambda else
sqrt(var(y)), length = n)
etastart <-
cbind(theta2eta(mu.init, .lmu , earg = .emu),
@@ -5135,7 +5154,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
@@ -5144,19 +5163,19 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
vfamily = c("rig"),
deriv = eval(substitute(expression({
if (iter == 1) {
- d3 = deriv3(~ w *
+ d3 <- deriv3(~ w *
(-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
c("mu", "lambda"), hessian= TRUE)
}
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient")
+ eval.d3 <- eval(d3)
+ dl.dthetas <- attr(eval.d3, "gradient")
- dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
- dtheta.detas = cbind(dmu.deta, dlambda.deta)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+ dtheta.detas <- cbind(dmu.deta, dlambda.deta)
dl.dthetas * dtheta.detas
}), list( .lmu = lmu, .llambda = llambda,
@@ -5164,26 +5183,26 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
weight = eval(substitute(expression({
d2l.dthetas2 = attr(eval.d3, "hessian")
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
- wz[, iam(2, 2, M)] = -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
- wz[, iam(1, 2, M)] = -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
+ wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
+ wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
dtheta.detas[, 2]
if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu , earg = .emu)
- d2lambda = d2theta.deta2(lambda, .llambda , earg = .elambda )
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
+ d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
+ d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
}
if (intercept.only) {
- sumw = sum(w)
+ sumw <- sum(w)
for(ii in 1:ncol(wz))
- wz[, ii] = sum(wz[, ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else
- pooled.weight = FALSE
+ pooled.weight <- FALSE
wz
}), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
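
The rig() deriv slot builds the score and Hessian symbolically with stats::deriv3(); the weight slot then negates the Hessian (and optionally pools it over observations) to form the working weights. A minimal base-R sketch of that trick, dropping the prior weight w for clarity:

    d3 <- deriv3(~ -0.5 * log(y) + 0.5 * log(lambda) - (0.5 * lambda / y) * (y - mu)^2,
                 c("mu", "lambda"), hessian = TRUE)
    y <- 2; mu <- 1.5; lambda <- 1
    ll <- eval(d3)            # log-density evaluated at (mu, lambda)
    attr(ll, "gradient")      # dl/dmu, dl/dlambda (the 'gradient' attribute used above)
    attr(ll, "hessian")       # second derivatives feeding wz via iam()
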
@@ -5193,8 +5212,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
hypersecant <- function(link.theta = elogit(min = -pi/2, max = pi/2),
- init.theta = NULL)
-{
+ init.theta = NULL) {
link.theta <- as.list(substitute(link.theta))
@@ -5204,15 +5222,16 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Hyperbolic Secant distribution \n",
- "f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
- " for all y,\n",
- "Link: ",
- namesof("theta", link.theta , earg = earg), "\n\n",
- "Mean: tan(theta)"),
+ "f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
+ " for all y,\n",
+ "Link: ",
+ namesof("theta", link.theta , earg = earg), "\n\n",
+ "Mean: tan(theta)"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5220,7 +5239,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
predictors.names <-
namesof("theta", .link.theta , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- theta.init = rep(if (length( .init.theta )) .init.theta else
+ theta.init <- rep(if (length( .init.theta )) .init.theta else
median(y), length = n)
etastart <-
theta2eta(theta.init, .link.theta , earg = .earg )
@@ -5228,7 +5247,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .link.theta = link.theta , .earg = earg,
.init.theta = init.theta ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
tan(theta)
}, list( .link.theta = link.theta , .earg = earg ))),
last = eval(substitute(expression({
@@ -5238,21 +5257,21 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .link.theta = link.theta , .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
}, list( .link.theta = link.theta , .earg = earg ))),
vfamily = c("hypersecant"),
deriv = eval(substitute(expression({
- theta = eta2theta(eta, .link.theta , earg = .earg )
- dl.dthetas = y - tan(theta)
- dparam.deta = dtheta.deta(theta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
+ dl.dthetas <- y - tan(theta)
+ dparam.deta <- dtheta.deta(theta, .link.theta , earg = .earg )
c(w) * dl.dthetas * dparam.deta
}), list( .link.theta = link.theta , .earg = earg ))),
weight = expression({
- d2l.dthetas2 = 1 / cos(theta)^2
- wz = c(w) * d2l.dthetas2 * dparam.deta^2
+ d2l.dthetas2 <- 1 / cos(theta)^2
+ wz <- c(w) * d2l.dthetas2 * dparam.deta^2
wz
}))
}
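
The stated normalising constant and the mean tan(theta) are easy to confirm numerically with base R alone:

    theta <- 0.3    # any value in (-pi/2, pi/2)
    f <- function(y) exp(theta * y + log(cos(theta))) / (2 * cosh(pi * y / 2))
    integrate(f, -Inf, Inf)$value                      # ~ 1
    integrate(function(y) y * f(y), -Inf, Inf)$value   # ~ tan(0.3)
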
@@ -5260,8 +5279,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
hypersecant.1 <- function(link.theta = elogit(min = -pi/2, max = pi/2),
- init.theta = NULL)
-{
+ init.theta = NULL) {
link.theta <- as.list(substitute(link.theta))
@@ -5271,13 +5289,13 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Hyperbolic Secant distribution \n",
- "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
- " (1-y)^(-0.5-theta/pi), ",
- " 0 < y < 1,\n",
- "Link: ",
- namesof("theta", link.theta , earg = earg), "\n\n",
- "Mean: 0.5 + theta/pi", "\n",
- "Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
+ "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
+ " (1-y)^(-0.5-theta/pi), ",
+ " 0 < y < 1,\n",
+ "Link: ",
+ namesof("theta", link.theta , earg = earg), "\n\n",
+ "Mean: 0.5 + theta/pi", "\n",
+ "Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
initialize = eval(substitute(expression({
if (any(y <= 0 | y >= 1))
stop("all response 'y' values must be in (0,1)")
@@ -5285,7 +5303,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5294,7 +5313,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
namesof("theta", .link.theta , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- theta.init = rep(if (length( .init.theta )) .init.theta else
+ theta.init <- rep(if (length( .init.theta )) .init.theta else
median(y), length = n)
etastart <-
@@ -5303,7 +5322,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .link.theta = link.theta , .earg = earg,
.init.theta = init.theta ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
0.5 + theta / pi
}, list( .link.theta = link.theta , .earg = earg ))),
last = eval(substitute(expression({
@@ -5313,7 +5332,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}), list( .link.theta = link.theta , .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
@@ -5321,14 +5340,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
}, list( .link.theta = link.theta , .earg = earg ))),
vfamily = c("hypersecant.1"),
deriv = eval(substitute(expression({
- theta = eta2theta(eta, .link.theta , earg = .earg )
- dl.dthetas = -tan(theta) + log(y/(1-y)) / pi
- dparam.deta = dtheta.deta(theta, .link.theta , earg = .earg )
+ theta <- eta2theta(eta, .link.theta , earg = .earg )
+ dl.dthetas <- -tan(theta) + log(y/(1-y)) / pi
+ dparam.deta <- dtheta.deta(theta, .link.theta , earg = .earg )
c(w) * dl.dthetas * dparam.deta
}), list( .link.theta = link.theta , .earg = earg ))),
weight = expression({
- d2l.dthetas2 = 1 / cos(theta)^2
- wz = c(w) * d2l.dthetas2 * dparam.deta^2
+ d2l.dthetas2 <- 1 / cos(theta)^2
+ wz <- c(w) * d2l.dthetas2 * dparam.deta^2
wz
}))
}
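
hypersecant.1 works on (0, 1): its density is that of a Beta(1/2 + theta/pi, 1/2 - theta/pi) variable, and the blurb's constant cos(theta)/pi is 1/B(a, 1-a) by the reflection formula, e.g.

    theta <- 0.3    # any value in (-pi/2, pi/2)
    all.equal(beta(0.5 + theta/pi, 0.5 - theta/pi), pi / cos(theta))   # TRUE
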
@@ -5336,8 +5355,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
leipnik <- function(lmu = "logit", llambda = "loge",
- imu = NULL, ilambda = NULL)
-{
+ imu = NULL, ilambda = NULL) {
@@ -5358,14 +5376,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Leipnik's distribution \n",
- "f(y) = (y(1-y))^(-1/2) * [1 + (y-mu)^2 / (y*(1-y))]^(-lambda/2) /\n",
- " Beta[(lambda+1)/2, 1/2], ",
- " 0 < y < 1, lambda > -1\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("lambda", llambda, earg = elambda), "\n\n",
- "Mean: mu\n",
- "Variance: mu*(1-mu)"),
+ "f(y) = (y(1-y))^(-1/2) * [1 + (y-mu)^2 / (y*(1-y))]^(-lambda/2) /\n",
+ " Beta[(lambda+1)/2, 1/2], ",
+ " 0 < y < 1, lambda > -1\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
+ "Mean: mu\n",
+ "Variance: mu*(1-mu)"),
initialize = eval(substitute(expression({
if (any(y <= 0 | y >= 1))
stop("all response 'y' values must be in (0,1)")
@@ -5373,7 +5391,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5383,9 +5402,9 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
if (!length(etastart)) {
- mu.init = rep(if (length( .imu )) .imu else
+ mu.init <- rep(if (length( .imu )) .imu else
(y), length = n)
- lambda.init = rep(if (length( .ilambda )) .ilambda else
+ lambda.init <- rep(if (length( .ilambda )) .ilambda else
1/var(y), length = n)
etastart <-
cbind(theta2eta(mu.init, .lmu , earg = .emu ),
@@ -5403,12 +5422,12 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
misc$earg <- list(mu = .emu , lambda = .elambda )
misc$pooled.weight <- pooled.weight
- misc$expected = FALSE
+ misc$expected <- FALSE
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w) * (-0.5*log(y*(1-y)) - 0.5 * lambda *
@@ -5418,39 +5437,39 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
vfamily = c("leipnik"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
dl.dthetas =
cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
0.5*digamma((lambda+1)/2) +
0.5*digamma(1+lambda/2))
- dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
- dtheta.detas = cbind(dmu.deta, dlambda.deta)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = .emu)
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+ dtheta.detas <- cbind(dmu.deta, dlambda.deta)
c(w) * dl.dthetas * dtheta.detas
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
weight = eval(substitute(expression({
- denominator = y*(1-y) + (y-mu)^2
- d2l.dthetas2 = array(NA, c(n, 2, 2))
- d2l.dthetas2[, 1, 1] = c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
- d2l.dthetas2[, 1, 2] =
- d2l.dthetas2[, 2, 1] = c(w) * (y-mu) / denominator
- d2l.dthetas2[, 2, 2] = c(w) * (-0.25*trigamma((lambda+1)/2) +
+ denominator <- y*(1-y) + (y-mu)^2
+ d2l.dthetas2 <- array(NA, c(n, 2, 2))
+ d2l.dthetas2[, 1, 1] <- c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
+ d2l.dthetas2[, 1, 2] <-
+ d2l.dthetas2[, 2, 1] <- c(w) * (y-mu) / denominator
+ d2l.dthetas2[, 2, 2] <- c(w) * (-0.25*trigamma((lambda+1)/2) +
0.25*trigamma(1+lambda/2))
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
- wz[, iam(2, 2, M)] = -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
- wz[, iam(1, 2, M)] = -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
+ wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
+ wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
dtheta.detas[, 2]
if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu , earg = .emu)
- d2lambda = d2theta.deta2(lambda, .llambda , earg = .elambda )
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
+ d2mudeta2 <- d2theta.deta2(mu, .lmu , earg = .emu)
+ d2lambda <- d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
}
if (intercept.only) {
@@ -5460,7 +5479,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
pooled.weight <- TRUE
wz <- c(w) * wz # Put back the weights
} else
- pooled.weight = FALSE
+ pooled.weight <- FALSE
wz
}), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
@@ -5475,8 +5494,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
llambda = "loge",
irho = NULL,
ilambda = NULL,
- zero = NULL)
-{
+ zero = NULL) {
@@ -5497,18 +5515,19 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Inverse binomial distribution\n\n",
- "Links: ",
- namesof("rho", lrho, earg = erho), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: lambda*(1-rho)/(2*rho-1)\n",
- "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
+ "Links: ",
+ namesof("rho", lrho, earg = erho), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: lambda*(1-rho)/(2*rho-1)\n",
+ "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5517,13 +5536,13 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
if (!length(etastart)) {
- covarn = sd(c(y))^2 / weighted.mean(y, w)
- temp1 = 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
- temp2 = 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
- init.rho = rep(if (length( .irho)) .irho else {
+ covarn <- sd(c(y))^2 / weighted.mean(y, w)
+ temp1 <- 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
+ temp2 <- 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
+ init.rho <- rep(if (length( .irho)) .irho else {
ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2)
}, length = n)
- init.lambda = rep(if (length( .ilambda)) .ilambda else {
+ init.lambda <- rep(if (length( .ilambda)) .ilambda else {
(2*init.rho-1) * weighted.mean(y, w) / (1-init.rho)
}, length = n)
etastart <-
@@ -5534,8 +5553,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.elambda = elambda, .erho = erho,
.ilambda = ilambda, .irho = irho ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ rho <- eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
ifelse(rho > 0.5, lambda*(1-rho)/(2*rho-1), NA)
}, list( .llambda = llambda, .lrho = lrho,
.elambda = elambda, .erho = erho ))),
@@ -5547,8 +5566,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.elambda = elambda, .erho = erho ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ rho <- eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
@@ -5559,41 +5578,41 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.elambda = elambda, .erho = erho ))),
vfamily = c("invbinomial"),
deriv = eval(substitute(expression({
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ rho <- eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
- dl.drho = (y + lambda)/rho - y/(1-rho)
- dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
+ dl.drho <- (y + lambda)/rho - y/(1-rho)
+ dl.dlambda <- 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
log(rho)
- drho.deta = dtheta.deta(rho, .lrho, earg = .erho)
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ drho.deta <- dtheta.deta(rho, .lrho, earg = .erho)
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
c(w) * cbind(dl.drho * drho.deta,
dl.dlambda * dlambda.deta )
}), list( .llambda = llambda, .lrho = lrho,
.elambda = elambda, .erho = erho ))),
weight = eval(substitute(expression({
- ed2l.drho2 = (mu+lambda) / rho^2 + mu / (1-rho)^2
- d2l.dlambda2 = 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
- ed2l.dlambdarho = -1/rho
+ ned2l.drho2 <- (mu+lambda) / rho^2 + mu / (1-rho)^2
+ d2l.dlambda2 <- 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
+ ned2l.dlambdarho <- -1/rho
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.drho2 * drho.deta^2
- wz[, iam(1, 2, M)] = ed2l.dlambdarho * dlambda.deta * drho.deta
- wz[, iam(2, 2, M)] = d2l.dlambda2 * dlambda.deta^2
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.drho2 * drho.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dlambdarho * dlambda.deta * drho.deta
+ wz[, iam(2, 2, M)] <- d2l.dlambda2 * dlambda.deta^2
- d2rhodeta2 = d2theta.deta2(rho, .lrho, earg = .erho)
- d2lambda.deta2 = d2theta.deta2(lambda, .llambda , earg = .elambda )
- wz = c(w) * wz
+ d2rhodeta2 <- d2theta.deta2(rho, .lrho, earg = .erho)
+ d2lambda.deta2 <- d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz <- c(w) * wz
if (intercept.only) {
- pooled.weight = TRUE
+ pooled.weight <- TRUE
- wz[, iam(2, 2, M)] = sum(wz[, iam(2, 2, M)]) / sum(w)
+ wz[, iam(2, 2, M)] <- sum(wz[, iam(2, 2, M)]) / sum(w)
} else {
- pooled.weight = FALSE
+ pooled.weight <- FALSE
}
wz
@@ -5607,8 +5626,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
ltheta = "loge",
ilambda = NULL, itheta = NULL,
use.approx = TRUE,
- imethod = 1, zero = 1)
-{
+ imethod = 1, zero = 1) {
llambda <- as.list(substitute(llambda))
@@ -5638,18 +5656,19 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
new("vglmff",
blurb = c("Generalized Poisson distribution\n\n",
- "Links: ",
- namesof("lambda", llambda, earg = elambda), ", ",
- namesof("theta", ltheta, earg = etheta), "\n",
- "Mean: theta / (1-lambda)\n",
- "Variance: theta / (1-lambda)^3"),
+ "Links: ",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("theta", ltheta, earg = etheta), "\n",
+ "Mean: theta / (1-lambda)\n",
+ "Variance: theta / (1-lambda)^3"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5658,25 +5677,25 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
predictors.names <-
c(namesof("lambda", .llambda , earg = .elambda , tag = FALSE),
namesof("theta", .ltheta , earg = .etheta, tag = FALSE))
- init.lambda = if ( .imethod == 1)
+ init.lambda <- if ( .imethod == 1)
1 - sqrt(weighted.mean(y, w) / var(y)) else 0.5
- init.theta = if ( .imethod == 1)
+ init.theta <- if ( .imethod == 1)
sqrt((0.01 + weighted.mean(y, w)^3) / var(y)) else
median(y) * (1-init.lambda)
if (init.theta <= 0)
- init.theta = 0.1
- cutpt = if (init.lambda < 0) {
- mmm = max(trunc(-init.theta / init.lambda), 4)
+ init.theta <- 0.1
+ cutpt <- if (init.lambda < 0) {
+ mmm <- max(trunc(-init.theta / init.lambda), 4)
max(-1, -init.theta /mmm)
} else -1
if (init.lambda <= cutpt)
- init.lambda = cutpt + 0.1
+ init.lambda <- cutpt + 0.1
if (init.lambda >= 1)
- init.lambda = 0.9
+ init.lambda <- 0.9
if (!length(etastart)) {
- lambda = rep(if (length( .ilambda)) .ilambda else
+ lambda <- rep(if (length( .ilambda)) .ilambda else
init.lambda, length = n)
- theta = rep(if (length( .itheta)) .itheta else init.theta ,
+ theta <- rep(if (length( .itheta)) .itheta else init.theta ,
length = n)
etastart <-
cbind(theta2eta(lambda, .llambda , earg = .elambda ),
@@ -5687,8 +5706,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.imethod = imethod,
.itheta = itheta, .ilambda = ilambda )) ),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta <- eta2theta(eta[, 2], .ltheta , earg = .etheta )
theta / (1 - lambda)
}, list( .ltheta = ltheta, .llambda = llambda,
.etheta = etheta, .elambda = elambda ))),
@@ -5702,9 +5721,9 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.etheta = etheta, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
- index = (y == 0)
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta <- eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ index <- (y == 0)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w[index] * (-theta[index])) +
@@ -5715,49 +5734,49 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.etheta = etheta, .elambda = elambda ))),
vfamily = c("genpoisson"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
- dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
- dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
- dTHETA.deta = dtheta.deta(theta, .ltheta , earg = .etheta )
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta <- eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ dl.dlambda <- -y + y*(y-1)/(theta+y*lambda)
+ dl.dtheta <- -1 + (y-1)/(theta+y*lambda) + 1/theta
+ dTHETA.deta <- dtheta.deta(theta, .ltheta , earg = .etheta )
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dtheta * dTHETA.deta )
}), list( .ltheta = ltheta, .llambda = llambda,
.etheta = etheta, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
if ( .use.approx ) {
- BBB = (theta+2)*(theta+2*lambda-theta*lambda)-(theta^2)*(1-lambda)
- d2l.dlambda2 = 2 * theta * (theta+2) / ((1-lambda) * BBB)
- d2l.dtheta2 = 2 * (1 + lambda * (2/theta - 1)) / BBB
- d2l.dthetalambda = 2 * theta / BBB
- wz[, iam(1, 1, M)] = d2l.dlambda2 * dlambda.deta^2
- wz[, iam(2, 2, M)] = d2l.dtheta2 * dTHETA.deta^2
- wz[, iam(1, 2, M)] = d2l.dthetalambda * dTHETA.deta * dlambda.deta
- wz = c(w) * wz
+ BBB <- (theta+2)*(theta+2*lambda-theta*lambda)-(theta^2)*(1-lambda)
+ d2l.dlambda2 <- 2 * theta * (theta+2) / ((1-lambda) * BBB)
+ d2l.dtheta2 <- 2 * (1 + lambda * (2/theta - 1)) / BBB
+ d2l.dthetalambda <- 2 * theta / BBB
+ wz[, iam(1, 1, M)] <- d2l.dlambda2 * dlambda.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dtheta2 * dTHETA.deta^2
+ wz[, iam(1, 2, M)] <- d2l.dthetalambda * dTHETA.deta * dlambda.deta
+ wz <- c(w) * wz
} else {
- d2l.dlambda2 = -y^2 * (y-1) / (theta+y*lambda)^2
- d2l.dtheta2 = -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
- d2l.dthetalambda = -y * (y-1) / (theta+y*lambda)^2
- wz[, iam(1, 1, M)] = -d2l.dlambda2 * dlambda.deta^2
- wz[, iam(2, 2, M)] = -d2l.dtheta2 * dTHETA.deta^2
- wz[, iam(1, 2, M)] = -d2l.dthetalambda * dTHETA.deta * dlambda.deta
-
- d2THETA.deta2 = d2theta.deta2(theta, .ltheta , earg = .etheta )
- d2lambdadeta2 = d2theta.deta2(lambda, .llambda , earg = .elambda )
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dlambda * d2lambdadeta2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dtheta * d2THETA.deta2
- wz = c(w) * wz
+ d2l.dlambda2 <- -y^2 * (y-1) / (theta+y*lambda)^2
+ d2l.dtheta2 <- -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
+ d2l.dthetalambda <- -y * (y-1) / (theta+y*lambda)^2
+ wz[, iam(1, 1, M)] <- -d2l.dlambda2 * dlambda.deta^2
+ wz[, iam(2, 2, M)] <- -d2l.dtheta2 * dTHETA.deta^2
+ wz[, iam(1, 2, M)] <- -d2l.dthetalambda * dTHETA.deta * dlambda.deta
+
+ d2THETA.deta2 <- d2theta.deta2(theta, .ltheta , earg = .etheta )
+ d2lambdadeta2 <- d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.dlambda * d2lambdadeta2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dtheta * d2THETA.deta2
+ wz <- c(w) * wz
if (intercept.only) {
- sumw = sum(w)
+ sumw <- sum(w)
for(ii in 1:ncol(wz))
- wz[, ii] = sum(wz[, ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else
- pooled.weight = FALSE
+ pooled.weight <- FALSE
}
wz
}), list( .ltheta = ltheta, .llambda = llambda,
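
From the blurb, the generalized Poisson has mean theta/(1 - lambda) and variance theta/(1 - lambda)^3, so its index of dispersion is 1/(1 - lambda)^2 (overdispersion whenever lambda > 0), e.g.

    theta <- 2; lambda <- 0.3
    (theta / (1 - lambda)^3) / (theta / (1 - lambda))   # = 1/(1 - lambda)^2 ~ 2.04
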
@@ -5779,7 +5798,7 @@ dlgamma <- function(x, location = 0, scale = 1, k = 1, log = FALSE) {
stop("bad input for argument 'scale'")
if (!is.Numeric(k, positive = TRUE))
stop("bad input for argument 'k'")
- z = (x-location) / scale
+ z <- (x-location) / scale
if (log.arg) {
k * z - exp(z) - log(scale) - lgamma(k)
} else {
@@ -5790,9 +5809,9 @@ dlgamma <- function(x, location = 0, scale = 1, k = 1, log = FALSE) {
plgamma <- function(q, location = 0, scale = 1, k = 1) {
- zedd = (q - location) / scale
- ans = pgamma(exp(zedd), k)
- ans[scale < 0] = NaN
+ zedd <- (q - location) / scale
+ ans <- pgamma(exp(zedd), k)
+ ans[scale < 0] <- NaN
ans
}
@@ -5801,22 +5820,21 @@ qlgamma <- function(p, location = 0, scale = 1, k = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
- ans = location + scale * log(qgamma(p, k))
- ans[scale < 0] = NaN
+ ans <- location + scale * log(qgamma(p, k))
+ ans[scale < 0] <- NaN
ans
}
rlgamma <- function(n, location = 0, scale = 1, k = 1) {
- ans = location + scale * log(rgamma(n, k))
- ans[scale < 0] = NaN
+ ans <- location + scale * log(rgamma(n, k))
+ ans[scale < 0] <- NaN
ans
}
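
Because qlgamma() is location + scale*log(qgamma(p, k)) and plgamma() inverts that mapping, the two round-trip exactly, and the mean of rlgamma() draws is location + scale*digamma(k). A quick check, assuming VGAM exports these wrappers:

    library(VGAM)
    p <- c(0.1, 0.5, 0.9)
    all.equal(plgamma(qlgamma(p, location = 1, scale = 2, k = 3),
                      location = 1, scale = 2, k = 3), p)        # TRUE
    set.seed(1)
    mean(rlgamma(1e5, location = 1, scale = 2, k = 3))           # ~ 1 + 2 * digamma(3)
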
- lgammaff <- function(link = "loge", init.k = NULL)
-{
+ lgammaff <- function(link = "loge", init.k = NULL) {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -5826,20 +5844,21 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
new("vglmff",
blurb = c("Log-gamma distribution ",
"f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
- "Link: ",
- namesof("k", link, earg = earg), "\n", "\n",
- "Mean: digamma(k)", "\n"),
+ "Link: ",
+ namesof("k", link, earg = earg), "\n", "\n",
+ "Mean: digamma(k)", "\n"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
predictors.names <-
namesof("k", .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- k.init = if (length( .init.k))
+ k.init <- if (length( .init.k))
rep( .init.k, length.out = length(y)) else {
medy = median(y)
if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
@@ -5848,7 +5867,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}
}), list( .link = link, .earg = earg, .init.k = init.k ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- k = eta2theta(eta, .link , earg = .earg )
+ k <- eta2theta(eta, .link , earg = .earg )
digamma(k)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
@@ -5858,7 +5877,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kk = eta2theta(eta, .link , earg = .earg )
+ kk <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dlgamma(x = y, location = 0, scale = 1,
@@ -5867,14 +5886,14 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}, list( .link = link, .earg = earg ))),
vfamily = c("lgammaff"),
deriv = eval(substitute(expression({
- kk = eta2theta(eta, .link , earg = .earg )
- dl.dk = y - digamma(kk)
- dk.deta = dtheta.deta(kk, .link , earg = .earg )
+ kk <- eta2theta(eta, .link , earg = .earg )
+ dl.dk <- y - digamma(kk)
+ dk.deta <- dtheta.deta(kk, .link , earg = .earg )
c(w) * dl.dk * dk.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- ned2l.dk2 = trigamma(kk)
- wz = c(w) * dk.deta^2 * ned2l.dk2
+ ned2l.dk2 <- trigamma(kk)
+ wz <- c(w) * dk.deta^2 * ned2l.dk2
wz
}), list( .link = link, .earg = earg ))))
}
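
A hedged fitting sketch for this one-parameter family, assuming lgammaff() and rlgamma() are exported: simulate standard log-gamma responses and recover k with vglm().

    library(VGAM)
    set.seed(2)
    ldat <- data.frame(y = rlgamma(2000, location = 0, scale = 1, k = exp(1)))
    fit  <- vglm(y ~ 1, lgammaff(link = "loge"), data = ldat, trace = TRUE)
    Coef(fit)           # estimate of k, roughly exp(1) = 2.72
    head(fitted(fit))   # digamma(k-hat), the fitted mean from linkinv above
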
@@ -5885,10 +5904,9 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
- lgamma3ff <- function(
- llocation = "identity", lscale = "loge", lshape = "loge",
- ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
-{
+ lgamma3ff <-
+ function(llocation = "identity", lscale = "loge", lshape = "loge",
+ ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL) {
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -5916,20 +5934,21 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
new("vglmff",
blurb = c("Log-gamma distribution",
- " f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
- "location=a, scale=b>0, shape=k>0\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n\n",
- "Mean: a + b*digamma(k)", "\n"),
+ " f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
+ "location=a, scale=b>0, shape=k>0\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n\n",
+ "Mean: a + b*digamma(k)", "\n"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -5940,15 +5959,15 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
if (!length(etastart)) {
- k.init = if (length( .ishape ))
+ k.init <- if (length( .ishape ))
rep( .ishape, length.out = length(y)) else {
rep(exp(median(y)), length.out = length(y))
}
- scale.init = if (length( .iscale ))
+ scale.init <- if (length( .iscale ))
rep( .iscale, length.out = length(y)) else {
rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
}
- loc.init = if (length( .ilocat ))
+ loc.init <- if (length( .ilocat ))
rep( .ilocat, length.out = length(y)) else {
rep(median(y) - scale.init * digamma(k.init),
length.out = length(y))
@@ -5977,9 +5996,9 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
.elocat = elocat, .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .llocat , earg = .elocat )
- bb = eta2theta(eta[, 2], .lscale , earg = .escale )
- kk = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dlgamma(x = y, locat=aa, scale=bb, k=kk, log = TRUE))
@@ -5988,18 +6007,18 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
.elocat = elocat, .escale = escale, .eshape = eshape))),
vfamily = c("lgamma3ff"),
deriv = eval(substitute(expression({
- a = eta2theta(eta[, 1], .llocat , earg = .elocat )
- b = eta2theta(eta[, 2], .lscale , earg = .escale )
- k = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ k <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- zedd = (y-a)/b
- dl.da = (exp(zedd) - k) / b
- dl.db = (zedd * (exp(zedd) - k) - 1) / b
- dl.dk = zedd - digamma(k)
+ zedd <- (y-a)/b
+ dl.da <- (exp(zedd) - k) / b
+ dl.db <- (zedd * (exp(zedd) - k) - 1) / b
+ dl.dk <- zedd - digamma(k)
- da.deta = dtheta.deta(a, .llocat , earg = .elocat )
- db.deta = dtheta.deta(b, .lscale , earg = .escale )
- dk.deta = dtheta.deta(k, .lshape , earg = .eshape )
+ da.deta <- dtheta.deta(a, .llocat , earg = .elocat )
+ db.deta <- dtheta.deta(b, .lscale , earg = .escale )
+ dk.deta <- dtheta.deta(k, .lshape , earg = .eshape )
c(w) * cbind(dl.da * da.deta,
dl.db * db.deta,
@@ -6007,21 +6026,21 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
- ned2l.da2 = k / b^2
- ned2l.db2 = (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
- ned2l.dk2 = trigamma(k)
- ned2l.dadb = (1 + k*digamma(k)) / b^2
- ned2l.dadk = 1 / b
- ned2l.dbdk = digamma(k) / b
-
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[, iam(1, 1, M)] = ned2l.da2 * da.deta^2
- wz[, iam(2, 2, M)] = ned2l.db2 * db.deta^2
- wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
- wz[, iam(1, 2, M)] = ned2l.dadb * da.deta * db.deta
- wz[, iam(1, 3, M)] = ned2l.dadk * da.deta * dk.deta
- wz[, iam(2, 3, M)] = ned2l.dbdk * db.deta * dk.deta
- wz = c(w) * wz
+ ned2l.da2 <- k / b^2
+ ned2l.db2 <- (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
+ ned2l.dk2 <- trigamma(k)
+ ned2l.dadb <- (1 + k*digamma(k)) / b^2
+ ned2l.dadk <- 1 / b
+ ned2l.dbdk <- digamma(k) / b
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] <- ned2l.da2 * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.db2 * db.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dadb * da.deta * db.deta
+ wz[, iam(1, 3, M)] <- ned2l.dadk * da.deta * dk.deta
+ wz[, iam(2, 3, M)] <- ned2l.dbdk * db.deta * dk.deta
+ wz <- c(w) * wz
wz
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))))
@@ -6031,8 +6050,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
prentice74 <- function(
llocation = "identity", lscale = "loge", lshape = "identity",
- ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
-{
+ ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -6059,13 +6077,14 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
new("vglmff",
blurb = c("Log-gamma distribution (Prentice, 1974)",
- " f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
- "w=(y-a)*q/b + digamma(1/q^2), location=a, scale=b>0, shape=q\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n", "\n",
- "Mean: a", "\n"),
+ " f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
+ "w = (y-a)*q/b + digamma(1/q^2), ",
+ "location = a, scale = b > 0, shape = q\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n", "\n",
+ "Mean: a", "\n"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -6073,7 +6092,8 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
predictors.names <-
@@ -6084,17 +6104,17 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
if (!length(etastart)) {
- sdy = sqrt(var(y))
- k.init = if (length( .ishape ))
+ sdy <- sqrt(var(y))
+ k.init <- if (length( .ishape ))
rep( .ishape, length.out = length(y)) else {
- skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
+ skewness <- mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
rep(-skewness, length.out = length(y))
}
- scale.init = if (length( .iscale ))
+ scale.init <- if (length( .iscale ))
rep( .iscale, length.out = length(y)) else {
rep(sdy, length.out = length(y))
}
- loc.init = if (length( .iloc ))
+ loc.init <- if (length( .iloc ))
rep( .iloc, length.out = length(y)) else {
rep(median(y), length.out = length(y))
}
@@ -6119,11 +6139,11 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
.elocat = elocat, .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- a = eta2theta(eta[, 1], .llocat , earg = .elocat )
- b = eta2theta(eta[, 2], .lscale , earg = .escale )
- k = eta2theta(eta[, 3], .lshape , earg = .eshape )
- tmp55 = k^(-2)
- doubw = (y-a)*k/b + digamma(tmp55)
+ a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ k <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ tmp55 <- k^(-2)
+ doubw <- (y-a)*k/b + digamma(tmp55)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(c(w)*(log(abs(k)) - log(b) - lgamma(tmp55) +
@@ -6132,23 +6152,23 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
.elocat = elocat, .escale = escale, .eshape = eshape))),
vfamily = c("prentice74"),
deriv = eval(substitute(expression({
- a = eta2theta(eta[, 1], .llocat , earg = .elocat )
- b = eta2theta(eta[, 2], .lscale , earg = .escale )
- k = eta2theta(eta[, 3], .lshape , earg = .eshape )
-
- tmp55 = k^(-2)
- mustar = digamma(tmp55)
- doubw = (y-a)*k/b + mustar
- sigmastar2 = trigamma(tmp55)
-
- dl.da = k*(exp(doubw) - tmp55) / b
- dl.db = ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
- dl.dk = 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
+ a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ k <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+
+ tmp55 <- k^(-2)
+ mustar <- digamma(tmp55)
+ doubw <- (y-a)*k/b + mustar
+ sigmastar2 <- trigamma(tmp55)
+
+ dl.da <- k*(exp(doubw) - tmp55) / b
+ dl.db <- ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
+ dl.dk <- 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
((doubw - mustar) / k - 2 * sigmastar2 / k^3)
- da.deta = dtheta.deta(a, .llocat , earg = .elocat )
- db.deta = dtheta.deta(b, .lscale , earg = .escale )
- dk.deta = dtheta.deta(k, .lshape , earg = .eshape )
+ da.deta <- dtheta.deta(a, .llocat , earg = .elocat )
+ db.deta <- dtheta.deta(b, .lscale , earg = .escale )
+ dk.deta <- dtheta.deta(k, .lshape , earg = .eshape )
c(w) * cbind(dl.da * da.deta,
dl.db * db.deta,
@@ -6156,22 +6176,22 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
- ned2l.da2 = 1 / b^2
- ned2l.db2 = (1 + sigmastar2*tmp55) / b^2
- ned2l.dk2 = tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
+ ned2l.da2 <- 1 / b^2
+ ned2l.db2 <- (1 + sigmastar2*tmp55) / b^2
+ ned2l.dk2 <- tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
(sigmastar2 - k^2)
- ned2l.dadb = k / b^2
- ned2l.dadk = (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
- ned2l.dbdk = (sigmastar2*tmp55 - 1) / (b*k)
-
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[, iam(1, 1, M)] = ned2l.da2 * da.deta^2
- wz[, iam(2, 2, M)] = ned2l.db2 * db.deta^2
- wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
- wz[, iam(1, 2, M)] = ned2l.dadb * da.deta * db.deta
- wz[, iam(1, 3, M)] = ned2l.dadk * da.deta * dk.deta
- wz[, iam(2, 3, M)] = ned2l.dbdk * db.deta * dk.deta
- wz = c(w) * wz
+ ned2l.dadb <- k / b^2
+ ned2l.dadk <- (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
+ ned2l.dbdk <- (sigmastar2*tmp55 - 1) / (b*k)
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] <- ned2l.da2 * da.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.db2 * db.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dadb * da.deta * db.deta
+ wz[, iam(1, 3, M)] <- ned2l.dadk * da.deta * dk.deta
+ wz[, iam(2, 3, M)] <- ned2l.dbdk * db.deta * dk.deta
+ wz <- c(w) * wz
wz
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape))))
@@ -6191,17 +6211,17 @@ dgengamma <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
if (!is.Numeric(k, positive = TRUE))
stop("bad input for argument 'k'")
- N = max(length(x), length(scale), length(d), length(k))
- x = rep(x, length.out = N);
- scale = rep(scale, length.out = N);
- d = rep(d, length.out = N);
- k = rep(k, length.out = N);
+ N <- max(length(x), length(scale), length(d), length(k))
+ x <- rep(x, length.out = N);
+ scale <- rep(scale, length.out = N);
+ d <- rep(d, length.out = N);
+ k <- rep(k, length.out = N);
- Loglik = rep(log(0), length.out = N)
- xok = x > 0
+ Loglik <- rep(log(0), length.out = N)
+ xok <- x > 0
if (any(xok)) {
- zedd = (x[xok]/scale[xok])^d[xok]
- Loglik[xok] = log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
+ zedd <- (x[xok]/scale[xok])^d[xok]
+ Loglik[xok] <- log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
(d[xok]*k[xok]-1) * log(x[xok]) - zedd - lgamma(k[xok])
}
if (log.arg) {
@@ -6215,34 +6235,33 @@ dgengamma <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
pgengamma <- function(q, scale = 1, d = 1, k = 1) {
- zedd = (q / scale)^d
- ans = pgamma(zedd, k)
- ans[scale < 0] = NaN
- ans[d <= 0] = NaN
+ zedd <- (q / scale)^d
+ ans <- pgamma(zedd, k)
+ ans[scale < 0] <- NaN
+ ans[d <= 0] <- NaN
ans
}
qgengamma <- function(p, scale = 1, d = 1, k = 1) {
- ans = scale * qgamma(p, k)^(1/d)
- ans[scale < 0] = NaN
- ans[d <= 0] = NaN
+ ans <- scale * qgamma(p, k)^(1/d)
+ ans[scale < 0] <- NaN
+ ans[d <= 0] <- NaN
ans
}
rgengamma <- function(n, scale = 1, d = 1, k = 1) {
- ans = scale * rgamma(n, k)^(1/d)
- ans[scale < 0] = NaN
- ans[d <= 0] = NaN
+ ans <- scale * rgamma(n, k)^(1/d)
+ ans[scale < 0] <- NaN
+ ans[d <= 0] <- NaN
ans
}
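
As with the log-gamma wrappers, pgengamma() and qgengamma() invert each other, and rgengamma() draws should reproduce the blurb mean b*gamma(k + 1/d)/gamma(k); a sketch assuming these wrappers are exported:

    library(VGAM)
    p <- c(0.1, 0.5, 0.9)
    all.equal(pgengamma(qgengamma(p, scale = 2, d = 1.5, k = 3),
                        scale = 2, d = 1.5, k = 3), p)           # TRUE
    set.seed(3)
    c(mean(rgengamma(1e5, scale = 2, d = 1.5, k = 3)),
      2 * gamma(3 + 1/1.5) / gamma(3))                           # close agreement
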
gengamma <- function(lscale = "loge", ld = "loge", lk = "loge",
- iscale = NULL, id = NULL, ik = NULL, zero = NULL)
-{
+ iscale = NULL, id = NULL, ik = NULL, zero = NULL) {
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -6270,13 +6289,13 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
new("vglmff",
blurb = c("Generalized gamma distribution",
- " f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
- "scale=b>0, d>0, k>0, y>0\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("d", ld, earg = ed), ", ",
- namesof("k", lk, earg = ek), "\n", "\n",
- "Mean: b * gamma(k+1/d) / gamma(k)", "\n"),
+ " f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
+ "scale=b>0, d>0, k>0, y>0\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("d", ld, earg = ed), ", ",
+ namesof("k", lk, earg = ek), "\n", "\n",
+ "Mean: b * gamma(k+1/d) / gamma(k)", "\n"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -6284,7 +6303,8 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -6295,15 +6315,15 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
if (!length(etastart)) {
- b.init = if (length( .iscale ))
+ b.init <- if (length( .iscale ))
rep( .iscale, length.out = length(y)) else {
rep(mean(y^2) / mean(y), length.out = length(y))
}
- k.init = if (length( .ik ))
+ k.init <- if (length( .ik ))
rep( .ik , length.out = length(y)) else {
rep(mean(y) / b.init, length.out = length(y))
}
- d.init = if (length( .id ))
+ d.init <- if (length( .id ))
rep( .id , length.out = length(y)) else {
rep(digamma(k.init) / mean(log(y / b.init)),
length.out = length(y))
@@ -6317,9 +6337,9 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
.escale = escale, .ed = ed, .ek = ek,
.iscale = iscale, .id = id, .ik = ik ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- b = eta2theta(eta[, 1], .lscale , earg = .escale )
- d = eta2theta(eta[, 2], .ld , earg = .ed )
- k = eta2theta(eta[, 3], .lk , earg = .ek )
+ b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ d <- eta2theta(eta[, 2], .ld , earg = .ed )
+ k <- eta2theta(eta[, 3], .lk , earg = .ek )
b * gamma(k + 1 / d) / gamma(k)
}, list( .lscale = lscale, .lk = lk, .ld = ld,
.escale = escale, .ek = ek, .ed = ed ))),
@@ -6331,9 +6351,9 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
.escale = escale, .ed = ed, .ek = ek ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- b = eta2theta(eta[, 1], .lscale , earg = .escale )
- d = eta2theta(eta[, 2], .ld , earg = .ed )
- k = eta2theta(eta[, 3], .lk , earg = .ek )
+ b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ d <- eta2theta(eta[, 2], .ld , earg = .ed )
+ k <- eta2theta(eta[, 3], .lk , earg = .ek )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
@@ -6343,19 +6363,19 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
.escale = escale, .ed = ed, .ek = ek ))),
vfamily = c("gengamma"),
deriv = eval(substitute(expression({
- b = eta2theta(eta[, 1], .lscale , earg = .escale )
- d = eta2theta(eta[, 2], .ld , earg = .ed )
- k = eta2theta(eta[, 3], .lk , earg = .ek )
+ b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ d <- eta2theta(eta[, 2], .ld , earg = .ed )
+ k <- eta2theta(eta[, 3], .lk , earg = .ek )
- tmp22 = (y/b)^d
- tmp33 = log(y/b)
- dl.db = d * (tmp22 - k) / b
- dl.dd = 1/d + tmp33 * (k - tmp22)
- dl.dk = d * tmp33 - digamma(k)
+ tmp22 <- (y/b)^d
+ tmp33 <- log(y/b)
+ dl.db <- d * (tmp22 - k) / b
+ dl.dd <- 1/d + tmp33 * (k - tmp22)
+ dl.dk <- d * tmp33 - digamma(k)
- db.deta = dtheta.deta(b, .lscale , earg = .escale )
- dd.deta = dtheta.deta(d, .ld , earg = .ed )
- dk.deta = dtheta.deta(k, .lk , earg = .ek )
+ db.deta <- dtheta.deta(b, .lscale , earg = .escale )
+ dd.deta <- dtheta.deta(d, .ld , earg = .ed )
+ dk.deta <- dtheta.deta(k, .lk , earg = .ek )
c(w) * cbind(dl.db * db.deta,
dl.dd * dd.deta,
@@ -6363,22 +6383,22 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
}), list( .lscale = lscale, .ld = ld, .lk = lk,
.escale = escale, .ed = ed, .ek = ek ))),
weight = eval(substitute(expression({
- ned2l.db2 = k * (d/b)^2
- ned2l.dd2 = (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
- ned2l.dk2 = trigamma(k)
- ned2l.dbdd = -(1 + k*digamma(k)) / b
- ned2l.dbdk = d / b
- ned2l.dddk = -digamma(k) / d
-
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[, iam(1, 1, M)] = ned2l.db2 * db.deta^2
- wz[, iam(2, 2, M)] = ned2l.dd2 * dd.deta^2
- wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
- wz[, iam(1, 2, M)] = ned2l.dbdd * db.deta * dd.deta
- wz[, iam(1, 3, M)] = ned2l.dbdk * db.deta * dk.deta
- wz[, iam(2, 3, M)] = ned2l.dddk * dd.deta * dk.deta
-
- wz = c(w) * wz
+ ned2l.db2 <- k * (d/b)^2
+ ned2l.dd2 <- (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
+ ned2l.dk2 <- trigamma(k)
+ ned2l.dbdd <- -(1 + k*digamma(k)) / b
+ ned2l.dbdk <- d / b
+ ned2l.dddk <- -digamma(k) / d
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] <- ned2l.db2 * db.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dd2 * dd.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dbdd * db.deta * dd.deta
+ wz[, iam(1, 3, M)] <- ned2l.dbdk * db.deta * dk.deta
+ wz[, iam(2, 3, M)] <- ned2l.dddk * dd.deta * dk.deta
+
+ wz <- c(w) * wz
wz
}), list( .lscale = lscale, .ld = ld, .lk = lk,
.escale = escale, .ed = ed, .ek = ek ))))
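# Illustrative sketch: fitting gengamma() to data simulated with rgengamma();
# the seed and the parameter values (scale = 2, d = 1.5, k = 3) are assumptions
# for illustration only. Fitted values should approach scale*gamma(k+1/d)/gamma(k).
set.seed(123)
gdata <- data.frame(y = rgengamma(n = 1000, scale = 2, d = 1.5, k = 3))
fit <- vglm(y ~ 1, gengamma, data = gdata, trace = TRUE)
Coef(fit)          # estimates of (scale, d, k)
head(fitted(fit))  # compare with 2 * gamma(3 + 1/1.5) / gamma(3)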
@@ -6394,24 +6414,24 @@ dlog <- function(x, prob, log = FALSE) {
if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
- N = max(length(x), length(prob))
+ N <- max(length(x), length(prob))
if (length(x) != N)
- x = rep(x, length.out = N)
+ x <- rep(x, length.out = N)
if (length(prob) != N)
- prob = rep(prob, length.out = N)
- ox = !is.finite(x)
- zero = ox | round(x) != x | x < 1
- ans = rep(0.0, length.out = length(x))
+ prob <- rep(prob, length.out = N)
+ ox <- !is.finite(x)
+ zero <- ox | round(x) != x | x < 1
+ ans <- rep(0.0, length.out = length(x))
if (log.arg) {
- ans[ zero] = log(0.0)
- ans[!zero] = x[!zero] * log(prob[!zero]) - log(x[!zero]) -
+ ans[ zero] <- log(0.0)
+ ans[!zero] <- x[!zero] * log(prob[!zero]) - log(x[!zero]) -
log(-log1p(-prob[!zero]))
} else {
- ans[!zero] = -(prob[!zero]^(x[!zero])) / (x[!zero] *
+ ans[!zero] <- -(prob[!zero]^(x[!zero])) / (x[!zero] *
log1p(-prob[!zero]))
}
if (any(ox))
- ans[ox] = NA
+ ans[ox] <- NA
ans
}
@@ -6421,38 +6441,38 @@ plog <- function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
- N = max(length(q), length(prob))
- q = rep(q, length.out = N);
- prob = rep(prob, length.out = N);
+ N <- max(length(q), length(prob))
+ q <- rep(q, length.out = N);
+ prob <- rep(prob, length.out = N);
- bigno = 10
- owen1965 = (q * (1 - prob) > bigno)
+ bigno <- 10
+ owen1965 <- (q * (1 - prob) > bigno)
if (specialCase <- any(owen1965)) {
- qqq = q[owen1965]
- ppp = prob[owen1965]
- pqp = qqq * (1 - ppp)
- bigans = (ppp^(1+qqq) / (1-ppp)) * (1/qqq -
+ qqq <- q[owen1965]
+ ppp <- prob[owen1965]
+ pqp <- qqq * (1 - ppp)
+ bigans <- (ppp^(1+qqq) / (1-ppp)) * (1/qqq -
1 / ( pqp * (qqq-1)) +
2 / ((1-ppp) * pqp * (qqq-1) * (qqq-2)) -
6 / ((1-ppp)^2 * pqp * (qqq-1) * (qqq-2) * (qqq-3)) +
24 / ((1-ppp)^3 * pqp * (qqq-1) * (qqq-2) * (qqq-3) * (qqq-4)))
- bigans = 1 + bigans / log1p(-ppp)
+ bigans <- 1 + bigans / log1p(-ppp)
}
- floorq = pmax(1, floor(q)) # Ensures at least one element per q value
- floorq[owen1965] = 1
- seqq = sequence(floorq)
- seqp = rep(prob, floorq)
- onevector = (seqp^seqq / seqq) / (-log1p(-seqp))
- rlist = dotC(name = "tyee_C_cum8sum",
+ floorq <- pmax(1, floor(q)) # Ensures at least one element per q value
+ floorq[owen1965] <- 1
+ seqq <- sequence(floorq)
+ seqp <- rep(prob, floorq)
+ onevector <- (seqp^seqq / seqq) / (-log1p(-seqp))
+ rlist <- dotC(name = "tyee_C_cum8sum",
as.double(onevector), answer = double(N),
as.integer(N), as.double(seqq),
as.integer(length(onevector)), notok=integer(1))
if (rlist$notok != 0) stop("error in 'cum8sum'")
- ans = if (log.p) log(rlist$answer) else rlist$answer
+ ans <- if (log.p) log(rlist$answer) else rlist$answer
if (specialCase)
- ans[owen1965] = if (log.p) log(bigans) else bigans
- ans[q < 1] = if (log.p) log(0.0) else 0.0
+ ans[owen1965] <- if (log.p) log(bigans) else bigans
+ ans[q < 1] <- if (log.p) log(0.0) else 0.0
ans
}
@@ -6464,10 +6484,10 @@ plog <- function(q, prob, log.p = FALSE) {
rlog <- function(n, prob, Smallno = 1.0e-6) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
if (!is.Numeric(prob, allowable.length = 1, positive = TRUE) ||
max(prob) >= 1)
@@ -6476,26 +6496,26 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans = rep(0.0, length.out = use.n)
+ ans <- rep(0.0, length.out = use.n)
- ptr1 = 1; ptr2 = 0
- a = -1 / log1p(-prob)
- mean = a*prob/(1-prob) # E(Y)
- sigma = sqrt(a * prob * (1 - a * prob)) / (1 - prob) # sd(Y)
- ymax = dlog(x = 1, prob)
+ ptr1 <- 1; ptr2 <- 0
+ a <- -1 / log1p(-prob)
+ mean <- a*prob/(1-prob) # E(Y)
+ sigma <- sqrt(a * prob * (1 - a * prob)) / (1 - prob) # sd(Y)
+ ymax <- dlog(x = 1, prob)
while(ptr2 < use.n) {
- Lower = 0.5 # A continuity correction is used = 1 - 0.5.
- Upper = mean + 5 * sigma
+      Lower <- 0.5  # Continuity correction: the lower bound is 1 - 0.5.
+ Upper <- mean + 5 * sigma
while(plog(q = Upper, prob) < 1 - Smallno)
- Upper = Upper + sigma
- Upper = Upper + 0.5
- x = round(runif(2 * use.n, min = Lower, max = Upper))
- index = runif(2 * use.n, max = ymax) < dlog(x,prob)
- sindex = sum(index)
+ Upper <- Upper + sigma
+ Upper <- Upper + 0.5
+ x <- round(runif(2 * use.n, min = Lower, max = Upper))
+ index <- runif(2 * use.n, max = ymax) < dlog(x,prob)
+ sindex <- sum(index)
if (sindex) {
- ptr2 = min(use.n, ptr1 + sindex - 1)
- ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
+ ptr2 <- min(use.n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] <- (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 <- ptr2 + 1
}
}
ans
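# Illustrative consistency check, assuming the dlog()/plog()/rlog() definitions
# above: the CDF should equal the cumulative sum of the pmf, and the sample mean
# of rlog() draws should approach -prob / ((1 - prob) * log1p(-prob)).
prob <- 0.5
all.equal(plog(1:10, prob), cumsum(dlog(1:10, prob)))   # expect TRUE
mean(rlog(n = 10000, prob))                             # near 1.4427 for prob = 0.5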
@@ -6508,8 +6528,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
- logff <- function(link = "logit", init.c = NULL, zero = NULL)
-{
+ logff <- function(link = "logit", init.c = NULL, zero = NULL) {
if (length(init.c) &&
(!is.Numeric(init.c, positive = TRUE) || max(init.c) >= 1))
stop("init.c must be in (0,1)")
@@ -6528,9 +6547,9 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
new("vglmff",
blurb = c("Logarithmic distribution f(y) = a * c^y / y, ",
"y = 1, 2, 3,...,\n",
- " 0 < c < 1, a = -1 / log(1-c) \n\n",
- "Link: ", namesof("c", link, earg = earg), "\n", "\n",
- "Mean: a * c / (1 - c)", "\n"),
+ " 0 < c < 1, a = -1 / log(1-c) \n\n",
+ "Link: ", namesof("c", link, earg = earg), "\n", "\n",
+ "Mean: a * c / (1 - c)", "\n"),
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 1
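# Hedged usage sketch for logff(): fit to simulated logarithmic data and recover
# the 'c' parameter; prob = 0.4 and the seed are illustrative choices only.
set.seed(1)
lgdata <- data.frame(y = rlog(n = 2000, prob = 0.4))
fit <- vglm(y ~ 1, logff, data = lgdata, trace = TRUE)
Coef(fit)  # estimate of c, should be near 0.4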
@@ -6639,8 +6658,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
levy <- function(delta = NULL, link.gamma = "loge",
- idelta = NULL, igamma = NULL)
-{
+ idelta = NULL, igamma = NULL) {
@@ -6654,22 +6672,23 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
new("vglmff",
blurb = c("Levy distribution f(y) = sqrt(gamma/(2*pi)) * ",
- "(y-delta)^(-3/2) * \n",
- " exp(-gamma / (2*(y-delta ))),\n",
- " delta < y, gamma > 0",
- if (delta.known) paste(", delta = ", delta, ",", sep = ""),
- "\n\n",
- if (delta.known) "Link: " else "Links: ",
- namesof("gamma", link.gamma, earg = earg),
- if (! delta.known)
- c(", ", namesof("delta", "identity", earg = list())),
- "\n\n",
- "Mean: NA",
- "\n"),
+ "(y-delta)^(-3/2) * \n",
+ " exp(-gamma / (2*(y-delta ))),\n",
+ " delta < y, gamma > 0",
+ if (delta.known) paste(", delta = ", delta, ",", sep = ""),
+ "\n\n",
+ if (delta.known) "Link: " else "Links: ",
+ namesof("gamma", link.gamma, earg = earg),
+ if (! delta.known)
+ c(", ", namesof("delta", "identity", earg = list())),
+ "\n\n",
+ "Mean: NA",
+ "\n"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -6681,7 +6700,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
if (!length(etastart)) {
- delta.init = if ( .delta.known) {
+ delta.init <- if ( .delta.known) {
if (min(y,na.rm = TRUE) <= .delta)
stop("delta must be < min(y)")
.delta
@@ -6690,9 +6709,9 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
min(y,na.rm = TRUE) - 1.0e-4 *
diff(range(y,na.rm = TRUE))
}
- gamma.init = if (length( .igamma)) .igamma else
+ gamma.init <- if (length( .igamma)) .igamma else
median(y - delta.init) # = 1/median(1/(y-delta.init))
- gamma.init = rep(gamma.init, length = length(y))
+ gamma.init <- rep(gamma.init, length = length(y))
etastart <-
cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
if ( .delta.known) NULL else delta.init)
@@ -6704,9 +6723,9 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
.idelta = idelta,
.igamma = igamma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
+ eta <- as.matrix(eta)
+ mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta <- if ( .delta.known) .delta else eta[, 2]
NA * mygamma
@@ -6714,20 +6733,20 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
.delta.known = delta.known,
.delta = delta ))),
last = eval(substitute(expression({
- misc$link = if ( .delta.known) NULL else c(delta = "identity")
- misc$link = c(gamma = .link.gamma, misc$link)
- misc$earg = if ( .delta.known) list(gamma = .earg ) else
+ misc$link <- if ( .delta.known) NULL else c(delta = "identity")
+ misc$link <- c(gamma = .link.gamma, misc$link)
+ misc$earg <- if ( .delta.known) list(gamma = .earg ) else
list(gamma = .earg , delta = list())
if ( .delta.known)
- misc$delta = .delta
+ misc$delta <- .delta
}), list( .link.gamma = link.gamma, .earg = earg,
.delta.known = delta.known,
.delta = delta ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
+ eta <- as.matrix(eta)
+ mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta <- if ( .delta.known) .delta else eta[, 2]
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(c(w) * 0.5 * (log(mygamma) -3 * log(y - delta) -
@@ -6737,26 +6756,26 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
.delta = delta ))),
vfamily = c("levy"),
deriv = eval(substitute(expression({
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
+ eta <- as.matrix(eta)
+ mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta <- if ( .delta.known) .delta else eta[, 2]
if (! .delta.known)
- dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
- dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
- dgamma.deta = dtheta.deta(mygamma, .link.gamma, earg = .earg )
+ dl.ddelta <- (3 - mygamma / (y-delta)) / (2 * (y-delta))
+ dl.dgamma <- 0.5 * (1 / mygamma - 1 / (y-delta))
+ dgamma.deta <- dtheta.deta(mygamma, .link.gamma, earg = .earg )
c(w) * cbind(dl.dgamma * dgamma.deta,
if ( .delta.known) NULL else dl.ddelta)
}), list( .link.gamma = link.gamma, .earg = earg,
.delta.known = delta.known,
.delta = delta ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
- wz[, iam(1, 1, M)] = 1 * dgamma.deta^2
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
+ wz[, iam(1, 1, M)] <- 1 * dgamma.deta^2
if (! .delta.known) {
- wz[, iam(1, 2, M)] = 3 * dgamma.deta
- wz[, iam(2, 2, M)] = 21
+ wz[, iam(1, 2, M)] <- 3 * dgamma.deta
+ wz[, iam(2, 2, M)] <- 21
}
- wz = c(w) * wz / (2 * mygamma^2)
+ wz <- c(w) * wz / (2 * mygamma^2)
wz
}), list( .link.gamma = link.gamma, .earg = earg,
.delta.known = delta.known,
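# Illustrative sketch, based on the standard representation of the Levy
# distribution: if Z ~ N(0,1) then delta + gamma / Z^2 has the density above,
# so levy() with a known delta can be checked on such simulated data.
set.seed(1)
lvdata <- data.frame(y = 0 + 2 / rnorm(1000)^2)   # delta = 0, gamma = 2 (assumed)
fit <- vglm(y ~ 1, levy(delta = 0), data = lvdata, trace = TRUE)
Coef(fit)  # estimate of gamma, should be near 2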
@@ -6773,7 +6792,7 @@ dlino <- function(x, shape1, shape2, lambda = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- loglik = dbeta(x = x, shape1 = shape1, shape2 = shape2, log = TRUE) +
+ loglik <- dbeta(x = x, shape1 = shape1, shape2 = shape2, log = TRUE) +
shape1 * log(lambda) -
(shape1+shape2) * log1p(-(1-lambda)*x)
if (log.arg) loglik else exp(loglik)
@@ -6781,25 +6800,25 @@ dlino <- function(x, shape1, shape2, lambda = 1, log = FALSE) {
plino <- function(q, shape1, shape2, lambda = 1) {
- ans = pbeta(q = lambda * q / (1 - (1-lambda)*q),
+ ans <- pbeta(q = lambda * q / (1 - (1-lambda)*q),
shape1 = shape1, shape2 = shape2)
- ans[lambda <= 0] = NaN
+ ans[lambda <= 0] <- NaN
ans
}
qlino <- function(p, shape1, shape2, lambda = 1) {
- Y = qbeta(p = p, shape1 = shape1, shape2 = shape2)
- ans = Y / (lambda + (1-lambda)*Y)
- ans[lambda <= 0] = NaN
+ Y <- qbeta(p = p, shape1 = shape1, shape2 = shape2)
+ ans <- Y / (lambda + (1-lambda)*Y)
+ ans[lambda <= 0] <- NaN
ans
}
rlino <- function(n, shape1, shape2, lambda = 1) {
- Y = rbeta(n = n, shape1 = shape1, shape2 = shape2)
- ans = Y / (lambda + (1 - lambda) * Y)
- ans[lambda <= 0] = NaN
+ Y <- rbeta(n = n, shape1 = shape1, shape2 = shape2)
+ ans <- Y / (lambda + (1 - lambda) * Y)
+ ans[lambda <= 0] <- NaN
ans
}
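# Quick illustrative checks of the d/p/q/r functions above: qlino() inverts
# plino(), and rlino() draws stay in (0, 1) because Y / (lambda + (1-lambda)*Y)
# maps (0, 1) onto (0, 1). The parameter values are arbitrary.
xx <- seq(0.05, 0.95, by = 0.10)
all.equal(qlino(plino(xx, shape1 = 2, shape2 = 3, lambda = 4),
                shape1 = 2, shape2 = 3, lambda = 4), xx)   # expect TRUE
range(rlino(1000, shape1 = 2, shape2 = 3, lambda = 4))     # within (0, 1)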
@@ -6809,8 +6828,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
lshape2 = "loge",
llambda = "loge",
ishape1 = NULL, ishape2 = NULL, ilambda = 1,
- zero = NULL)
-{
+ zero = NULL) {
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -6835,11 +6853,11 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
new("vglmff",
blurb = c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
- "Links: ",
- namesof("shape1", lshape1, earg = eshape1), ", ",
- namesof("shape2", lshape2, earg = eshape2), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: something complicated"),
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: something complicated"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -6849,7 +6867,8 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -6863,19 +6882,19 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
if (!length(etastart)) {
- lambda.init = rep(if (length( .ilambda )) .ilambda else 1,
+ lambda.init <- rep(if (length( .ilambda )) .ilambda else 1,
length = n)
- sh1.init = if (length( .ishape1 ))
+ sh1.init <- if (length( .ishape1 ))
rep( .ishape1, length = n) else NULL
- sh2.init = if (length( .ishape2 ))
+ sh2.init <- if (length( .ishape2 ))
rep( .ishape2, length = n) else NULL
- txY.init = lambda.init * y / (1+lambda.init*y - y)
- mean1 = mean(txY.init)
- mean2 = mean(1/txY.init)
+ txY.init <- lambda.init * y / (1+lambda.init*y - y)
+ mean1 <- mean(txY.init)
+ mean2 <- mean(1/txY.init)
if (!is.Numeric(sh1.init))
- sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
+ sh1.init <- rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
if (!is.Numeric(sh2.init))
- sh2.init = rep(sh1.init * (1-mean1) / mean1, length = n)
+ sh2.init <- rep(sh1.init * (1-mean1) / mean1, length = n)
etastart <-
cbind(theta2eta(sh1.init, .lshape1 , earg = .eshape1),
theta2eta(sh2.init, .lshape2 , earg = .eshape2),
@@ -6885,9 +6904,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
.ishape1=ishape1, .ishape2=ishape2, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+ sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
rep(as.numeric(NA), length = nrow(eta))
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
@@ -6900,9 +6919,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+ sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(c(w) * dlino(y, shape1 = sh1, shape2 = sh2,
@@ -6912,20 +6931,20 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
vfamily = c("lino"),
deriv = eval(substitute(expression({
- sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+ sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
- temp1 = log1p(-(1-lambda) * y)
- temp2 = digamma(sh1+sh2)
+ temp1 <- log1p(-(1-lambda) * y)
+ temp2 <- digamma(sh1+sh2)
- dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
- dl.dsh2 = log1p(-y) - digamma(sh2) + temp2 - temp1
- dl.dlambda = sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
+ dl.dsh1 <- log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
+ dl.dsh2 <- log1p(-y) - digamma(sh2) + temp2 - temp1
+ dl.dlambda <- sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
- dsh1.deta = dtheta.deta(sh1, .lshape1 , earg = .eshape1)
- dsh2.deta = dtheta.deta(sh2, .lshape2 , earg = .eshape2)
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ dsh1.deta <- dtheta.deta(sh1, .lshape1 , earg = .eshape1)
+ dsh2.deta <- dtheta.deta(sh2, .lshape2 , earg = .eshape2)
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
c(w) * cbind( dl.dsh1 * dsh1.deta,
dl.dsh2 * dsh2.deta,
@@ -6933,23 +6952,23 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
weight = eval(substitute(expression({
- temp3 = trigamma(sh1+sh2)
-
- ned2l.dsh1 = trigamma(sh1) - temp3
- ned2l.dsh2 = trigamma(sh2) - temp3
- ned2l.dlambda2 = sh1 * sh2 / (lambda^2 * (sh1+sh2+1))
- ned2l.dsh1sh2 = -temp3
- ned2l.dsh1lambda = -sh2 / ((sh1+sh2)*lambda)
- ned2l.dsh2lambda = sh1 / ((sh1+sh2)*lambda)
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[, iam(1, 1, M)] = ned2l.dsh1 * dsh1.deta^2
- wz[, iam(2, 2, M)] = ned2l.dsh2 * dsh2.deta^2
- wz[, iam(3, 3, M)] = ned2l.dlambda2 * dlambda.deta^2
- wz[, iam(1, 2, M)] = ned2l.dsh1sh2 * dsh1.deta * dsh2.deta
- wz[, iam(1, 3, M)] = ned2l.dsh1lambda * dsh1.deta * dlambda.deta
- wz[, iam(2, 3, M)] = ned2l.dsh2lambda * dsh2.deta * dlambda.deta
- wz = c(w) * wz
+ temp3 <- trigamma(sh1+sh2)
+
+ ned2l.dsh1 <- trigamma(sh1) - temp3
+ ned2l.dsh2 <- trigamma(sh2) - temp3
+ ned2l.dlambda2 <- sh1 * sh2 / (lambda^2 * (sh1+sh2+1))
+ ned2l.dsh1sh2 <- -temp3
+ ned2l.dsh1lambda <- -sh2 / ((sh1+sh2)*lambda)
+ ned2l.dsh2lambda <- sh1 / ((sh1+sh2)*lambda)
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] <- ned2l.dsh1 * dsh1.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsh2 * dsh2.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dlambda2 * dlambda.deta^2
+ wz[, iam(1, 2, M)] <- ned2l.dsh1sh2 * dsh1.deta * dsh2.deta
+ wz[, iam(1, 3, M)] <- ned2l.dsh1lambda * dsh1.deta * dlambda.deta
+ wz[, iam(2, 3, M)] <- ned2l.dsh2lambda * dsh2.deta * dlambda.deta
+ wz <- c(w) * wz
wz
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))))
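# Hedged usage sketch for lino(): the shape and lambda values are illustrative
# assumptions, and ilambda is supplied only to help the initialization.
set.seed(2)
lidata <- data.frame(y = rlino(n = 1000, shape1 = 2, shape2 = 3, lambda = 4))
fit <- vglm(y ~ 1, lino(ilambda = 4), data = lidata, trace = TRUE)
Coef(fit)  # estimates of (shape1, shape2, lambda)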
@@ -6960,1975 +6979,106 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
+ betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
-
- genbetaII <- function(lshape1.a = "loge",
- lscale = "loge",
- lshape2.p = "loge",
- lshape3.q = "loge",
- ishape1.a = NULL,
- iscale = NULL,
- ishape2.p = 1.0,
- ishape3.q = 1.0,
- zero = NULL)
-{
-
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lshape2.p <- as.list(substitute(lshape2.p))
- eshape2.p <- link2list(lshape2.p)
- lshape2.p <- attr(eshape2.p, "function.name")
-
- lshape3.q <- as.list(substitute(lshape3.q))
- eshape3.q <- link2list(lshape3.q)
- lshape3.q <- attr(eshape3.q, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
new("vglmff",
- blurb = c("Generalized Beta II distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
- "gamma(shape3.q - 1/shape1.a) / ",
- "(gamma(shape2.p) * gamma(shape3.q))"),
+ blurb = c("Beta-prime distribution\n",
+ "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
+ " y>0, shape1>0, shape2>0\n\n",
+ "Links: ",
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg), "\n",
+ "Mean: shape1/(shape2-1) provided shape2>1"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
-
- if (!length( .ishape1.a ) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
- xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ c(namesof("shape1", .link , earg = .earg , short = TRUE),
+ namesof("shape2", .link , earg = .earg , short = TRUE))
+ if (is.numeric( .i1) && is.numeric( .i2)) {
+ vec <- c( .i1, .i2)
+ vec <- c(theta2eta(vec[1], .link , earg = .earg ),
+ theta2eta(vec[2], .link , earg = .earg ))
+ etastart <- matrix(vec, n, 2, byrow = TRUE)
}
-
if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a )) .ishape1.a else
- abs(1 / fit0$coef[2]),
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]), length = n)
- qq = rep(if (length( .ishape3.q )) .ishape3.q else 1.0,
- length.out = n)
- parg = rep(if (length( .ishape2.p )) .ishape2.p else 1.0,
- length.out = n)
-
-
- outOfRange = (qq - 1/aa <= 0)
- qq[outOfRange] = 1 / aa[outOfRange] + 1
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
-
-
+ init1 <- if (length( .i1))
+ rep( .i1, length.out = n) else rep(1, length.out = n)
+ init2 <- if (length( .i2))
+ rep( .i2, length.out = n) else 1 + init1 / (y + 0.1)
etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
+ n, 2, byrow = TRUE)
}
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .ishape1.a = ishape1.a, .iscale = iscale,
- .ishape2.p = ishape2.p, .ishape3.q = ishape3.q ))),
+ }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans[parg <= 0] = NA
- ans[qq <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ shapes <- eta2theta(eta, .link , earg = .earg )
+ ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2]-1), NA)
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- c(shape1.a = .lshape1.a, scale = .lscale ,
- shape2.p = .lshape2.p, shape3.q = .lshape3.q)
- misc$earg <- list(shape1.a = .eshape1.a, scale = .escale ,
- shape2.p = .eshape2.p, shape3.q = .eshape3.q)
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ misc$link <- c(shape1 = .link , shape2 = .link)
+ misc$earg <- list(shape1 = .earg , shape2 = .earg )
+ }), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(c(w) * (log(aa) + (aa * parg - 1) * log(y) -
- aa * parg * log(scale) +
- - lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa)))
+ sum(c(w) *((shapes[, 1]-1) * log(y) -
+ lbeta(shapes[, 1], shapes[, 2]) -
+ (shapes[, 2]+shapes[, 1]) * log1p(y)))
}
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- vfamily = c("genbetaII"),
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "betaprime",
deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3 = digamma(parg + qq)
- temp3a = digamma(parg)
- temp3b = digamma(qq)
- temp4 = log1p(temp2)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dp = aa * temp1 + temp3 - temp3a - temp4
- dl.dq = temp3 - temp3b - temp4
-
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
- dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta,
- dl.dp * dp.deta,
- dl.dq * dq.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- weight = eval(substitute(expression({
- temp5 = trigamma(parg + qq)
- temp5a = trigamma(parg)
- temp5b = trigamma(qq)
-
- ed2l.da = (1 + parg+qq + parg * qq * (temp5a + temp5b +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dp = temp5a - temp5
- ed2l.dq = temp5b - temp5
- ed2l.dascale = (parg - qq - parg * qq *
- (temp3a -temp3b)) / (scale*(1 + parg+qq))
- ed2l.dap = -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
- ed2l.daq = -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
- ed2l.dscalep = aa * qq / (scale*(parg+qq))
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- ed2l.dpq = -temp5
-
- wz = matrix(as.numeric(NA), n, dimm(M)) # M==4 means 10=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(3, 3, M)] = ed2l.dp * dp.deta^2
- wz[, iam(4, 4, M)] = ed2l.dq * dq.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz[, iam(1, 3, M)] = ed2l.dap * da.deta * dp.deta
- wz[, iam(1, 4, M)] = ed2l.daq * da.deta * dq.deta
- wz[, iam(2, 3, M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz[, iam(2, 4, M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz[, iam(3, 4, M)] = ed2l.dpq * dp.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))))
-}
+ shapes <- eta2theta(eta, .link , earg = .earg )
+ dshapes.deta <- dtheta.deta(shapes, .link , earg = .earg )
+ dl.dshapes <- cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
+ digamma(shapes[, 1]+shapes[, 2]),
+ - log1p(y) - digamma(shapes[, 2]) +
+ digamma(shapes[, 1]+shapes[, 2]))
+ c(w) * dl.dshapes * dshapes.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ temp2 <- trigamma(shapes[, 1] + shapes[, 2])
+ d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
+ d2l.dshape1shape2 <- temp2
+
+ wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
+ dshapes.deta[, 1] * dshapes.deta[, 2]
+ -c(w) * wz
+ }))
+}
-rsinmad <- function(n, shape1.a, scale = 1, shape3.q)
- qsinmad(runif(n), shape1.a, scale = scale, shape3.q)
-rlomax <- function(n, scale = 1, shape3.q)
- rsinmad(n, shape1.a = 1, scale = scale, shape3.q)
-rfisk <- function(n, shape1.a, scale = 1)
- rsinmad(n, shape1.a, scale = scale, shape3.q = 1)
-
-rparalogistic <- function(n, shape1.a, scale = 1)
- rsinmad(n, shape1.a, scale = scale, shape1.a)
-
-
-rdagum <- function(n, shape1.a, scale = 1, shape2.p)
- qdagum(runif(n), shape1.a = shape1.a, scale = scale, shape2.p = shape2.p)
-
-
-rinvlomax <- function(n, scale = 1, shape2.p)
- rdagum(n, shape1.a = 1, scale = scale, shape2.p)
-
-
-rinvparalogistic <- function(n, shape1.a, scale = 1)
- rdagum(n, shape1.a, scale = scale, shape1.a)
-
-
-
-
-qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
- bad = (p < 0) | (p > 1)
- ans = NA * p
-
- LLL = max(length(p), length(shape1.a), length(scale),
- length(shape3.q))
- if (length(p) != LLL)
- p <- rep(p, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape3.q) != LLL)
- shape3.q <- rep(shape3.q, length.out = LLL)
-
- Shape1.a = shape1.a[!bad]
- Scale = scale[!bad]
- Shape3.q = shape3.q[!bad]
- QQ = p[!bad]
- ans[!bad] = Scale * ((1 - QQ)^(-1/Shape3.q) - 1)^(1/Shape1.a)
- ans
-}
-
-
-qlomax <- function(p, scale = 1, shape3.q)
- qsinmad(p, shape1.a = 1, scale = scale, shape3.q)
-
-
-qfisk <- function(p, shape1.a, scale = 1)
- qsinmad(p, shape1.a, scale = scale, shape3.q = 1)
-
-
-qparalogistic <- function(p, shape1.a, scale = 1)
- qsinmad(p, shape1.a, scale = scale, shape1.a)
-
-
-
-qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
-
- LLL = max(length(p), length(shape1.a), length(scale),
- length(shape2.p))
- if (length(p) != LLL)
- p <- rep(p, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- Scale <- rep(scale, length.out = LLL)
- if (length(shape2.p) != LLL)
- shape2.p <- rep(shape2.p, length.out = LLL)
-
-
- bad = (p < 0) | (p > 1) | (Scale <= 0)
- ans = NA * p
- ans[!bad] = Scale[!bad] *
- (p[!bad]^(-1/shape2.p[!bad]) - 1)^(-1/shape1.a[!bad])
- ans
-}
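# Illustrative round-trip checks for the Singh-Maddala and Dagum quantile and
# distribution functions defined in this file; the parameter values are arbitrary.
pp <- seq(0.1, 0.9, by = 0.2)
all.equal(psinmad(qsinmad(pp, shape1.a = 2, scale = 3, shape3.q = 4),
                  shape1.a = 2, scale = 3, shape3.q = 4), pp)   # expect TRUE
all.equal(pdagum(qdagum(pp, shape1.a = 2, scale = 3, shape2.p = 4),
                 shape1.a = 2, scale = 3, shape2.p = 4), pp)    # expect TRUE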
-
-
-
-qinvlomax <- function(p, scale = 1, shape2.p)
- qdagum(p, shape1.a = 1, scale, shape2.p)
-
-
-qinvparalogistic <- function(p, shape1.a, scale = 1)
- qdagum(p, shape1.a, scale, shape1.a)
-
-
-
-
-
-
-psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
-
-
- LLL = max(length(q), length(shape1.a), length(scale),
- length(shape3.q))
- if (length(q) != LLL)
- q <- rep(q, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape3.q) != LLL)
- shape3.q <- rep(shape3.q, length.out = LLL)
-
-
- notpos = (q <= 0)
- Shape1.a = shape1.a[!notpos]
- Scale = scale[!notpos]
- Shape3.q = shape3.q[!notpos]
- QQ = q[!notpos]
-
- ans = 0 * q
- ans[!notpos] = 1 - (1 + (QQ / Scale)^Shape1.a)^(-Shape3.q)
- ans
-}
-
-
-plomax <- function(q, scale = 1, shape3.q)
- psinmad(q, shape1.a = 1, scale, shape3.q)
-
-
-pfisk <- function(q, shape1.a, scale = 1)
- psinmad(q, shape1.a, scale, shape3.q = 1)
-
-
-pparalogistic <- function(q, shape1.a, scale = 1)
- psinmad(q, shape1.a, scale, shape1.a)
-
-
-
-pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
-
-
- LLL = max(length(q), length(shape1.a), length(scale),
- length(shape2.p))
- if (length(q) != LLL)
- q <- rep(q, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape2.p) != LLL)
- shape2.p <- rep(shape2.p, length.out = LLL)
-
- notpos = (q <= 0)
- Shape1.a = shape1.a[!notpos]
- Scale = scale[!notpos]
- Shape2.p = shape2.p[!notpos]
- QQ = q[!notpos]
-
- ans <- 0 * q
- ans[!notpos] <- (1 + (QQ/Scale)^(-Shape1.a))^(-Shape2.p)
-
- ans[q == -Inf] <- 0
-
- ans
-}
-
-
-pinvlomax <- function(q, scale = 1, shape2.p)
- pdagum(q, shape1.a = 1, scale, shape2.p)
-
-
-pinvparalogistic <- function(q, shape1.a, scale = 1)
- pdagum(q, shape1.a, scale, shape1.a)
-
-
-
-dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
-
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
-
- LLL <- max(length(x), length(shape1.a),
- length(scale), length(shape3.q))
- x <- rep(x, length.out = LLL);
- shape1.a <- rep(shape1.a, length.out = LLL)
- scale <- rep(scale, length.out = LLL);
- shape3.q <- rep(shape3.q, length.out = LLL)
-
- Loglik <- rep(log(0), length.out = LLL)
- xok <- (x > 0) # Avoids evaluating log(x) if x is negative.
- Loglik[xok] <- log(shape1.a[xok]) + log(shape3.q[xok]) +
- (shape1.a[xok]-1) * log(x[xok]) -
- shape1.a[xok] * log(scale[xok]) -
- (1 + shape3.q[xok]) * log1p((x[xok]/scale[xok])^shape1.a[xok])
- if (log.arg) Loglik else exp(Loglik)
-}
-
-
-dlomax <- function(x, scale = 1, shape3.q, log = FALSE)
- dsinmad(x, shape1.a = 1, scale, shape3.q, log = log)
-
-
-dfisk <- function(x, shape1.a, scale = 1, log = FALSE)
- dsinmad(x, shape1.a, scale, shape3.q = 1, log = log)
-
-
-dparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- dsinmad(x, shape1.a, scale, shape1.a, log = log)
-
-
-
-ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
-
- LLL = max(length(x), length(shape1.a), length(scale),
- length(shape2.p))
- x = rep(x, length.out = LLL);
- shape1.a = rep(shape1.a, length.out = LLL)
- scale = rep(scale, length.out = LLL);
- shape2.p = rep(shape2.p, length.out = LLL)
-
- Loglik = rep(log(0), length.out = LLL)
- xok = (x > 0) # Avoids evaluating log(x) if x is negative.
- Loglik[xok] = log(shape1.a[xok]) +
- log(shape2.p[xok]) +
- (shape1.a[xok]*shape2.p[xok]-1)*log(x[xok]) -
- shape1.a[xok]*shape2.p[xok]*log(scale[xok]) -
- (1+shape2.p[xok]) * log1p((x[xok]/scale[xok])^shape1.a[xok])
- Loglik[shape2.p <= 0] = NaN
- if (log.arg) Loglik else exp(Loglik)
-}
-
-
-dinvlomax <- function(x, scale = 1, shape2.p, log = FALSE)
- ddagum(x, shape1.a = 1, scale, shape2.p, log = log)
-
-
-dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- ddagum(x, shape1.a, scale, shape1.a, log = log)
-
-
-
- sinmad <- function(lshape1.a = "loge",
- lscale = "loge",
- lshape3.q = "loge",
- ishape1.a = NULL,
- iscale = NULL,
- ishape3.q = 1.0,
- zero = NULL)
-{
-
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lshape3.q <- as.list(substitute(lshape3.q))
- eshape3.q <- link2list(lshape3.q)
- lshape3.q <- attr(eshape3.q, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Singh-Maddala distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale * gamma(1 + 1/shape1.a) * ",
- "gamma(shape3.q - 1/shape1.a) / ",
- "gamma(shape3.q)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
- parg = 1
-
- if (!length( .ishape1.a) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
- xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
-
-
- if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a)) .ishape1.a else 1/fit0$coef[2],
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else exp(fit0$coef[1]),
- length.out = n)
- qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
- length.out = n)
-
-
- outOfRange = (aa * qq <= 1)
- qq[outOfRange] = 1 / aa[outOfRange] + 1
-
-
- etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape3.q = lshape3.q,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q,
- .ishape1.a = ishape1.a, .iscale = iscale,
- .ishape3.q = ishape3.q ))),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans[qq <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q,
- .lshape3.q = lshape3.q ))),
-
- last = eval(substitute(expression({
- misc$link =
- c(shape1.a = .lshape1.a, scale = .lscale , shape3.q = .lshape3.q)
- misc$earg =
- list(shape1.a = .eshape1.a, scale = .escale , shape3.q = .eshape3.q)
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
-
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q,
- .lshape3.q = lshape3.q ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dsinmad(x = y, shape1.a = aa, scale = scale,
- shape3.q = qq, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape3.q = lshape3.q,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q ))),
- vfamily = c("sinmad"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1 / aa + parg * temp1 - (parg + qq) * temp1 / (1 + 1 / temp2)
- dl.dscale = (aa / scale) * (-parg + (parg + qq) / (1 + 1 / temp2))
- dl.dq = digamma(parg + qq) - temp3b - log1p(temp2)
-
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta,
- dl.dq * dq.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q,
- .lshape3.q = lshape3.q ))),
-
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dq = 1/qq^2
- ed2l.dascale = (parg - qq - parg*qq *
- (temp3a -temp3b)) / (scale*(1 + parg+qq))
- ed2l.daq = -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(3, 3, M)] = ed2l.dq * dq.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz[, iam(1, 3, M)] = ed2l.daq * da.deta * dq.deta
- wz[, iam(2, 3, M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape3.q = eshape3.q,
- .lshape3.q = lshape3.q ))))
-}
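# Hedged usage sketch for sinmad() (Singh-Maddala); the true parameter values
# below are illustrative assumptions.
set.seed(4)
sdata <- data.frame(y = rsinmad(1000, shape1.a = 2, scale = 3, shape3.q = 4))
fit <- vglm(y ~ 1, sinmad, data = sdata, trace = TRUE)
Coef(fit)  # estimates of (shape1.a, scale, shape3.q)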
-
-
- dagum <- function(lshape1.a = "loge",
- lscale = "loge",
- lshape2.p = "loge",
- ishape1.a = NULL,
- iscale = NULL,
- ishape2.p = 1.0,
- zero = NULL)
-{
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lshape2.p <- as.list(substitute(lshape2.p))
- eshape2.p <- link2list(lshape2.p)
- lshape2.p <- attr(eshape2.p, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Dagum distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
- "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
- "gamma(1 - 1/shape1.a) / ",
- "gamma(shape2.p)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
-
- if (!length( .ishape1.a) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
- xvec = log( qvec^(-1/ ishape2.p ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- parg = rep(if (length( .ishape2.p )) .ishape2.p else 1.0,
- length.out = n)
- aa = rep(if (length( .ishape1.a )) .ishape1.a else
- -1/fit0$coef[2],
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
-
-
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
-
-
-
-
-
- etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p,
- .ishape1.a = ishape1.a, .iscale = iscale,
- .ishape2.p = ishape2.p ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = 1
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans[parg <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p,
- .lshape2.p = lshape2.p ))),
- last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale ,
- p = .lshape2.p )
-
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale ,
- p = .eshape2.p )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * ddagum(x = y, shape1.a = aa, scale = Scale,
- shape2.p = parg, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p ))),
- vfamily = c("dagum"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
- qq = 1
-
- temp1 = log(y / Scale)
- temp2 = (y / Scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dp = aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
-
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta,
- dl.dp * dp.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p,
- .lshape2.p = lshape2.p ))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (Scale^2 * (1+parg+qq))
- ed2l.dp = 1 / parg^2
- ed2l.dascale = (parg - qq - parg * qq *(temp3a -temp3b)
- ) / (Scale * (1 + parg+qq))
- ed2l.dap= -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
- ed2l.dscalep = aa * qq / (Scale * (parg + qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(3, 3, M)] = ed2l.dp * dp.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz[, iam(1, 3, M)] = ed2l.dap * da.deta * dp.deta
- wz[, iam(2, 3, M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p,
- .lshape2.p = lshape2.p ))))
-}
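# Hedged usage sketch for dagum(); per the blurb above, the fitted mean is
# scale * gamma(shape2.p + 1/shape1.a) * gamma(1 - 1/shape1.a) / gamma(shape2.p).
# The simulated parameter values are assumptions for illustration.
set.seed(5)
ddata <- data.frame(y = rdagum(1000, shape1.a = 2, scale = 3, shape2.p = 4))
fit <- vglm(y ~ 1, dagum, data = ddata, trace = TRUE)
Coef(fit)          # estimates of (shape1.a, scale, shape2.p)
head(fitted(fit))  # compare with the closed-form mean above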
-
-
-
- betaII =
- function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
- iscale = NULL, ishape2.p = 2, ishape3.q = 2,
- zero = NULL) {
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
-
- lshape2.p <- as.list(substitute(lshape2.p))
- eshape2.p <- link2list(lshape2.p)
- lshape2.p <- attr(eshape2.p, "function.name")
-
- lshape3.q <- as.list(substitute(lshape3.q))
- eshape3.q <- link2list(lshape3.q)
- lshape3.q <- attr(eshape3.q, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Beta II distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale * gamma(shape2.p + 1) * ",
- "gamma(shape3.q - 1) / ",
- "(gamma(shape2.p) * gamma(shape3.q))"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
- predictors.names <-
- c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
-
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
- xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
- qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
- length.out = n)
- parg = rep(if (length( .ishape2.p)) .ishape2.p else 1.0,
- length.out = n)
-
-
-
- aa = 1
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
- outOfRange = (qq - 1/aa <= 0)
- qq[outOfRange] = 1 / aa + 1
-
-
- etastart <-
- cbind(theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
- }
- }), list( .lscale = lscale,
- .escale = escale,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .iscale = iscale,
- .ishape2.p = ishape2.p,
- .ishape3.q = ishape3.q ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = 1
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans[parg <= 0] = NA
- ans[qq <= 0] = NA
- ans
- }, list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- last = eval(substitute(expression({
- misc$link <- c(scale = .lscale , shape2.p = .lshape2.p,
- shape3.q = .lshape3.q)
- misc$earg <- list(scale = .escale , shape2.p = .eshape2.p,
- shape3.q = .eshape3.q)
- }), list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
- }
- }, list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- vfamily = c("betaII"),
- deriv = eval(substitute(expression({
- aa = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3 = digamma(parg + qq)
- temp3a = digamma(parg)
- temp3b = digamma(qq)
- temp4 = log1p(temp2)
-
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dp = aa * temp1 + temp3 - temp3a - temp4
- dl.dq = temp3 - temp3b - temp4
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
- dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
- c(w) * cbind( dl.dscale * dscale.deta,
- dl.dp * dp.deta,
- dl.dq * dq.deta )
- }), list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
- weight = eval(substitute(expression({
- temp5 = trigamma(parg + qq)
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dp = trigamma(parg) - temp5
- ed2l.dq = trigamma(qq) - temp5
- ed2l.dscalep = aa * qq / (scale*(parg+qq))
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- ed2l.dpq = -temp5
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(2, 2, M)] = ed2l.dp * dp.deta^2
- wz[, iam(3, 3, M)] = ed2l.dq * dq.deta^2
- wz[, iam(1, 2, M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz[, iam(1, 3, M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz[, iam(2, 3, M)] = ed2l.dpq * dp.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
- .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))))
-}
-
-
-
- lomax <- function(lscale = "loge",
- lshape3.q = "loge",
- iscale = NULL,
- ishape3.q = 2.0,
- zero = NULL)
-{
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
-
- lshape3.q <- as.list(substitute(lshape3.q))
- eshape3.q <- link2list(lshape3.q)
- lshape3.q <- attr(eshape3.q, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Lomax distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale / (shape3.q - 1)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
-
- predictors.names <-
- c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
-
- aa = parg = 1
-
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
- xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
-
-
- aa = 1
- outOfRange = (qq - 1/aa <= 0)
- qq[outOfRange] = 1 / aa + 1
-
-
-
- etastart <-
- cbind(theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
- }
- }), list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q,
- .iscale = iscale, .ishape3.q = ishape3.q ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = 1
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 2], .lshape3.q , earg = .eshape3.q )
-
-
-
-
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans[qq <= 0] = NA
- ans
- }, list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .lscale , shape3.q = .lshape3.q)
- misc$earg = list(scale = .escale , shape3.q = .eshape3.q)
- }), list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dlomax(x = y, scale = scale,
- shape3.q = qq, log = TRUE))
- }
- }, list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q ))),
- vfamily = c("lomax"),
- deriv = eval(substitute(expression({
- aa = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = 1
- qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
- temp2 = (y/scale)^aa
-
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dq = digamma(parg + qq) - digamma(qq) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
- c(w) * cbind( dl.dscale * dscale.deta,
- dl.dq * dq.deta )
- }), list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q ))),
- weight = eval(substitute(expression({
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dq = 1/qq^2
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(2, 2, M)] = ed2l.dq * dq.deta^2
- wz[, iam(1, 2, M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .lscale = lscale, .lshape3.q = lshape3.q,
- .escale = escale, .eshape3.q = eshape3.q ))))
-}
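A short usage sketch for lomax(): since the cdf is F(y) = 1 - (1 + y/scale)^(-shape3.q) (the same relation the initializer inverts), test data can be simulated by the inverse-cdf method without assuming any extra generator; Coef() is taken to return the back-transformed parameters in the order of the Links line above.

  ## Illustrative sketch only.
  set.seed(123)
  u <- runif(2000)
  ldat <- data.frame(y = 2 * ((1 - u)^(-1/4) - 1))   # scale = 2, shape3.q = 4
  fit  <- vglm(y ~ 1, lomax, data = ldat, trace = TRUE)
  cfit <- Coef(fit)                 # (scale, shape3.q) on the original scale
  cfit[1] / (cfit[2] - 1)           # implied mean scale/(shape3.q - 1); cf. mean(ldat$y)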
-
-
-
- fisk <- function(lshape1.a = "loge",
- lscale = "loge",
- ishape1.a = NULL,
- iscale = NULL,
- zero = NULL)
-{
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Fisk distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: scale * gamma(1 + 1/shape1.a) * ",
- "gamma(1 - 1/shape1.a)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
-
- qq = parg = 1
-
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- xvec = log( 1/qvec - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a)) .ishape1.a else
- abs(-1 / fit0$coef[2]),
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
-
-
- parg = 1
- qq = 1
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
- outOfRange = (qq - 1/aa <= 0)
- qq[outOfRange] = 1 / aa[outOfRange] + 1
-
-
-
- etastart <-
- cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
- theta2eta(scale, .lscale , earg = .escale ))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .ishape1.a = ishape1.a, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = 1
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a , scale = .lscale )
- misc$earg = list(shape1.a = .eshape1.a , scale = .escale )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- vfamily = c("fisk"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = qq = 1
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
-
- da.deta = dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))))
-}
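fisk() is the Fisk (log-logistic) special case shape2.p = shape3.q = 1, with cdf F(y) = 1/(1 + (y/scale)^(-shape1.a)); the median is therefore exactly the scale parameter, which is why the initializer above back-transforms the intercept of the log-quantile regression (its regressor log(1/q - 1) vanishes at q = 0.5). A two-line check of that fact, using a hypothetical helper rather than any package function:

  Ffisk <- function(y, shape1.a, scale) 1 / (1 + (y / scale)^(-shape1.a))  # not a package function
  Ffisk(2, shape1.a = 3, scale = 2)   # 0.5: the median equals 'scale' for any shape1.a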
-
-
- invlomax <- function(lscale = "loge",
- lshape2.p = "loge",
- iscale = NULL,
- ishape2.p = 1.0,
- zero = NULL)
-{
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape2.p <- as.list(substitute(lshape2.p))
- eshape2.p <- link2list(lshape2.p)
- lshape2.p <- attr(eshape2.p, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
-
- new("vglmff",
- blurb = c("Inverse Lomax distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
- "Mean: does not exist"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
-
- predictors.names <-
- c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
-
- qq = aa = 1
-
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
- xvec = log( qvec^(-1/ ishape2.p ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
- if (!length(etastart)) {
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
- parg = rep(if (length( .ishape2.p)) .ishape2.p else 1.0,
- length.out = n)
-
-
-
-
- etastart <-
- cbind(theta2eta(scale, .lscale , earg = .escale ),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p))
- }
- }), list( .lscale = lscale, .lshape2.p = lshape2.p,
- .escale = escale, .eshape2.p = eshape2.p,
- .iscale = iscale, .ishape2.p = ishape2.p ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
-
- NA * Scale
- }, list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p,
- .lshape2.p = lshape2.p ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .lscale , shape2.p = .lshape2.p )
- misc$earg = list(scale = .escale , shape2.p = .eshape2.p )
- }), list( .lscale = lscale,
- .escale = escale,
- .eshape2.p = eshape2.p,
- .lshape2.p = lshape2.p ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
- qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dinvlomax(x = y, scale = scale,
- shape2.p = parg, log = TRUE))
- }
- }, list( .lscale = lscale, .lshape2.p = lshape2.p,
- .escale = escale, .eshape2.p = eshape2.p ))),
- vfamily = c("invlomax"),
- deriv = eval(substitute(expression({
- aa = qq = 1
- scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
-
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dp = aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
-
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
-
- c(w) * cbind( dl.dscale * dscale.deta,
- dl.dp * dp.deta )
- }), list( .lscale = lscale, .lshape2.p = lshape2.p,
- .escale = escale, .eshape2.p = eshape2.p ))),
- weight = eval(substitute(expression({
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dp = 1/parg^2
- ed2l.dscalep = aa * qq / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(2, 2, M)] = ed2l.dp * dp.deta^2
- wz[, iam(1, 2, M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz = c(w) * wz
- wz
- }), list( .lscale = lscale, .lshape2.p = lshape2.p,
- .escale = escale, .eshape2.p = eshape2.p ))))
-}
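The inverse Lomax has no finite mean, which is why linkinv above returns NA * Scale. A fitting sketch: the cdf is F(y) = (y/(y + scale))^shape2.p (the relation the initializer inverts), so inverse-cdf simulation again needs no extra generator.

  ## Illustrative sketch only.
  set.seed(123)
  u <- runif(1000)
  idat <- data.frame(y = 2 / (u^(-1/3) - 1))     # scale = 2, shape2.p = 3
  fit <- vglm(y ~ 1, invlomax, data = idat, trace = TRUE)
  Coef(fit)            # back-transformed (scale, shape2.p)
  head(fitted(fit))    # NA by design: the distribution has no mean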
-
-
- paralogistic <- function(lshape1.a = "loge",
- lscale = "loge",
- ishape1.a = 2,
- iscale = NULL,
- zero = NULL)
-{
-
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
-
- new("vglmff",
- blurb = c("Paralogistic distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: scale * gamma(1 + 1/shape1.a) * ",
- "gamma(shape1.a - 1/shape1.a) / gamma(shape1.a)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
-
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
-
- parg = 1
-
- if (!length( .ishape1.a) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape1.a = if (length( .ishape1.a)) .ishape1.a else 1
- xvec = log( (1-qvec)^(-1/ ishape1.a ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a)) .ishape1.a else
- abs(1/fit0$coef[2]),
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
-
-
-
-
- parg = 1
- qq = aa
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
- outOfRange = (qq - 1/aa <= 0)
- aa[outOfRange] =
- qq[outOfRange] = 2 # Need aa > 1, where aa == qq
-
-
- etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .ishape1.a = ishape1.a, .iscale = iscale
- ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = aa
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale)
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = aa
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dparalogistic(x = y, shape1.a = aa,
- scale = scale, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- vfamily = c("paralogistic"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = 1
- qq = aa
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
-
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta)
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))))
-}
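The lsfit() calls in the initialize slots of lomax, fisk and paralogistic all rely on the same device: after transforming the probabilities, the model's quantiles are linear in log(y), so a least-squares fit of the log empirical quantiles yields exp(intercept) as a scale start and a function of the slope as a shape start. A base-R sketch of the idea for the paralogistic case, where S(y) = (1 + (y/scale)^a)^(-a):

  ## Sketch of the quantile-regression initializer (base R only).
  ## log y_p = log(scale) + (1/a) * log((1 - p)^(-1/a) - 1)
  set.seed(1)
  a.true <- 2; scale.true <- 3
  u <- runif(5000)
  y <- scale.true * ((1 - u)^(-1/a.true) - 1)^(1/a.true)   # inverse-cdf simulation
  qvec    <- c(0.25, 0.5, 0.75)
  a.guess <- 2                                   # plays the role of .ishape1.a above
  xvec    <- log((1 - qvec)^(-1/a.guess) - 1)
  fit0    <- lsfit(x = xvec, y = log(quantile(y, qvec)))
  exp(fit0$coef[1])       # rough start for scale (about 3 here)
  abs(1 / fit0$coef[2])   # rough start for shape1.a (about 2 here)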
-
-
- invparalogistic <- function(lshape1.a = "loge", lscale = "loge",
- ishape1.a = 2, iscale = NULL,
- zero = NULL)
-{
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
- lshape1.a <- as.list(substitute(lshape1.a))
- eshape1.a <- link2list(lshape1.a)
- lshape1.a <- attr(eshape1.a, "function.name")
-
- lscale <- as.list(substitute(lscale))
- escale <- link2list(lscale)
- lscale <- attr(escale, "function.name")
-
-
- new("vglmff",
- blurb = c("Inverse paralogistic distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: scale * gamma(shape1.a + 1/shape1.a) * ",
- "gamma(1 - 1/shape1.a)/gamma(shape1.a)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a , tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
-
- if (!length( .ishape1.a) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape2.p = if (length( .ishape1.a )) .ishape1.a else 1
- xvec = log( qvec^(-1/ ishape2.p ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
-
- qq = 1
- if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a)) .ishape1.a else -1/fit0$coef[2],
- length = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]), length = n)
-
-
-
-
-
- parg = aa
- qq = 1
- outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] =
- aa[outOfRange] = 2
- outOfRange = (qq - 1/aa <= 0)
- qq[outOfRange] = 1 / aa[outOfRange] + 1
-
-
-
- etastart <-
- cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale , earg = .escale ))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .ishape1.a = ishape1.a, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = aa
- qq = 1
-
- ans = Scale * exp(lgamma(parg + 1/aa) +
- lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
- ans[parg + 1/aa <= 0] = NA
- ans[qq - 1/aa <= 0] = NA
- ans[aa <= 0] = NA
- ans[Scale <= 0] = NA
- ans
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale )
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = aa
- qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) * dinvparalogistic(x = y, shape1.a = aa,
- scale = scale, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
- vfamily = c("invparalogistic"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- parg = aa
- qq = 1
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
-
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
-
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))),
-
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg + qq +
- parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1 + parg + qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq -
- parg*qq*(temp3a -temp3b)) / (scale*(1 + parg+qq))
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
- wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
- wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
- wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale))))
-}
-
-
-
-
-
-
-
-
-
-
-
- if (FALSE)
- genlognormal <- function(link.sigma = "loge", link.r = "loge",
- init.sigma = 1, init.r = 1, zero = NULL)
-{
-warning("2/4/04; doesn't work, possibly because first derivs are ",
- "not continuous (sign() is used). Certainly, the derivs wrt ",
- "mymu are problematic (run with maxit=4:9 and look at weight ",
- "matrices). Possibly fundamentally cannot be estimated by IRLS. ",
- "Pooling doesn't seem to help")
-
-
-
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
-
- link.sigma <- as.list(substitute(link.sigma))
- esigma <- link2list(link.sigma)
- link.sigma <- attr(esigma, "function.name")
-
- link.r <- as.list(substitute(link.r))
- er <- link2list(link.r)
- link.r <- attr(er, "function.name")
-
-
- new("vglmff",
- blurb = c("Three-parameter generalized lognormal distribution\n\n",
- "Links: ",
- "loc; ",
- namesof("sigma", link.sigma, earg = esigma, tag = TRUE), ", ",
- namesof("r", link.r, earg = er, tag = TRUE)),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names <-
- c(namesof("loc", "identity", earg = list(), tag = FALSE),
- namesof("sigma", .link.sigma, earg = .esigma, tag = FALSE),
- namesof("r", .link.r, earg = .er, tag = FALSE))
-
- if (!length( .init.sigma) || !length( .init.r)) {
- init.r = if (length( .init.r)) .init.r else 1
- sigma.init = (0.5 *
- sum(abs(log(y) - mean(log(y )))^init.r))^(1/init.r)
- }
- if (any(y <= 0)) stop("y must be positive")
-
- if (!length(etastart)) {
- sigma.init = rep(if (length( .init.sigma)) .init.sigma else
- sigma.init, length.out = n)
- r.init = if (length( .init.r)) .init.r else init.r
- etastart <-
- cbind(mu = rep(log(median(y)), length.out = n),
- sigma = sigma.init,
- r = r.init)
- }
- }), list( .link.sigma = link.sigma, .link.r = link.r,
- .init.sigma = init.sigma, .init.r = init.r ))),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], "identity", earg = list())
- sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
- r = eta2theta(eta[, 3], .link.r, earg = .er)
- r
- }, list( .link.sigma = link.sigma, .link.r = link.r ))),
-
- last = eval(substitute(expression({
- misc$link = c(loc = "identity", "sigma" = .link.sigma, r = .link.r )
- misc$expected = TRUE
- }), list( .link.sigma = link.sigma, .link.r = link.r ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[, 1], "identity", earg = list())
- sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
- r = eta2theta(eta[, 3], .link.r, earg = .er)
- temp89 = (abs(log(y)-mymu)/sigma)^r
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(c(w) * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
- }, list( .link.sigma = link.sigma, .link.r = link.r ))),
- vfamily = c("genlognormal3"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[, 1], "identity", earg = list())
- sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
-
- r = eta2theta(eta[, 3], .link.r, earg = .er)
- ss = 1 + 1/r
- temp33 = (abs(log(y)-mymu)/sigma)
- temp33r1 = temp33^(r-1)
-
- dl.dmymu = temp33r1 * sign(log(y)-mymu) / sigma
- dl.dsigma = (temp33*temp33r1 - 1) / sigma
- dl.dr = (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
- temp33r1 * log(temp33r1) / r
-
- dmymu.deta = dtheta.deta(mymu, "identity", earg = list())
- dsigma.deta = dtheta.deta(sigma, .link.sigma, earg = .esigma)
- dr.deta = dtheta.deta(r, .link.r, earg = .er)
-
- c(w) * cbind(dl.dmymu * dmymu.deta,
- dl.dsigma * dsigma.deta,
- dl.dr * dr.deta)
- }), list( .link.sigma = link.sigma, .link.r = link.r ))),
- weight = expression({
- wz = matrix(0, n, 6) # 5 will have small savings of 1 column
-
- B = log(r) + digamma(ss)
- ed2l.dmymu2 = (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
- ed2l.dsigma2 = r / sigma^2
- ed2l.dr2 = (ss * trigamma(ss) + B^2 - 1) / r^3
- ed2l.dsigmar = -B / (r * sigma)
-
- wz[, iam(1, 1, M)] = ed2l.dmymu2 * dmymu.deta^2
- wz[, iam(2, 2, M)] = ed2l.dsigma2 * dsigma.deta^2
- wz[, iam(3, 3, M)] = ed2l.dr2 * dr.deta^2
- wz[, iam(2, 3, M)] = ed2l.dsigmar * dsigma.deta * dr.deta
- wz = c(w) * wz
- wz
- }))
-}
-
-
-
-
- betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
-
- link <- as.list(substitute(link))
- earg <- link2list(link)
- link <- attr(earg, "function.name")
-
-
- new("vglmff",
- blurb = c("Beta-prime distribution\n",
- "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
- " y>0, shape1>0, shape2>0\n\n",
- "Links: ",
- namesof("shape1", link, earg = earg), ", ",
- namesof("shape2", link, earg = earg), "\n",
- "Mean: shape1/(shape2-1) provided shape2>1"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
-
-
-
- predictors.names <-
- c(namesof("shape1", .link , earg = .earg , short = TRUE),
- namesof("shape2", .link , earg = .earg , short = TRUE))
- if (is.numeric( .i1) && is.numeric( .i2)) {
- vec = c( .i1, .i2)
- vec = c(theta2eta(vec[1], .link , earg = .earg ),
- theta2eta(vec[2], .link , earg = .earg ))
- etastart <- matrix(vec, n, 2, byrow = TRUE)
- }
- if (!length(etastart)) {
- init1 = if (length( .i1))
- rep( .i1, length.out = n) else rep(1, length.out = n)
- init2 = if (length( .i2))
- rep( .i2, length.out = n) else 1 + init1 / (y + 0.1)
- etastart <-
- matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
- n, 2, byrow = TRUE)
- }
- }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta, .link , earg = .earg )
- ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2]-1), NA)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .link , shape2 = .link)
- misc$earg = list(shape1 = .earg , shape2 = .earg )
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = eta2theta(eta, .link , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(c(w) *((shapes[, 1]-1) * log(y) -
- lbeta(shapes[, 1], shapes[, 2]) -
- (shapes[, 2]+shapes[, 1]) * log1p(y)))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = "betaprime",
- deriv = eval(substitute(expression({
- shapes = eta2theta(eta, .link , earg = .earg )
- dshapes.deta = dtheta.deta(shapes, .link , earg = .earg )
- dl.dshapes = cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
- digamma(shapes[, 1]+shapes[, 2]),
- - log1p(y) - digamma(shapes[, 2]) +
- digamma(shapes[, 1]+shapes[, 2]))
- c(w) * dl.dshapes * dshapes.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- temp2 = trigamma(shapes[, 1] + shapes[, 2])
- d2l.dshape12 = temp2 - trigamma(shapes[, 1])
- d2l.dshape22 = temp2 - trigamma(shapes[, 2])
- d2l.dshape1shape2 = temp2
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] = d2l.dshape12 * dshapes.deta[, 1]^2
- wz[, iam(2, 2, M)] = d2l.dshape22 * dshapes.deta[, 2]^2
- wz[, iam(1, 2, M)] = d2l.dshape1shape2 *
- dshapes.deta[, 1] * dshapes.deta[, 2]
-
- -c(w) * wz
- }))
-}
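No d/p/q/r companions for the beta-prime appear in this excerpt, but test data are easy to make: if X ~ Gamma(shape1) and Y ~ Gamma(shape2) are independent, X/Y follows the beta-prime distribution. A fitting sketch built on that representation (Coef() assumed to return shape1 and shape2 in the order of the Links line):

  ## Illustrative sketch only.
  set.seed(123)
  bdat <- data.frame(y = rgamma(2000, shape = 4) / rgamma(2000, shape = 3))
  fit  <- vglm(y ~ 1, betaprime, data = bdat, trace = TRUE)
  cfit <- Coef(fit)                 # estimates of (shape1, shape2)
  cfit[1] / (cfit[2] - 1)           # model mean (shape2 > 1 here); cf. mean(bdat$y)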
-
-
-
-
-
-
-dmaxwell <- function(x, a, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
+dmaxwell <- function(x, a, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
L <- max(length(x), length(a))
x <- rep(x, length.out = L); a = rep(a, length.out = L);
@@ -8990,11 +7140,11 @@ qmaxwell <- function(p, a) {
new("vglmff",
blurb = c("Maxwell distribution f(y;a) = sqrt(2/pi) * a^(3/2) * y^2 *",
- " exp(-0.5*a*y^2), y>0, a>0\n",
- "Link: ",
- namesof("a", link, earg = earg),
- "\n", "\n",
- "Mean: sqrt(8 / (a * pi))"),
+ " exp(-0.5*a*y^2), y>0, a>0\n",
+ "Link: ",
+ namesof("a", link, earg = earg),
+ "\n", "\n",
+ "Mean: sqrt(8 / (a * pi))"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
@@ -9096,14 +7246,14 @@ dnaka <- function(x, shape, scale = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(shape), length(scale))
- x = rep(x, length.out = L)
- shape = rep(shape, length.out = L)
- scale = rep(scale, length.out = L);
+ L <- max(length(x), length(shape), length(scale))
+ x <- rep(x, length.out = L)
+ shape <- rep(shape, length.out = L)
+ scale <- rep(scale, length.out = L);
- logdensity = rep(log(0), length.out = L)
- xok = (x > 0)
- logdensity[xok] = dgamma(x = x[xok]^2, shape = shape[xok],
+ logdensity <- rep(log(0), length.out = L)
+ xok <- (x > 0)
+ logdensity[xok] <- dgamma(x = x[xok]^2, shape = shape[xok],
scale = scale[xok]/shape[xok], log = TRUE) +
log(2) + log(x[xok])
if (log.arg) logdensity else exp(logdensity)
@@ -9117,47 +7267,49 @@ pnaka <- function(q, shape, scale = 1) {
stop("bad input for argument 'shape'")
if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
- L = max(length(q), length(shape), length(scale))
- q = rep(q, length.out = L)
- shape = rep(shape, length.out = L)
- scale = rep(scale, length.out = L);
+ L <- max(length(q), length(shape), length(scale))
+ q <- rep(q, length.out = L)
+ shape <- rep(shape, length.out = L)
+ scale <- rep(scale, length.out = L);
ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
}
qnaka <- function(p, shape, scale = 1, ...) {
- if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
- stop("bad input for argument 'p'")
- if (!is.Numeric(shape, positive = TRUE))
- stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("bad input for argument 'scale'")
- L = max(length(p), length(shape), length(scale))
- p = rep(p, length.out = L); shape = rep(shape, length.out = L);
- scale = rep(scale, length.out = L);
- ans = rep(0.0, length.out = L)
- myfun <- function(x, shape, scale = 1, p)
- pnaka(q = x, shape = shape, scale = scale) - p
- for(ii in 1:L) {
- EY = sqrt(scale[ii]/shape[ii]) *
- gamma(shape[ii]+0.5) / gamma(shape[ii])
- Upper = 5 * EY
- while(pnaka(q = Upper, shape = shape[ii],
- scale = scale[ii]) < p[ii])
- Upper = Upper + scale[ii]
- ans[ii] = uniroot(f = myfun, lower = 0, upper = Upper,
- shape = shape[ii], scale = scale[ii],
- p = p[ii], ...)$root
- }
- ans
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+
+ L <- max(length(p), length(shape), length(scale))
+ p <- rep(p, length.out = L); shape = rep(shape, length.out = L);
+ scale <- rep(scale, length.out = L);
+ ans <- rep(0.0, length.out = L)
+
+ myfun <- function(x, shape, scale = 1, p)
+ pnaka(q = x, shape = shape, scale = scale) - p
+ for(ii in 1:L) {
+ EY <- sqrt(scale[ii]/shape[ii]) *
+ gamma(shape[ii] + 0.5) / gamma(shape[ii])
+ Upper <- 5 * EY
+ while(pnaka(q = Upper, shape = shape[ii],
+ scale = scale[ii]) < p[ii])
+ Upper <- Upper + scale[ii]
+ ans[ii] <- uniroot(f = myfun, lower = 0, upper = Upper,
+ shape = shape[ii], scale = scale[ii],
+ p = p[ii], ...)$root
+ }
+ ans
}
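Since qnaka() finds each quantile by uniroot() on pnaka(), a cheap sanity check is the round trip p -> q -> p (illustrative only):

  pp <- c(0.1, 0.5, 0.9)
  qq <- qnaka(pp, shape = 2, scale = 3)
  pnaka(qq, shape = 2, scale = 3)   # recovers 0.1 0.5 0.9 up to uniroot's tolerance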
rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (!is.Numeric(scale, positive = TRUE, allowable.length = 1))
@@ -9168,24 +7320,24 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans = rep(0.0, length.out = use.n)
+ ans <- rep(0.0, length.out = use.n)
- ptr1 = 1; ptr2 = 0
- ymax = dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
+ ptr1 <- 1; ptr2 <- 0
+ ymax <- dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
shape = shape, scale = scale)
while(ptr2 < use.n) {
- EY = sqrt(scale / shape) * gamma(shape + 0.5) / gamma(shape)
- Upper = EY + 5 * scale
+ EY <- sqrt(scale / shape) * gamma(shape + 0.5) / gamma(shape)
+ Upper <- EY + 5 * scale
while(pnaka(q = Upper, shape = shape, scale = scale) < 1 - Smallno)
- Upper = Upper + scale
- x = runif(2*use.n, min = 0, max = Upper)
- index = runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
+ Upper <- Upper + scale
+ x <- runif(2*use.n, min = 0, max = Upper)
+ index <- runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
scale = scale)
- sindex = sum(index)
+ sindex <- sum(index)
if (sindex) {
- ptr2 = min(use.n, ptr1 + sindex - 1)
- ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
+ ptr2 <- min(use.n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] <- (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 <- ptr2 + 1
}
}
ans
@@ -9214,20 +7366,21 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
new("vglmff",
blurb = c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
- " ",
- "y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape),\n",
- " ",
- "y>0, shape>0, scale>0\n",
- "Links: ",
- namesof("shape", lshape, earg = eshape), ", ",
- namesof("scale", lscale, earg = escale),
- "\n",
- "\n",
- "Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
+ " ",
+ "y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape),\n",
+ " ",
+ "y>0, shape>0, scale>0\n",
+ "Links: ",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n",
+ "\n",
+ "Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -9237,10 +7390,10 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
if (!length(etastart)) {
- init2 = if (is.Numeric( .iscale, positive = TRUE))
+ init2 <- if (is.Numeric( .iscale, positive = TRUE))
rep( .iscale, length.out = n) else
rep(1, length.out = n)
- init1 = if (is.Numeric( .ishape, positive = TRUE))
+ init1 <- if (is.Numeric( .ishape, positive = TRUE))
rep( .ishape, length.out = n) else
rep(init2 / (y+1/8)^2, length.out = n)
etastart <-
@@ -9251,21 +7404,21 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
.escale = escale, .eshape = eshape,
.ishape = ishape, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c(shape = .lshape , scale = .lscale)
- misc$earg = list(shape = .eshape, scale = .escale )
+ misc$link <- c(shape = .lshape , scale = .lscale)
+ misc$earg <- list(shape = .eshape, scale = .escale )
misc$expected = TRUE
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(c(w) * dnaka(x = y, shape = shape, scale = scale, log = TRUE))
@@ -9273,23 +7426,23 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
.escale = escale, .eshape = eshape))),
vfamily = c("nakagami"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
- dl.dshape = 1 + log(shape/Scale) - digamma(shape) +
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ dl.dshape <- 1 + log(shape/Scale) - digamma(shape) +
2 * log(y) - y^2 / Scale
- dl.dscale = -shape/Scale + shape * (y/Scale)^2
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dl.dscale <- -shape/Scale + shape * (y/Scale)^2
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dshape * dshape.deta,
dl.dscale * dscale.deta)
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
- d2l.dshape2 = trigamma(shape) - 1/shape
- d2l.dscale2 = shape / Scale^2
- wz = matrix(as.numeric(NA), n, M) # diagonal
- wz[, iam(1, 1, M)] = d2l.dshape2 * dshape.deta^2
- wz[, iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ d2l.dshape2 <- trigamma(shape) - 1/shape
+ d2l.dscale2 <- shape / Scale^2
+ wz <- matrix(as.numeric(NA), n, M) # diagonal
+ wz[, iam(1, 1, M)] <- d2l.dshape2 * dshape.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))))
@@ -9302,11 +7455,11 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(scale))
- x = rep(x, length.out = L); scale = rep(scale, length.out = L);
- logdensity = rep(log(0), length.out = L)
- xok = (x > 0)
- logdensity[xok] = log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
+ L <- max(length(x), length(scale))
+ x <- rep(x, length.out = L); scale = rep(scale, length.out = L);
+ logdensity <- rep(log(0), length.out = L)
+ xok <- (x > 0)
+ logdensity[xok] <- log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
2 * log(scale[xok])
if (log.arg) logdensity else exp(logdensity)
}
@@ -9315,8 +7468,8 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
prayleigh <- function(q, scale = 1) {
if (any(scale <= 0))
stop("argument 'scale' must be positive")
- L = max(length(q), length(scale))
- q = rep(q, length.out = L); scale = rep(scale, length.out = L);
+ L <- max(length(q), length(scale))
+ q <- rep(q, length.out = L); scale = rep(scale, length.out = L);
ifelse(q > 0, -expm1(-0.5*(q/scale)^2), 0)
}
@@ -9324,15 +7477,15 @@ prayleigh <- function(q, scale = 1) {
qrayleigh <- function(p, scale = 1) {
if (any(p <= 0) || any(p >= 1))
stop("argument 'p' must be between 0 and 1")
- ans = scale * sqrt(-2 * log1p(-p))
- ans[scale <= 0] = NaN
+ ans <- scale * sqrt(-2 * log1p(-p))
+ ans[scale <= 0] <- NaN
ans
}
rrayleigh <- function(n, scale = 1) {
- ans = scale * sqrt(-2 * log(runif(n)))
- ans[scale <= 0] = NaN
+ ans <- scale * sqrt(-2 * log(runif(n)))
+ ans[scale <= 0] <- NaN
ans
}
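qrayleigh() and prayleigh() are exact inverses on (0, 1): substituting q = scale*sqrt(-2*log1p(-p)) into -expm1(-0.5*(q/scale)^2) returns p. A quick check:

  pp <- c(0.25, 0.5, 0.75)
  prayleigh(qrayleigh(pp, scale = 2), scale = 2)   # exactly pp; no root-finding involved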
@@ -9363,10 +7516,10 @@ rrayleigh <- function(n, scale = 1) {
new("vglmff",
blurb = c("Rayleigh distribution\n\n",
- "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n\n",
- "Link: ",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: scale * sqrt(pi / 2)"),
+ "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n\n",
+ "Link: ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: scale * sqrt(pi / 2)"),
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 1
@@ -9407,12 +7560,12 @@ rrayleigh <- function(n, scale = 1) {
if (!length(etastart)) {
Ymat <- matrix(colSums(y) / colSums(w), n, ncoly, byrow = TRUE)
- b.init = (Ymat + 1/8) / sqrt(pi/2)
+ b.init <- (Ymat + 1/8) / sqrt(pi/2)
etastart <- theta2eta(b.init, .lscale , earg = .escale )
}
}), list( .lscale = lscale, .escale = escale))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale , earg = .escale )
+ Scale <- eta2theta(eta, .lscale , earg = .escale )
Scale * sqrt(pi / 2)
}, list( .lscale = lscale, .escale = escale))),
@@ -9483,18 +7636,18 @@ dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
stop("bad input for argument 'log'")
rm(log)
- N = max(length(x), length(location), length(scale),
+ N <- max(length(x), length(location), length(scale),
length(inequality), length(shape))
- x = rep(x, length.out = N);
- location = rep(location, length.out = N)
- scale = rep(scale, length.out = N);
- inequality = rep(inequality, length.out = N)
- shape = rep(shape, length.out = N)
-
- logdensity = rep(log(0), length.out = N)
- xok = (x > location)
- zedd = (x - location) / scale
- logdensity[xok] = log(shape[xok]) -
+ x <- rep(x, length.out = N);
+ location <- rep(location, length.out = N)
+ scale <- rep(scale, length.out = N);
+ inequality <- rep(inequality, length.out = N)
+ shape <- rep(shape, length.out = N)
+
+ logdensity <- rep(log(0), length.out = N)
+ xok <- (x > location)
+ zedd <- (x - location) / scale
+ logdensity[xok] <- log(shape[xok]) -
log(scale[xok]) - log(inequality[xok]) +
(1/inequality[xok]-1) * log(zedd[xok]) -
(shape[xok]+1) *
@@ -9503,7 +7656,7 @@ dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
}
-pparetoIV =
+pparetoIV <-
function(q, location = 0, scale = 1, inequality = 1, shape = 1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
@@ -9514,22 +7667,22 @@ pparetoIV =
if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- N = max(length(q), length(location), length(scale),
+ N <- max(length(q), length(location), length(scale),
length(inequality), length(shape))
- q = rep(q, length.out = N);
- location = rep(location, length.out = N)
- scale = rep(scale, length.out = N);
- inequality = rep(inequality, length.out = N)
- shape = rep(shape, length.out = N)
- answer = q * 0
- ii = q > location
- zedd = (q[ii] - location[ii]) / scale[ii]
- answer[ii] = 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
+ q <- rep(q, length.out = N);
+ location <- rep(location, length.out = N)
+ scale <- rep(scale, length.out = N);
+ inequality <- rep(inequality, length.out = N)
+ shape <- rep(shape, length.out = N)
+ answer <- q * 0
+ ii <- q > location
+ zedd <- (q[ii] - location[ii]) / scale[ii]
+ answer[ii] <- 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
answer
}
-qparetoIV =
+qparetoIV <-
function(p, location = 0, scale = 1, inequality = 1, shape = 1) {
if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
@@ -9537,9 +7690,9 @@ qparetoIV =
stop("bad input for argument 'inequality'")
if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- ans = location + scale * (-1 + (1-p)^(-1/shape))^inequality
- ans[scale <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- location + scale * (-1 + (1-p)^(-1/shape))^inequality
+ ans[scale <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
@@ -9548,9 +7701,9 @@ rparetoIV =
function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
if (!is.Numeric(inequality, positive = TRUE))
stop("bad input for argument 'inequality'")
- ans = location + scale * (-1 + runif(n)^(-1/shape))^inequality
- ans[scale <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- location + scale * (-1 + runif(n)^(-1/shape))^inequality
+ ans[scale <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
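In this Pareto hierarchy, Pareto(III) is the shape = 1 special case of Pareto(IV) (the paretoIII deriv slot further below hard-codes shape <- 1), so the two density functions can be cross-checked; the dparetoIII() argument names are assumed to mirror dparetoIV().

  ## Illustrative cross-check; argument names of dparetoIII() are assumed.
  yy <- c(1.5, 2, 5, 10)
  dparetoIV (yy, location = 1, scale = 2, inequality = 0.5, shape = 1)
  dparetoIII(yy, location = 1, scale = 2, inequality = 0.5)   # should agree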
@@ -9646,27 +7799,27 @@ rparetoI <- function(n, scale = 1, shape = 1)
eshape <- link2list(lshape)
lshape <- attr(eshape, "function.name")
- iinequ = iinequality
-
+ iinequ <- iinequality
new("vglmff",
blurb = c("Pareto(IV) distribution F(y)=1-[1+((y - ", location,
- ")/scale)^(1/inequality)]^(-shape),",
- "\n",
- " y > ",
- location,
- ", scale > 0, inequality > 0, shape > 0,\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("inequality", linequ, earg = einequ ),
- ", ",
- namesof("shape", lshape, earg = eshape), "\n",
- "Mean: location + scale * NA"),
+ ")/scale)^(1/inequality)]^(-shape),",
+ "\n",
+ " y > ",
+ location,
+ ", scale > 0, inequality > 0, shape > 0,\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("inequality", linequ, earg = einequ ),
+ ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: location + scale * NA"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -9678,25 +7831,25 @@ rparetoI <- function(n, scale = 1, shape = 1)
- extra$location = location = .location
+ extra$location <- location <- .location
if (any(y <= location))
stop("the response must have values > than the 'location' argument")
if (!length(etastart)) {
- inequ.init = if (length( .iinequ )) .iinequ else 1
- scale.init = if (length( .iscale )) .iscale else 1
- shape.init = if (length( .ishape )) .ishape else NULL
+ inequ.init <- if (length( .iinequ )) .iinequ else 1
+ scale.init <- if (length( .iscale )) .iscale else 1
+ shape.init <- if (length( .ishape )) .ishape else NULL
if (!length(shape.init)) {
- zedd = (y - location) / scale.init
+ zedd <- (y - location) / scale.init
if ( .imethod == 1) {
- A1 = weighted.mean(1/(1 + zedd^(1/inequ.init)), w = w)
- A2 = weighted.mean(1/(1 + zedd^(1/inequ.init))^2, w = w)
+ A1 <- weighted.mean(1/(1 + zedd^(1/inequ.init)), w = w)
+ A2 <- weighted.mean(1/(1 + zedd^(1/inequ.init))^2, w = w)
} else {
- A1 = median(1/(1 + zedd^(1/inequ.init )))
- A2 = median(1/(1 + zedd^(1/inequ.init))^2)
+ A1 <- median(1/(1 + zedd^(1/inequ.init )))
+ A2 <- median(1/(1 + zedd^(1/inequ.init))^2)
}
- shape.init = max(0.01, (2*A2-A1)/(A1-A2))
+ shape.init <- max(0.01, (2*A2-A1)/(A1-A2))
}
etastart <- cbind(
@@ -9712,18 +7865,20 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .einequ = einequ, .eshape = eshape,
.iscale = iscale, .iinequ = iinequ, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
location + Scale * NA
}, list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
.escale = escale, .einequ = einequ, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale , "inequality" = .linequ,
- "shape" = .lshape)
- misc$earg = list(scale = .escale , inequality= .einequ,
- shape = .eshape )
+ misc$link <- c("scale" = .lscale ,
+ "inequality" = .linequ,
+ "shape" = .lshape)
+ misc$earg <- list("scale" = .escale ,
+ "inequality" = .einequ,
+ "shape" = .eshape )
misc$location = extra$location # Use this for prediction
}), list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ,
@@ -9731,11 +7886,11 @@ rparetoI <- function(n, scale = 1, shape = 1)
.eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
- zedd = (y - location) / Scale
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(c(w) * dparetoIV(x = y, location = location, scale = Scale,
@@ -9749,18 +7904,18 @@ rparetoI <- function(n, scale = 1, shape = 1)
vfamily = c("paretoIV"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
- zedd = (y - location) / Scale
- temp100 = 1 + zedd^(1/inequ)
- dl.dscale = (shape - (1+shape) / temp100) / (inequ * Scale)
- dl.dinequ = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ temp100 <- 1 + zedd^(1/inequ)
+ dl.dscale <- (shape - (1+shape) / temp100) / (inequ * Scale)
+ dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
inequ - 1) / inequ
- dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dinequ.deta = dtheta.deta(inequ, .linequ, earg = .einequ)
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dl.dshape <- -log(temp100) + 1/shape
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.dscale * dscale.deta,
dl.dinequ * dinequ.deta,
dl.dshape * dshape.deta)
@@ -9769,21 +7924,21 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .einequ = einequ,
.eshape = eshape))),
weight = eval(substitute(expression({
- temp200 = digamma(shape) - digamma(1) - 1
- d2scale.deta2 = shape / ((inequ*Scale)^2 * (shape+2))
- d2inequ.deta2 = (shape * (temp200^2 + trigamma(shape) + trigamma(1)
+ temp200 <- digamma(shape) - digamma(1) - 1
+ d2scale.deta2 <- shape / ((inequ*Scale)^2 * (shape+2))
+ d2inequ.deta2 <- (shape * (temp200^2 + trigamma(shape) + trigamma(1)
) + 2*(temp200+1)) / (inequ^2 * (shape+2))
- d2shape.deta2 = 1 / shape^2
- d2si.deta2 = (shape*(-temp200) -1) / (inequ^2 * Scale * (shape+2))
- d2ss.deta2 = -1 / ((inequ*Scale) * (shape+1))
- d2is.deta2 = temp200 / (inequ*(shape+1))
- wz = matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
- wz[, iam(2, 2, M)] = dinequ.deta^2 * d2inequ.deta2
- wz[, iam(3, 3, M)] = dshape.deta^2 * d2shape.deta2
- wz[, iam(1, 2, M)] = dscale.deta * dinequ.deta * d2si.deta2
- wz[, iam(1, 3, M)] = dscale.deta * dshape.deta * d2ss.deta2
- wz[, iam(2, 3, M)] = dinequ.deta * dshape.deta * d2is.deta2
+ d2shape.deta2 <- 1 / shape^2
+ d2si.deta2 <- (shape*(-temp200) -1) / (inequ^2 * Scale * (shape+2))
+ d2ss.deta2 <- -1 / ((inequ*Scale) * (shape+1))
+ d2is.deta2 <- temp200 / (inequ*(shape+1))
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
+ wz[, iam(3, 3, M)] <- dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] <- dscale.deta * dinequ.deta * d2si.deta2
+ wz[, iam(1, 3, M)] <- dscale.deta * dshape.deta * d2ss.deta2
+ wz[, iam(2, 3, M)] <- dinequ.deta * dshape.deta * d2is.deta2
c(w) * wz
}), list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
.escale = escale, .einequ = einequ, .eshape = eshape))))
@@ -9813,23 +7968,24 @@ rparetoI <- function(n, scale = 1, shape = 1)
linequ <- attr(einequ, "function.name")
- iinequ = iinequality
+ iinequ <- iinequality
new("vglmff",
blurb = c("Pareto(III) distribution F(y)=1-[1+((y - ", location,
- ")/scale)^(1/inequality)]^(-1),",
- "\n", " y > ",
- location, ", scale > 0, inequality > 0, \n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("inequality", linequ, earg = einequ ), "\n",
- "Mean: location + scale * NA"),
+ ")/scale)^(1/inequality)]^(-1),",
+ "\n", " y > ",
+ location, ", scale > 0, inequality > 0, \n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("inequality", linequ, earg = einequ ), "\n",
+ "Mean: location + scale * NA"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -9843,16 +7999,16 @@ rparetoI <- function(n, scale = 1, shape = 1)
if (!length(etastart)) {
- inequ.init = if (length( .iinequ)) .iinequ else NULL
- scale.init = if (length( .iscale )) .iscale else NULL
+ inequ.init <- if (length( .iinequ)) .iinequ else NULL
+ scale.init <- if (length( .iscale )) .iscale else NULL
if (!length(inequ.init) || !length(scale.init)) {
- probs = (1:4)/5
- ytemp = quantile(x = log(y-location), probs = probs)
- fittemp = lsfit(x = logit(probs), y = ytemp, intercept = TRUE)
+ probs <- (1:4)/5
+ ytemp <- quantile(x = log(y-location), probs = probs)
+ fittemp <- lsfit(x = logit(probs), y = ytemp, intercept = TRUE)
if (!length(inequ.init))
- inequ.init = max(fittemp$coef["X"], 0.01)
+ inequ.init <- max(fittemp$coef["X"], 0.01)
if (!length(scale.init))
- scale.init = exp(fittemp$coef["Intercept"])
+ scale.init <- exp(fittemp$coef["Intercept"])
}
etastart=cbind(
theta2eta(rep(scale.init, length.out = n),
@@ -9866,25 +8022,25 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .einequ = einequ,
.iscale = iscale, .iinequ = iinequ ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
location + Scale * NA
}, list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale , "inequality" = .linequ)
- misc$earg = list("scale" = .escale , "inequality" = .einequ)
+ misc$link <- c("scale" = .lscale , "inequality" = .linequ)
+ misc$earg <- list("scale" = .escale , "inequality" = .einequ)
- misc$location = extra$location # Use this for prediction
+ misc$location <- extra$location # Use this for prediction
}), list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
- zedd = (y - location) / Scale
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ zedd <- (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(c(w) * dparetoIII(x = y, location = location, scale=Scale,
@@ -9894,27 +8050,27 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .einequ = einequ ))),
vfamily = c("paretoIII"),
deriv = eval(substitute(expression({
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
- shape = 1
- zedd = (y - location) / Scale
- temp100 = 1 + zedd^(1/inequ)
- dl.dscale = (shape - (1+shape) / temp100) / (inequ * Scale)
- dl.dinequ = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape <- 1
+ zedd <- (y - location) / Scale
+ temp100 <- 1 + zedd^(1/inequ)
+ dl.dscale <- (shape - (1+shape) / temp100) / (inequ * Scale)
+ dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
inequ - 1) / inequ
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dinequ.deta = dtheta.deta(inequ, .linequ, earg = .einequ)
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
c(w) * cbind(dl.dscale * dscale.deta,
dl.dinequ * dinequ.deta)
}), list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))),
weight = eval(substitute(expression({
- d2scale.deta2 = 1 / ((inequ*Scale)^2 * 3)
- d2inequ.deta2 = (1 + 2* trigamma(1)) / (inequ^2 * 3)
- wz = matrix(0, n, M) # It is diagonal
- wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
- wz[, iam(2, 2, M)] = dinequ.deta^2 * d2inequ.deta2
+ d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
+ d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
+ wz <- matrix(0, n, M) # It is diagonal
+ wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
c(w) * wz
}), list( .lscale = lscale, .linequ = linequ,
.escale = escale, .einequ = einequ ))))
@@ -9950,16 +8106,17 @@ rparetoI <- function(n, scale = 1, shape = 1)
new("vglmff",
blurb = c("Pareto(II) distribution F(y)=1-[1+(y - ", location,
- ")/scale]^(-shape),",
- "\n", " y > ",
- location, ", scale > 0, shape > 0,\n",
- "Links: ", namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n",
+ ")/scale]^(-shape),",
+ "\n", " y > ",
+ location, ", scale > 0, shape > 0,\n",
+ "Links: ", namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
"Mean: location + scale * NA"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -9967,25 +8124,25 @@ rparetoI <- function(n, scale = 1, shape = 1)
c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
namesof("shape", .lshape , earg = .eshape , tag = FALSE))
- extra$location = location = .location
+ extra$location <- location <- .location
if (any(y <= location))
stop("the response must have values > than the 'location' argument")
if (!length(etastart)) {
- scale.init = if (length( .iscale )) .iscale else NULL
- shape.init = if (length( .ishape )) .ishape else NULL
+ scale.init <- if (length( .iscale )) .iscale else NULL
+ shape.init <- if (length( .ishape )) .ishape else NULL
if (!length(shape.init) || !length(scale.init)) {
- probs = (1:4)/5
- scale.init.0 = 1
- ytemp = quantile(x = log(y-location+scale.init.0),
+ probs <- (1:4)/5
+ scale.init.0 <- 1
+ ytemp <- quantile(x = log(y-location+scale.init.0),
probs = probs)
- fittemp = lsfit(x = log1p(-probs), y = ytemp,
+ fittemp <- lsfit(x = log1p(-probs), y = ytemp,
intercept = TRUE)
if (!length(shape.init))
- shape.init = max(-1/fittemp$coef["X"], 0.01)
+ shape.init <- max(-1/fittemp$coef["X"], 0.01)
if (!length(scale.init))
- scale.init = exp(fittemp$coef["Intercept"])
+ scale.init <- exp(fittemp$coef["Intercept"])
}
etastart=cbind(
theta2eta(rep(scale.init, length.out = n),
@@ -9997,24 +8154,26 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .eshape = eshape,
.lshape = lshape, .iscale = iscale, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
location + Scale * NA
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale , "shape" = .lshape)
- misc$earg = list("scale" = .escale , "shape" = .eshape )
- misc$location = extra$location # Use this for prediction
+ misc$link <- c("scale" = .lscale , "shape" = .lshape)
+
+ misc$earg <- list("scale" = .escale , "shape" = .eshape )
+
+ misc$location <- extra$location # Use this for prediction
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
- zedd = (y - location) / Scale
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(c(w) * dparetoII(x = y, location = location, scale=Scale,
@@ -10024,27 +8183,27 @@ rparetoI <- function(n, scale = 1, shape = 1)
.escale = escale, .eshape = eshape))),
vfamily = c("paretoII"),
deriv = eval(substitute(expression({
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
- shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
- zedd = (y - location) / Scale
- temp100 = 1 + zedd
- dl.dscale = (shape - (1+shape) / temp100) / (1 * Scale)
- dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ location <- extra$location
+ Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+ zedd <- (y - location) / Scale
+ temp100 <- 1 + zedd
+ dl.dscale <- (shape - (1+shape) / temp100) / (1 * Scale)
+ dl.dshape <- -log(temp100) + 1/shape
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.dscale * dscale.deta,
dl.dshape * dshape.deta)
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
- d2scale.deta2 = shape / (Scale^2 * (shape+2))
- d2shape.deta2 = 1 / shape^2
- d2ss.deta2 = -1 / (Scale * (shape+1))
- wz = matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
- wz[, iam(2, 2, M)] = dshape.deta^2 * d2shape.deta2
- wz[, iam(1, 2, M)] = dscale.deta * dshape.deta * d2ss.deta2
+ d2scale.deta2 <- shape / (Scale^2 * (shape+2))
+ d2shape.deta2 <- 1 / shape^2
+ d2ss.deta2 <- -1 / (Scale * (shape+1))
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] <- dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] <- dscale.deta * dshape.deta * d2ss.deta2
c(w) * wz
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape))))
@@ -10059,14 +8218,14 @@ dpareto <- function(x, location, shape, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(location), length(shape))
- x = rep(x, length.out = L);
- location = rep(location, length.out = L);
- shape = rep(shape, length.out = L)
+ L <- max(length(x), length(location), length(shape))
+ x <- rep(x, length.out = L);
+ location <- rep(location, length.out = L);
+ shape <- rep(shape, length.out = L)
- logdensity = rep(log(0), length.out = L)
- xok = (x > location)
- logdensity[xok] = log(shape[xok]) + shape[xok] * log(location[xok]) -
+ logdensity <- rep(log(0), length.out = L)
+ xok <- (x > location)
+ logdensity[xok] <- log(shape[xok]) + shape[xok] * log(location[xok]) -
(shape[xok]+1) * log(x[xok])
if (log.arg) logdensity else exp(logdensity)
}
@@ -10074,14 +8233,14 @@ dpareto <- function(x, location, shape, log = FALSE) {
ppareto <- function(q, location, shape) {
- L = max(length(q), length(location), length(shape))
- q = rep(q, length.out = L);
- location = rep(location, length.out = L);
- shape = rep(shape, length.out = L)
+ L <- max(length(q), length(location), length(shape))
+ q <- rep(q, length.out = L);
+ location <- rep(location, length.out = L);
+ shape <- rep(shape, length.out = L)
- ans = ifelse(q > location, 1 - (location/q)^shape, 0)
- ans[location <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- ifelse(q > location, 1 - (location/q)^shape, 0)
+ ans[location <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
@@ -10090,17 +8249,17 @@ qpareto <- function(p, location, shape) {
if (any(p <= 0) || any(p >= 1))
stop("argument 'p' must be between 0 and 1")
- ans = location / (1 - p)^(1/shape)
- ans[location <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- location / (1 - p)^(1/shape)
+ ans[location <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
rpareto <- function(n, location, shape) {
- ans = location / runif(n)^(1/shape)
- ans[location <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- location / runif(n)^(1/shape)
+ ans[location <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
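
As a quick sanity check on the dpareto/ppareto/qpareto/rpareto wrappers shown in the hunks above, the following sketch (plain R, assuming those definitions are sourced unchanged) verifies the usual inversion and integration identities; the parameter values are arbitrary illustrations only.

## Minimal consistency check for the Pareto(I) helpers defined above.
## Assumes dpareto(), ppareto(), qpareto() and rpareto() have been sourced as-is.
set.seed(123)
loc <- 2; shp <- 3
p <- c(0.1, 0.5, 0.9)
# qpareto() should invert ppareto() for valid parameters
stopifnot(isTRUE(all.equal(ppareto(qpareto(p, loc, shp), loc, shp), p)))
# dpareto() should integrate (numerically) to ppareto() on (location, q]
q <- 5
num <- integrate(dpareto, lower = loc, upper = q,
                 location = loc, shape = shp)$value
stopifnot(abs(num - ppareto(q, loc, shp)) < 1e-4)
# random draws lie strictly above the location parameter
y <- rpareto(1000, location = loc, shape = shp)
stopifnot(all(y > loc))                  # support is (location, Inf)
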
@@ -10127,47 +8286,49 @@ rpareto <- function(n, location, shape) {
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
predictors.names <-
namesof("shape", .lshape , earg = .earg , tag = FALSE)
- locationhat = if (!length( .location)) {
- locationEstimated = TRUE
+ locationhat <- if (!length( .location)) {
+ locationEstimated <- TRUE
min(y) # - .smallno
} else {
- locationEstimated = FALSE
+ locationEstimated <- FALSE
.location
}
if (any(y < locationhat))
stop("the value of location is too high ",
"(requires 0 < location < min(y))")
- extra$location = locationhat
- extra$locationEstimated = locationEstimated
+ extra$location <- locationhat
+ extra$locationEstimated <- locationEstimated
if (!length(etastart)) {
- k.init = (y + 1/8) / (y - locationhat + 1/8)
+ k.init <- (y + 1/8) / (y - locationhat + 1/8)
etastart <- theta2eta(k.init, .lshape , earg = .earg )
}
}), list( .lshape = lshape, .earg = earg,
.location = location ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- k = eta2theta(eta, .lshape , earg = .earg )
- location = extra$location
+ k <- eta2theta(eta, .lshape , earg = .earg )
+ location <- extra$location
ifelse(k > 1, k * location / (k-1), NA)
}, list( .lshape = lshape, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(k = .lshape)
- misc$earg = list(k = .earg )
+ misc$link <- c(k = .lshape)
+
+ misc$earg <- list(k = .earg )
- misc$location = extra$location # Use this for prediction
+ misc$location <- extra$location # Use this for prediction
}), list( .lshape = lshape, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- k = eta2theta(eta, .lshape , earg = .earg )
- location = extra$location
+ k <- eta2theta(eta, .lshape , earg = .earg )
+ location <- extra$location
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
@@ -10177,15 +8338,15 @@ rpareto <- function(n, location, shape) {
}, list( .lshape = lshape, .earg = earg ))),
vfamily = c("pareto1"),
deriv = eval(substitute(expression({
- location = extra$location
- k = eta2theta(eta, .lshape , earg = .earg )
- dl.dk = 1/k + log(location/y)
- dk.deta = dtheta.deta(k, .lshape , earg = .earg )
+ location <- extra$location
+ k <- eta2theta(eta, .lshape , earg = .earg )
+ dl.dk <- 1/k + log(location/y)
+ dk.deta <- dtheta.deta(k, .lshape , earg = .earg )
c(w) * dl.dk * dk.deta
}), list( .lshape = lshape, .earg = earg ))),
weight = eval(substitute(expression({
- ed2l.dk2 = 1 / k^2
- wz = c(w) * dk.deta^2 * ed2l.dk2
+ ed2l.dk2 <- 1 / k^2
+ wz <- c(w) * dk.deta^2 * ed2l.dk2
wz
}), list( .lshape = lshape, .earg = earg ))))
}
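
For context, a minimal and hypothetical usage sketch of the pareto1() family whose slots are edited above; the 'location' argument name is inferred from the .location substitution in the initialize slot, and the data are simulated with the rpareto() defined earlier in this file.

## Hypothetical illustration only: fit the Pareto(I) shape with vglm().
library(VGAM)
set.seed(1)
pdata <- data.frame(y = rpareto(n = 500, location = 1, shape = 2.5))
fit <- vglm(y ~ 1, family = pareto1(location = 1), data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)   # shape on the link ("loge") scale
Coef(fit)                  # back-transformed estimate of k (the shape)
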
@@ -10209,11 +8370,11 @@ dtpareto <- function(x, lower, upper, shape, log = FALSE) {
if (!is.Numeric(shape, positive = TRUE))
stop("argument 'shape' must be positive")
- L = max(length(x), length(lower), length(upper), length(shape))
- x = rep(x, length.out = L);
- shape = rep(shape, length.out = L)
- lower = rep(lower, length.out = L);
- upper = rep(upper, length.out = L);
+ L <- max(length(x), length(lower), length(upper), length(shape))
+ x <- rep(x, length.out = L);
+ shape <- rep(shape, length.out = L)
+ lower <- rep(lower, length.out = L);
+ upper <- rep(upper, length.out = L);
logdensity <- rep(log(0), length.out = L)
@@ -10234,22 +8395,22 @@ ptpareto <- function(q, lower, upper, shape) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- L = max(length(q), length(lower), length(upper), length(shape))
- q = rep(q, length.out = L);
- lower = rep(lower, length.out = L);
- upper = rep(upper, length.out = L);
- shape = rep(shape, length.out = L)
+ L <- max(length(q), length(lower), length(upper), length(shape))
+ q <- rep(q, length.out = L);
+ lower <- rep(lower, length.out = L);
+ upper <- rep(upper, length.out = L);
+ shape <- rep(shape, length.out = L)
- ans = q * 0
+ ans <- q * 0
xok <- (0 < lower) & (lower < q) & (q < upper) & (shape > 0)
- ans[xok] = (1 - (lower[xok]/q[xok])^shape[xok]) / (1 -
+ ans[xok] <- (1 - (lower[xok]/q[xok])^shape[xok]) / (1 -
(lower[xok]/upper[xok])^shape[xok])
- ans[q >= upper] = 1
+ ans[q >= upper] <- 1
ans[upper < lower] <- NaN
- ans[lower <= 0] = NaN
- ans[upper <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans[lower <= 0] <- NaN
+ ans[upper <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
@@ -10261,10 +8422,10 @@ qtpareto <- function(p, lower, upper, shape) {
if (max(p) >= 1)
stop("argument 'p' must be in (0, 1)")
- ans = lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
- ans[lower <= 0] = NaN
- ans[upper <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
+ ans[lower <= 0] <- NaN
+ ans[upper <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans[upper < lower] <- NaN
ans
}
@@ -10272,10 +8433,10 @@ qtpareto <- function(p, lower, upper, shape) {
rtpareto <- function(n, lower, upper, shape) {
- ans = qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
- ans[lower <= 0] = NaN
- ans[upper <= 0] = NaN
- ans[shape <= 0] = NaN
+ ans <- qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
+ ans[lower <= 0] <- NaN
+ ans[upper <= 0] <- NaN
+ ans[shape <= 0] <- NaN
ans
}
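
A similar round-trip sketch for the truncated Pareto helpers (again assuming the dtpareto/ptpareto/qtpareto/rtpareto definitions above are sourced as-is; parameter values are arbitrary).

## Round-trip check for the truncated Pareto helpers above.
set.seed(2)
lo <- 1; up <- 8; shp <- 1.5
p <- seq(0.05, 0.95, by = 0.15)
stopifnot(isTRUE(all.equal(ptpareto(qtpareto(p, lo, up, shp), lo, up, shp), p)))
y <- rtpareto(200, lower = lo, upper = up, shape = shp)
stopifnot(all(y > lo), all(y < up))      # support is (lower, upper)
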
@@ -10311,15 +8472,16 @@ rtpareto <- function(n, lower, upper, shape) {
new("vglmff",
blurb = c("Truncated Pareto distribution f(y) = shape * lower^shape /",
- "(y^(shape+1) * (1-(lower/upper)^shape)),",
- " 0 < lower < y < upper < Inf, shape>0\n",
- "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
- "Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
- " ((1-shape) * (1-(lower/upper)^shape))"),
+ "(y^(shape+1) * (1-(lower/upper)^shape)),",
+ " 0 < lower < y < upper < Inf, shape>0\n",
+ "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
+ "Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
+ " ((1-shape) * (1-(lower/upper)^shape))"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -10330,24 +8492,24 @@ rtpareto <- function(n, lower, upper, shape) {
stop("the value of argument 'lower' is too high ",
"(requires '0 < lower < min(y)')")
- extra$lower = .lower
+ extra$lower <- .lower
if (any(y >= .upper))
stop("the value of argument 'upper' is too low ",
"(requires 'max(y) < upper')")
- extra$upper = .upper
+ extra$upper <- .upper
if (!length(etastart)) {
- shape.init = if (is.Numeric( .ishape )) 0 * y + .ishape else
+ shape.init <- if (is.Numeric( .ishape )) 0 * y + .ishape else
if ( .imethod == 2) {
0 * y + median(rep((y + 1/8) / (y - .lower + 1/8), times = w))
} else {
tpareto1.Loglikfun <- function(shape, y, x, w, extraargs) {
- myratio = .lower / .upper
+ myratio <- .lower / .upper
sum(c(w) * (log(shape) + shape * log( .lower) -
(shape+1) * log(y) - log1p(-myratio^shape)))
}
- shape.grid = 2^((-4):4)
- try.this = getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
+ shape.grid <- 2^((-4):4)
+ try.this <- getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
y = y, x = x, w = w)
try.this = rep(try.this, length.out = n)
try.this
@@ -10359,49 +8521,54 @@ rtpareto <- function(n, lower, upper, shape) {
.imethod = imethod,
.lower = lower, .upper = upper ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .lshape , earg = .earg )
- myratio = .lower / .upper
- constprop = shape * .lower^shape / (1 - myratio^shape)
+ shape <- eta2theta(eta, .lshape , earg = .earg )
+ myratio <- .lower / .upper
+ constprop <- shape * .lower^shape / (1 - myratio^shape)
constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
}, list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
- last = eval(substitute(expression({
- misc$link = c(shape = .lshape)
- misc$earg = list(shape = .earg )
- misc$lower = extra$lower
- misc$upper = extra$upper
- misc$expected = TRUE
- }), list( .lshape = lshape, .earg = earg,
- .lower = lower, .upper = upper ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .lshape , earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- ans = sum(c(w) * dtpareto(x = y, lower = .lower ,
- upper = .upper ,
- shape = shape, log = TRUE))
- ans
- }
- }, list( .lshape = lshape, .earg = earg,
- .lower = lower, .upper = upper ))),
- vfamily = c("tpareto1"),
- deriv = eval(substitute(expression({
- shape = eta2theta(eta, .lshape , earg = .earg )
- myratio = .lower / .upper
- myratio2 = myratio^shape
- tmp330 = myratio2 * log(myratio) / (1 - myratio2)
- dl.dshape = 1 / shape + log( .lower) - log(y) + tmp330
- dshape.deta = dtheta.deta(shape, .lshape , earg = .earg )
- c(w) * dl.dshape * dshape.deta
- }), list( .lshape = lshape, .earg = earg,
- .lower = lower, .upper = upper ))),
- weight = eval(substitute(expression({
- ed2l.dshape2 = 1 / shape^2 - tmp330^2 / myratio2
- wz = c(w) * dshape.deta^2 * ed2l.dshape2
- wz
- }), list( .lshape = lshape, .earg = earg,
- .lower = lower, .upper = upper ))))
+ last = eval(substitute(expression({
+ misc$link <- c(shape = .lshape)
+
+ misc$earg <- list(shape = .earg )
+
+ misc$lower <- extra$lower
+ misc$upper <- extra$upper
+ misc$expected <- TRUE
+ }), list( .lshape = lshape, .earg = earg,
+ .lower = lower, .upper = upper ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape <- eta2theta(eta, .lshape , earg = .earg )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ ans <- sum(c(w) * dtpareto(x = y, lower = .lower ,
+ upper = .upper ,
+ shape = shape, log = TRUE))
+ ans
+ }
+ }, list( .lshape = lshape, .earg = earg,
+ .lower = lower, .upper = upper ))),
+ vfamily = c("tpareto1"),
+ deriv = eval(substitute(expression({
+ shape <- eta2theta(eta, .lshape , earg = .earg )
+ myratio <- .lower / .upper
+ myratio2 <- myratio^shape
+ tmp330 <- myratio2 * log(myratio) / (1 - myratio2)
+
+ dl.dshape <- 1 / shape + log( .lower) - log(y) + tmp330
+
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .earg )
+
+ c(w) * dl.dshape * dshape.deta
+ }), list( .lshape = lshape, .earg = earg,
+ .lower = lower, .upper = upper ))),
+ weight = eval(substitute(expression({
+ ned2l.dshape2 <- 1 / shape^2 - tmp330^2 / myratio2
+ wz <- c(w) * dshape.deta^2 * ned2l.dshape2
+ wz
+ }), list( .lshape = lshape, .earg = earg,
+ .lower = lower, .upper = upper ))))
}
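
And a hypothetical end-to-end sketch for the tpareto1() family itself; treating 'lower' and 'upper' as named arguments is an assumption based on the .lower/.upper substitutions visible in the slots, and the initialize code requires them to bracket the data strictly.

## Hypothetical fit of the truncated Pareto family from the hunk above.
library(VGAM)
set.seed(3)
tdata <- data.frame(y = rtpareto(300, lower = 1, upper = 10, shape = 2))
fit <- vglm(y ~ 1, family = tpareto1(lower = 1, upper = 10), data = tdata,
            trace = TRUE)
Coef(fit)                  # estimate of 'shape'
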
@@ -10415,8 +8582,7 @@ erfc <- function(x)
- wald <- function(link.lambda = "loge", init.lambda = NULL)
-{
+ wald <- function(link.lambda = "loge", init.lambda = NULL) {
link.lambda <- as.list(substitute(link.lambda))
earg <- link2list(link.lambda)
@@ -10426,18 +8592,19 @@ erfc <- function(x)
new("vglmff",
blurb = c("Standard Wald distribution\n\n",
- "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
- "exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
- "\n",
- "Link: ",
- namesof("lambda", link.lambda, earg = earg), "\n",
- "Mean: ", "1\n",
- "Variance: 1 / lambda"),
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
+ "exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
+ "\n",
+ "Link: ",
+ namesof("lambda", link.lambda, earg = earg), "\n",
+ "Mean: ", "1\n",
+ "Variance: 1 / lambda"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
predictors.names <-
@@ -10445,9 +8612,9 @@ erfc <- function(x)
if (!length(etastart)) {
- initlambda = if (length( .init.lambda)) .init.lambda else
+ initlambda <- if (length( .init.lambda)) .init.lambda else
1 / (0.01 + (y-1)^2)
- initlambda = rep(initlambda, length.out = n)
+ initlambda <- rep(initlambda, length.out = n)
etastart <-
cbind(theta2eta(initlambda,
link = .link.lambda , earg = .earg ))
@@ -10458,25 +8625,27 @@ erfc <- function(x)
0*eta + 1
},
last = eval(substitute(expression({
- misc$link = c(lambda = .link.lambda )
- misc$earg = list(lambda = .earg )
+ misc$link <- c(lambda = .link.lambda )
+
+ misc$earg <- list(lambda = .earg )
+
}), list( .link.lambda = link.lambda, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta, link=.link.lambda, earg = .earg )
+ lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
}, list( .link.lambda = link.lambda, .earg = earg ))),
vfamily = "wald",
deriv = eval(substitute(expression({
- lambda = eta2theta(eta, link=.link.lambda, earg = .earg )
- dl.dlambda = 0.5 / lambda + 1 - 0.5 * (y + 1/y)
- dlambda.deta = dtheta.deta(theta=lambda, link=.link.lambda, earg = .earg )
+ lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
+ dl.dlambda <- 0.5 / lambda + 1 - 0.5 * (y + 1/y)
+ dlambda.deta <- dtheta.deta(theta=lambda, link=.link.lambda, earg = .earg )
c(w) * cbind(dl.dlambda * dlambda.deta)
}), list( .link.lambda = link.lambda, .earg = earg ))),
weight = eval(substitute(expression({
- d2l.dlambda2 = 0.5 / (lambda^2)
+ d2l.dlambda2 <- 0.5 / (lambda^2)
c(w) * cbind(dlambda.deta^2 * d2l.dlambda2)
}), list( .link.lambda = link.lambda, .earg = earg ))))
}
@@ -10501,7 +8670,7 @@ erfc <- function(x)
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- ishape[ishape == 1] = 1.1 # Fails in @deriv
+ ishape[ishape == 1] <- 1.1 # Fails in @deriv
lscale <- as.list(substitute(lscale))
@@ -10516,17 +8685,18 @@ erfc <- function(x)
new("vglmff",
blurb = c("Exponentiated Exponential Distribution\n",
- "Links: ",
- namesof("shape", lshape, earg = eshape), ", ",
- namesof("scale", lscale, earg = escale),"\n",
- "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ "Links: ",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),"\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -10536,13 +8706,13 @@ erfc <- function(x)
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, positive = TRUE))
+ shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
rep( .ishape, length.out = n)
- scale.init = if (length( .iscale ))
+ scale.init <- if (length( .iscale ))
rep( .iscale, length.out = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
- scale.init = rep(weighted.mean(scale.init, w = w),
+ scale.init <- rep(weighted.mean(scale.init, w = w),
length.out = n)
etastart <-
cbind(theta2eta(shape.init, .lshape , earg = .eshape ),
@@ -10552,22 +8722,22 @@ erfc <- function(x)
.iscale = iscale, .ishape = ishape,
.eshape = eshape, .escale = escale))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
(digamma(shape+1)-digamma(1)) / scale
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale))),
last = eval(substitute(expression({
- misc$link = c("shape" = .lshape , "scale" = .lscale)
- misc$earg = list("shape" = .eshape, "scale" = .escale )
+ misc$link <- c("shape" = .lshape , "scale" = .lscale )
+ misc$earg <- list("shape" = .eshape , "scale" = .escale )
- misc$expected = TRUE
+ misc$expected <- TRUE
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale))),
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(c(w) * (log(shape) + log(scale) +
@@ -10576,54 +8746,54 @@ erfc <- function(x)
.eshape = eshape, .escale = escale))),
vfamily = c("expexp"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
- scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
- dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
- dl.dshape = 1/shape + log1p(-exp(-scale*y))
+ dl.dscale <- 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
+ dl.dshape <- 1/shape + log1p(-exp(-scale*y))
- dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.dshape * dshape.deta,
dl.dscale * dscale.deta)
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale))),
weight = eval(substitute(expression({
- d11 = 1 / shape^2 # True for all shape
- d22 = d12 = rep(as.numeric(NA), length.out = n)
- index2 = abs(shape - 2) > .tolerance # index2 = shape != 1
- largeno = 10000
+ d11 <- 1 / shape^2 # True for all shape
+ d22 <- d12 <- rep(as.numeric(NA), length.out = n)
+ index2 <- abs(shape - 2) > .tolerance # i.e., shape != 2
+ largeno <- 10000
if (any(index2)) {
- Shape = shape[index2]
- Shape[abs(Shape-1) < .tolerance] = 1.001 # digamma(0) is undefined
- Scale = scale[index2]
- tmp200 = trigamma(1)-trigamma(Shape-1) +
+ Shape <- shape[index2]
+ Shape[abs(Shape-1) < .tolerance] <- 1.001 # digamma(0) is undefined
+ Scale <- scale[index2]
+ tmp200 <- trigamma(1)-trigamma(Shape-1) +
(digamma(Shape-1)-digamma(1))^2 # Fails when Shape == 1
- tmp300 = trigamma(1)-digamma(Shape)+(digamma(Shape)-digamma(1))^2
- d22[index2] = (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
+ tmp300 <- trigamma(1)-digamma(Shape)+(digamma(Shape)-digamma(1))^2
+ d22[index2] <- (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
Shape*tmp300 / Scale^2
}
if (any(!index2)) {
- Scale = scale[!index2]
- d22[!index2] = (1 + 4 * sum(1/(2 + (0:largeno))^3)) / Scale^2
+ Scale <- scale[!index2]
+ d22[!index2] <- (1 + 4 * sum(1/(2 + (0:largeno))^3)) / Scale^2
}
- index1 = abs(shape - 1) > .tolerance # index1 = shape != 1
+ index1 <- abs(shape - 1) > .tolerance # index1 <- shape != 1
if (any(index1)) {
- Shape = shape[index1]
- Scale = scale[index1]
- d12[index1] = -(Shape*(digamma(Shape)-digamma(1))/(Shape-1) -
+ Shape <- shape[index1]
+ Scale <- scale[index1]
+ d12[index1] <- -(Shape*(digamma(Shape)-digamma(1))/(Shape-1) -
digamma(Shape+1) + digamma(1)) / Scale
}
if (any(!index1)) {
- Scale = scale[!index1]
- d12[!index1] = -sum(1/(2 + (0:largeno))^2) / Scale
+ Scale <- scale[!index1]
+ d12[!index1] <- -sum(1/(2 + (0:largeno))^2) / Scale
}
- wz = matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dshape.deta^2 * d11
- wz[, iam(2, 2, M)] = dscale.deta^2 * d22
- wz[, iam(1, 2, M)] = dscale.deta * dshape.deta * d12
+ wz <- matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] <- dshape.deta^2 * d11
+ wz[, iam(2, 2, M)] <- dscale.deta^2 * d22
+ wz[, iam(1, 2, M)] <- dscale.deta * dshape.deta * d12
c(w) * wz
}), list( .tolerance = tolerance ))))
}
@@ -10648,14 +8818,15 @@ erfc <- function(x)
new("vglmff",
blurb = c("Exponentiated Exponential Distribution",
- " (profile likelihood estimation)\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ " (profile likelihood estimation)\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
- ncol.w.max = 1, ncol.y.max = 1)
+ ncol.w.max = 1,
+ ncol.y.max = 1)
@@ -10670,15 +8841,15 @@ erfc <- function(x)
if (!intercept.only)
stop("this family function only works for an ",
"intercept-only, i.e., y ~ 1")
- extra$yvector = y
- extra$sumw = sum(w)
- extra$w = w
+ extra$yvector <- y
+ extra$sumw <- sum(w)
+ extra$w <- w
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, positive = TRUE))
+ shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
rep( .ishape, length.out = n)
- scaleinit = if (length( .iscale ))
+ scaleinit <- if (length( .iscale ))
rep( .iscale, length.out = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
etastart <-
@@ -10687,26 +8858,26 @@ erfc <- function(x)
}), list( .lscale = lscale, .iscale = iscale, .ishape = ishape,
.escale = escale))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- scale = eta2theta(eta, .lscale , earg = .escale )
- temp7 = -expm1(-scale*extra$yvector)
- shape = -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
+ scale <- eta2theta(eta, .lscale , earg = .escale )
+ temp7 <- -expm1(-scale*extra$yvector)
+ shape <- -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
(digamma(shape+1)-digamma(1)) / scale
}, list( .lscale = lscale,
.escale = escale))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale)
- misc$earg = list("scale" = .escale )
+ misc$link <- c("scale" = .lscale)
+ misc$earg <- list("scale" = .escale )
- temp7 = -expm1(-scale*y)
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- misc$shape = shape # Store the ML estimate here
- misc$pooled.weight = pooled.weight
+ temp7 <- -expm1(-scale*y)
+ shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ misc$shape <- shape # Store the ML estimate here
+ misc$pooled.weight <- pooled.weight
}), list( .lscale = lscale, .escale = escale))),
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- scale = eta2theta(eta, .lscale , earg = .escale )
- temp7 = -expm1(-scale*y)
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ scale <- eta2theta(eta, .lscale , earg = .escale )
+ temp7 <- -expm1(-scale*y)
+ shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(c(w) * (log(shape) + log(scale) +
@@ -10714,32 +8885,32 @@ erfc <- function(x)
}, list( .lscale = lscale, .escale = escale))),
vfamily = c("expexp1"),
deriv = eval(substitute(expression({
- scale = eta2theta(eta, .lscale , earg = .escale )
+ scale <- eta2theta(eta, .lscale , earg = .escale )
- temp6 = exp(-scale*y)
- temp7 = 1-temp6
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- d1 = 1/scale + (shape-1)*y*temp6/temp7 - y
+ temp6 <- exp(-scale*y)
+ temp7 <- 1-temp6
+ shape <- -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ d1 <- 1/scale + (shape-1)*y*temp6/temp7 - y
c(w) * cbind(d1 * dtheta.deta(scale, .lscale , earg = .escale ))
}), list( .lscale = lscale, .escale = escale))),
weight = eval(substitute(expression({
- d11 = 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
+ d11 <- 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
(y*temp7+temp6) - y*temp6 / (log(temp7))^2)
- wz = matrix(0, n, dimm(M))
+ wz <- matrix(0, n, dimm(M))
wz[, iam(1, 1, M)] =
dtheta.deta(scale, .lscale , earg = .escale )^2 * d11 -
d2theta.deta2(scale, .lscale , earg = .escale ) * d1
if (FALSE && intercept.only) {
- sumw = sum(w)
+ sumw <- sum(w)
for(ii in 1:ncol(wz))
- wz[, ii] = sum(wz[, ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
} else
- pooled.weight = FALSE
+ pooled.weight <- FALSE
c(w) * wz
}), list( .lscale = lscale, .escale = escale))))
}
@@ -10786,12 +8957,12 @@ erfc <- function(x)
new("vglmff",
blurb = c("Two-parameter logistic distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: location", "\n",
- "Variance: (pi * scale)^2 / 3"),
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location", "\n",
+ "Variance: (pi * scale)^2 / 3"),
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 2
@@ -10808,7 +8979,8 @@ erfc <- function(x)
temp5 <-
w.y.check(w = w, y = y,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -10834,23 +9006,23 @@ erfc <- function(x)
if (!length(etastart)) {
if ( .imethod == 1) {
- locat.init = y
- scale.init = sqrt(3) * apply(y, 2, sd) / pi
+ locat.init <- y
+ scale.init <- sqrt(3) * apply(y, 2, sd) / pi
} else {
- locat.init = scale.init = NULL
+ locat.init <- scale.init <- NULL
for(ii in 1:ncoly) {
- locat.init = c(locat.init, median(rep(y[, ii], w[, ii])))
- scale.init = c(scale.init, sqrt(3) * sum(w[, ii] *
+ locat.init <- c(locat.init, median(rep(y[, ii], w[, ii])))
+ scale.init <- c(scale.init, sqrt(3) * sum(w[, ii] *
(y[, ii] - locat.init[ii])^2) / (sum(w[, ii]) * pi))
}
}
- locat.init = matrix(if (length( .ilocat )) .ilocat else
+ locat.init <- matrix(if (length( .ilocat )) .ilocat else
locat.init, n, ncoly, byrow = TRUE)
if ( .llocat == "loge")
- locat.init = abs(locat.init) + 0.001
+ locat.init <- abs(locat.init) + 0.001
- scale.init = matrix(if (length( .iscale )) .iscale else
+ scale.init <- matrix(if (length( .iscale )) .iscale else
scale.init, n, ncoly, byrow = TRUE)
etastart <- cbind(
@@ -10899,8 +9071,8 @@ erfc <- function(x)
Musual <- 2
ncoly <- M / Musual
- locat = eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(c(w) * dlogis(x = y, location = locat,
@@ -10913,29 +9085,29 @@ erfc <- function(x)
Musual <- 2
ncoly <- M / Musual
- locat = eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
- Scale = eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
- zedd = (y - locat) / Scale
- ezedd = exp(-zedd)
- dl.dlocat = (-expm1(-zedd)) / ((1 + ezedd) * Scale)
- dl.dscale = zedd * (-expm1(-zedd)) / ((1 + ezedd) * Scale) -
+ zedd <- (y - locat) / Scale
+ ezedd <- exp(-zedd)
+ dl.dlocat <- (-expm1(-zedd)) / ((1 + ezedd) * Scale)
+ dl.dscale <- zedd * (-expm1(-zedd)) / ((1 + ezedd) * Scale) -
1 / Scale
- dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
- dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)[, interleave.VGAM(M, M = Musual)]
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale))),
weight = eval(substitute(expression({
- ned2l.dlocat2 = 1 / (3 * Scale^2)
- ned2l.dscale2 = (3 + pi^2) / (9 * Scale^2)
+ ned2l.dlocat2 <- 1 / (3 * Scale^2)
+ ned2l.dscale2 <- (3 + pi^2) / (9 * Scale^2)
- wz = matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
- wz[, (1:ncoly) * Musual - 1] = ned2l.dlocat2 * dlocat.deta^2
- wz[, (1:ncoly) * Musual ] = ned2l.dscale2 * dscale.deta^2
+ wz <- matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
+ wz[, (1:ncoly) * Musual - 1] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, (1:ncoly) * Musual ] <- ned2l.dscale2 * dscale.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .llocat = llocat, .lscale = lscale,
@@ -10953,8 +9125,7 @@ erfc <- function(x)
imu = NULL,
probs.y = 0.75,
imethod = 1,
- shrinkage.init = 0.95, zero = NULL)
-{
+ shrinkage.init = 0.95, zero = NULL) {
@@ -10990,7 +9161,7 @@ erfc <- function(x)
- ans =
+ ans <-
new("vglmff",
blurb = c("Negative-binomial distribution with size known\n\n",
@@ -11022,7 +9193,8 @@ erfc <- function(x)
w.y.check(w = w, y = y,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
- ncol.w.max = Inf, ncol.y.max = Inf,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -11031,8 +9203,8 @@ erfc <- function(x)
- M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
+ M <- Musual * ncol(y)
+ NOS <- ncoly <- ncol(y) # Number of species
mynames1 <- paste("mu", if (NOS > 1) 1:NOS else "", sep = "")
predictors.names <-
namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
@@ -11043,9 +9215,9 @@ erfc <- function(x)
if (!length(etastart)) {
- mu.init = y
+ mu.init <- y
for(iii in 1:ncol(y)) {
- use.this = if ( .imethod == 1) {
+ use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
c(quantile(y[, iii], probs = .probs.y) + 1/16)
@@ -11054,21 +9226,21 @@ erfc <- function(x)
}
if (is.numeric( .mu.init )) {
- mu.init[, iii] = MU.INIT[, iii]
+ mu.init[, iii] <- MU.INIT[, iii]
} else {
- medabsres = median(abs(y[, iii] - use.this)) + 1/32
+ medabsres <- median(abs(y[, iii] - use.this)) + 1/32
allowfun <- function(z, maxtol = 1)
sign(z)*pmin(abs(z), maxtol)
- mu.init[, iii] = use.this + (1 - .sinit) *
+ mu.init[, iii] <- use.this + (1 - .sinit) *
allowfun(y[, iii] - use.this,
maxtol = medabsres)
- mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
+ mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
}
} # of for(iii)
- kmat = matrix( .size , n, NOS, byrow = TRUE)
+ kmat <- matrix( .size , n, NOS, byrow = TRUE)
@@ -11092,10 +9264,10 @@ erfc <- function(x)
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- 1
- eta = cbind(eta)
- NOS = ncol(eta) / Musual
- n = nrow(eta)
- kmat = matrix( .size , n, NOS, byrow = TRUE)
+ eta <- cbind(eta)
+ NOS <- ncol(eta) / Musual
+ n <- nrow(eta)
+ kmat <- matrix( .size , n, NOS, byrow = TRUE)
@@ -11117,17 +9289,17 @@ erfc <- function(x)
misc$link <- rep( .lmu , length = NOS)
names(misc$link) <- mynames1
- misc$earg = vector("list", M)
- names(misc$earg) = mynames1
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
for(ii in 1:NOS) {
- misc$earg[[ii]] = newemu
+ misc$earg[[ii]] <- newemu
}
- misc$imethod = .imethod
- misc$expected = TRUE
- misc$shrinkage.init = .sinit
- misc$size = kmat
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$shrinkage.init <- .sinit
+ misc$size <- kmat
}), list( .lmu = lmu,
.emu = emu,
.sinit = shrinkage.init,
@@ -11136,29 +9308,29 @@ erfc <- function(x)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mu = cbind(mu)
- y = cbind(y)
- w = cbind(w)
- eta = cbind(eta)
- NOS = ncol(eta)
- n = nrow(eta)
- kmat = matrix( .size , n, NOS, byrow = TRUE)
+ mu <- cbind(mu)
+ y <- cbind(y)
+ w <- cbind(w)
+ eta <- cbind(eta)
+ NOS <- ncol(eta)
+ n <- nrow(eta)
+ kmat <- matrix( .size , n, NOS, byrow = TRUE)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- ind1 = is.finite(kmat)
- ans1 = ans2 = 0
+ ind1 <- is.finite(kmat)
+ ans1 <- ans2 <- 0
for (kk in 1:NOS) {
- ind1 = is.finite(kmat[, kk])
- ans1 = ans1 +
+ ind1 <- is.finite(kmat[, kk])
+ ans1 <- ans1 +
sum(w[ind1] * dnbinom(x = y[ind1, kk], mu = mu[ind1, kk],
size = kmat[ind1, kk], log = TRUE))
- ans2 = ans2 +
+ ans2 <- ans2 +
sum(w[!ind1] * dpois(x = y[!ind1, kk], lambda = mu[!ind1, kk],
log = TRUE))
}
- ans = ans1 + ans2
+ ans <- ans1 + ans2
ans
}
}, list( .size = size ))),
@@ -11166,9 +9338,9 @@ erfc <- function(x)
vfamily = c("negbinomial.size"),
deriv = eval(substitute(expression({
- eta = cbind(eta)
- NOS = M = ncol(eta)
- kmat = matrix( .size , n, M, byrow = TRUE)
+ eta <- cbind(eta)
+ NOS <- M <- ncol(eta)
+ kmat <- matrix( .size , n, M, byrow = TRUE)
@@ -11178,24 +9350,24 @@ erfc <- function(x)
}
- dl.dmu = y/mu - (y+kmat)/(kmat+mu)
- dl.dmu[!is.finite(dl.dmu)] = (y/mu)[!is.finite(dl.dmu)] - 1
+ dl.dmu <- y/mu - (y+kmat)/(kmat+mu)
+ dl.dmu[!is.finite(dl.dmu)] <- (y/mu)[!is.finite(dl.dmu)] - 1
if ( .lmu == "nbcanlink")
newemu$wrt.eta <- 1
- dmu.deta = dtheta.deta(mu, .lmu , earg = newemu) # eta1
+ dmu.deta <- dtheta.deta(mu, .lmu , earg = newemu) # eta1
- myderiv = c(w) * dl.dmu * dmu.deta
+ myderiv <- c(w) * dl.dmu * dmu.deta
myderiv
}), list( .lmu = lmu,
.emu = emu,
.size = size ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
+ wz <- matrix(as.numeric(NA), n, M) # wz is 'diagonal'
- ned2l.dmu2 = 1 / mu - 1 / (mu + kmat)
- wz = dmu.deta^2 * ned2l.dmu2
+ ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
+ wz <- dmu.deta^2 * ned2l.dmu2
diff --git a/R/family.vglm.R b/R/family.vglm.R
index df14a22..f2d25e1 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 7012fd1..cb92cd8 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,7 +13,7 @@
dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
- log = FALSE) {
+ log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -24,13 +24,13 @@ dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(pobs0), length(prob), length(size))
+ LLL <- max(length(x), length(pobs0), length(prob), length(size))
if (length(x) != LLL) x <- rep(x, len = LLL)
if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
if (length(prob) != LLL) prob <- rep(prob, len = LLL)
if (length(size) != LLL) size <- rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
if (!is.Numeric(prob, positive = TRUE))
@@ -46,7 +46,7 @@ dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
size = size[!index0], log = TRUE)
} else {
ans[ index0] <- pobs0[index0]
- ans[!index0] <- (1-pobs0[!index0]) * dposnegbin(x[!index0],
+ ans[!index0] <- (1 - pobs0[!index0]) * dposnegbin(x[!index0],
prob = prob[!index0], size = size[!index0])
}
ans
@@ -61,21 +61,21 @@ pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
prob <- size / (size + munb)
}
- LLL = max(length(q), length(pobs0), length(prob), length(size))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(q), length(pobs0), length(prob), length(size))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- qindex = (q > 0)
- ans[ qindex] = pobs0[qindex] + (1 - pobs0[qindex]) *
- pposnegbin(q[qindex], size = size[qindex],
- prob = prob[qindex])
- ans[q < 0] = 0
- ans[q == 0] = pobs0[q == 0]
+ qindex <- (q > 0)
+ ans[ qindex] <- pobs0[qindex] + (1 - pobs0[qindex]) *
+ pposnegbin(q[qindex], size = size[qindex],
+ prob = prob[qindex])
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
ans
}
@@ -87,19 +87,19 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
prob <- size/(size + munb)
}
- LLL = max(length(p), length(pobs0), length(prob), length(size))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(p), length(pobs0), length(prob), length(size))
+ if (length(p) != LLL) p <- rep(p, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
- ans = p
- ans[p <= pobs0] = 0
- pindex = (p > pobs0)
- ans[pindex] = qposnegbin((p[pindex] -
+ ans <- p
+ ans[p <= pobs0] <- 0
+ pindex <- (p > pobs0)
+ ans[pindex] <- qposnegbin((p[pindex] -
pobs0[pindex]) / (1 - pobs0[pindex]),
prob = prob[pindex],
size = size[pindex])
@@ -108,9 +108,9 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (length(munb)) {
@@ -119,8 +119,9 @@ rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
prob <- size / (size + munb)
}
- ans = rposnegbin(n = use.n, prob = prob, size = size)
- if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ ans <- rposnegbin(n = use.n, prob = prob, size = size)
+ if (length(pobs0) != use.n)
+ pobs0 <- rep(pobs0, len = use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -136,24 +137,24 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(lambda), length(pobs0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(x), length(lambda), length(pobs0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- index0 = (x == 0)
+ index0 <- (x == 0)
if (log.arg) {
- ans[ index0] = log(pobs0[index0])
- ans[!index0] = log1p(-pobs0[!index0]) +
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
dpospois(x[!index0], lambda[!index0], log = TRUE)
} else {
- ans[ index0] = pobs0[index0]
- ans[!index0] = (1 - pobs0[!index0]) *
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1 - pobs0[!index0]) *
dpospois(x[!index0], lambda[!index0])
}
ans
@@ -162,48 +163,48 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
pzapois <- function(q, lambda, pobs0 = 0) {
- LLL = max(length(q), length(lambda), length(pobs0))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(q), length(lambda), length(pobs0))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- ans[q > 0] = pobs0[q > 0] +
+ ans[q > 0] <- pobs0[q > 0] +
(1-pobs0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
- ans[q < 0] = 0
- ans[q == 0] = pobs0[q == 0]
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
ans
}
qzapois <- function(p, lambda, pobs0 = 0) {
- LLL = max(length(p), length(lambda), length(pobs0))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ LLL <- max(length(p), length(lambda), length(pobs0))
+ if (length(p) != LLL) p <- rep(p, len = LLL);
+ if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
- ans = p
- ind4 = (p > pobs0)
- ans[!ind4] = 0
- ans[ ind4] = qpospois((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ ans <- p
+ ind4 <- (p > pobs0)
+ ans[!ind4] <- 0
+ ans[ ind4] <- qpospois((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
lambda = lambda[ind4])
ans
}
rzapois <- function(n, lambda, pobs0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- ans = rpospois(use.n, lambda)
+ ans <- rpospois(use.n, lambda)
if (length(pobs0) != use.n)
- pobs0 = rep(pobs0, length = use.n)
+ pobs0 <- rep(pobs0, length = use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -222,31 +223,31 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(lambda), length(pstr0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(x), length(lambda), length(pstr0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = x + lambda + pstr0
+ ans <- x + lambda + pstr0
- index0 = (x == 0)
+ index0 <- (x == 0)
if (log.arg) {
- ans[ index0] = log(pstr0[ index0] + (1 - pstr0[ index0]) *
+ ans[ index0] <- log(pstr0[ index0] + (1 - pstr0[ index0]) *
dpois(x[ index0], lambda[ index0]))
- ans[!index0] = log1p(-pstr0[!index0]) +
+ ans[!index0] <- log1p(-pstr0[!index0]) +
dpois(x[!index0], lambda[!index0], log = TRUE)
} else {
- ans[ index0] = pstr0[ index0] + (1 - pstr0[ index0]) *
+ ans[ index0] <- pstr0[ index0] + (1 - pstr0[ index0]) *
dpois(x[ index0], lambda[ index0])
- ans[!index0] = (1 - pstr0[!index0]) * dpois(x[!index0], lambda[!index0])
+ ans[!index0] <- (1 - pstr0[!index0]) * dpois(x[!index0], lambda[!index0])
}
- deflat_limit = -1 / expm1(lambda)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ deflat_limit <- -1 / expm1(lambda)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -254,18 +255,18 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
pzipois <- function(q, lambda, pstr0 = 0) {
- LLL = max(length(pstr0), length(lambda), length(q))
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(q) != LLL) q = rep(q, len = LLL);
+ LLL <- max(length(pstr0), length(lambda), length(q))
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+ if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL);
+ if (length(q) != LLL) q <- rep(q, len = LLL);
- ans = ppois(q, lambda)
- ans = ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+ ans <- ppois(q, lambda)
+ ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
- deflat_limit = -1 / expm1(lambda)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ deflat_limit <- -1 / expm1(lambda)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
@@ -274,66 +275,66 @@ pzipois <- function(q, lambda, pstr0 = 0) {
qzipois <- function(p, lambda, pstr0 = 0) {
- LLL = max(length(p), length(lambda), length(pstr0))
+ LLL <- max(length(p), length(lambda), length(pstr0))
ans =
- p = rep(p, len = LLL)
- lambda = rep(lambda, len = LLL)
- pstr0 = rep(pstr0, len = LLL)
+ p <- rep(p, len = LLL)
+ lambda <- rep(lambda, len = LLL)
+ pstr0 <- rep(pstr0, len = LLL)
- ans[p <= pstr0] = 0
- pindex = (p > pstr0)
- ans[pindex] = qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
+ ans[p <= pstr0] <- 0
+ pindex <- (p > pstr0)
+ ans[pindex] <- qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
lambda = lambda[pindex])
- deflat_limit = -1 / expm1(lambda)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ deflat_limit <- -1 / expm1(lambda)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * exp(-lambda[ind0])
- ans[p[ind0] <= pobs0] = 0
- pindex = (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * exp(-lambda[pindex])
- ans[pindex] = qpospois((p[pindex] - Pobs0) / (1 - Pobs0),
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * exp(-lambda[ind0])
+ ans[p[ind0] <= pobs0] <- 0
+ pindex <- (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * exp(-lambda[pindex])
+ ans[pindex] <- qpospois((p[pindex] - Pobs0) / (1 - Pobs0),
lambda = lambda[pindex])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
- ans[p < 0] = NaN
- ans[p > 1] = NaN
+ ans[p < 0] <- NaN
+ ans[p > 1] <- NaN
ans
}
rzipois <- function(n, lambda, pstr0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- if (length(pstr0) != use.n) pstr0 = rep(pstr0, len = use.n);
- if (length(lambda) != use.n) lambda = rep(lambda, len = use.n);
+ if (length(pstr0) != use.n) pstr0 <- rep(pstr0, len = use.n);
+ if (length(lambda) != use.n) lambda <- rep(lambda, len = use.n);
- ans = rpois(use.n, lambda)
- ans = ifelse(runif(use.n) < pstr0, 0, ans)
+ ans <- rpois(use.n, lambda)
+ ans <- ifelse(runif(use.n) < pstr0, 0, ans)
- prob0 = exp(-lambda)
- deflat_limit = -1 / expm1(lambda)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- exp(-lambda)
+ deflat_limit <- -1 / expm1(lambda)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] = rpospois(sum(ind0), lambda[ind0])
- ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] <- rpospois(sum(ind0), lambda[ind0])
+ ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
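
A brief spot-check of the zero-inflated Poisson helpers whose assignments are converted above (a sketch, assuming dzipois/pzipois/qzipois/rzipois are sourced unchanged; 'phi' here is just an illustrative value of pstr0).

## Spot-checks for the zero-inflated Poisson helpers above.
set.seed(4)
lam <- 3; phi <- 0.25            # phi plays the role of pstr0
# probabilities sum to (essentially) 1 over a wide enough support
stopifnot(abs(sum(dzipois(0:100, lambda = lam, pstr0 = phi)) - 1) < 1e-10)
# the cdf at 0 equals the total probability of a zero
stopifnot(isTRUE(all.equal(pzipois(0, lam, pstr0 = phi),
                           phi + (1 - phi) * dpois(0, lam))))
# random draws show roughly the right zero proportion
y <- rzipois(1e4, lambda = lam, pstr0 = phi)
mean(y == 0)                      # close to phi + (1 - phi) * exp(-lam)
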
@@ -363,19 +364,19 @@ rzipois <- function(n, lambda, pstr0 = 0) {
first = eval(substitute(expression({
zero <- y == 0
if (any(zero)) {
- if (length(extra)) extra$sumw = sum(w) else
- extra = list(sumw=sum(w))
+ if (length(extra)) extra$sumw <- sum(w) else
+ extra <- list(sumw=sum(w))
if (is.numeric(.n.arg) && extra$sumw != .n.arg)
stop("value of 'n.arg' conflicts with data ",
"(it need not be specified anyway)")
warning("trimming out the zero observations")
- axa.save = attr(x, "assign")
- x = x[!zero,, drop = FALSE]
- attr(x, "assign") = axa.save # Don't lose these!!
- w = w[!zero]
- y = y[!zero]
+ axa.save <- attr(x, "assign")
+ x <- x[!zero,, drop = FALSE]
+ attr(x, "assign") <- axa.save # Don't lose these!!
+ w <- w[!zero]
+ y <- y[!zero]
} else {
if (!is.numeric(.n.arg))
stop("n.arg must be supplied")
@@ -384,7 +385,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
}), list( .n.arg = n.arg ))),
initialize = eval(substitute(expression({
- narg = if (is.numeric(.n.arg)) .n.arg else extra$sumw
+ narg <- if (is.numeric(.n.arg)) .n.arg else extra$sumw
if (sum(w) > narg)
stop("sum(w) > narg")
@@ -397,43 +398,44 @@ rzipois <- function(n, lambda, pstr0 = 0) {
namesof("lambda", .link, list(theta = NULL), tag = FALSE)
if (!length(etastart)) {
- lambda.init = rep(median(y), length = length(y))
- etastart = theta2eta(lambda.init, .link , earg = .earg )
+ lambda.init <- rep(median(y), length = length(y))
+ etastart <- theta2eta(lambda.init, .link , earg = .earg )
}
if (length(extra)) {
- extra$sumw = sum(w)
- extra$narg = narg # For @linkinv
+ extra$sumw <- sum(w)
+ extra$narg <- narg # For @linkinv
} else {
- extra = list(sumw = sum(w), narg = narg)
+ extra <- list(sumw = sum(w), narg = narg)
}
}), list( .link = link, .earg = earg, .n.arg = n.arg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta, .link, .earg)
- temp5 = exp(-lambda)
- pstr0 = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
+ lambda <- eta2theta(eta, .link, .earg)
+ temp5 <- exp(-lambda)
+ pstr0 <- (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
if (any(pstr0 <= 0))
stop("non-positive value(s) of pstr0")
(1 - pstr0) * lambda
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(lambda = .link )
- misc$earg = list(lambda = .earg )
+ misc$link <- c(lambda = .link )
+
+ misc$earg <- list(lambda = .earg )
if (intercept.only) {
- suma = extra$sumw
- pstr0 = (1 - temp5[1] - suma / narg) / (1 - temp5[1])
- pstr0 = if (pstr0 < 0 || pstr0 > 1) NA else pstr0
- misc$pstr0 = pstr0
+ suma <- extra$sumw
+ pstr0 <- (1 - temp5[1] - suma / narg) / (1 - temp5[1])
+ pstr0 <- if (pstr0 < 0 || pstr0 > 1) NA else pstr0
+ misc$pstr0 <- pstr0
}
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE,
eta, extra = NULL) {
- lambda = eta2theta(eta, .link)
- temp5 = exp(-lambda)
- pstr0 = (1 - temp5 - extra$sumw / extra$narg) / (1 - temp5)
+ lambda <- eta2theta(eta, .link)
+ temp5 <- exp(-lambda)
+ pstr0 <- (1 - temp5 - extra$sumw / extra$narg) / (1 - temp5)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE))
@@ -442,15 +444,15 @@ rzipois <- function(n, lambda, pstr0 = 0) {
vfamily = c("yip88"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta, .link , earg = .earg )
- temp5 = exp(-lambda)
- dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
- dlambda.deta = dtheta.deta(lambda, .link , earg = .earg )
+ lambda <- eta2theta(eta, .link , earg = .earg )
+ temp5 <- exp(-lambda)
+ dl.dlambda <- -1 + y/lambda - temp5/(1-temp5)
+ dlambda.deta <- dtheta.deta(lambda, .link , earg = .earg )
w * dl.dlambda * dlambda.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(lambda, .link , earg = .earg )
- d2l.dlambda2 = -y / lambda^2 + temp5 / (1 - temp5)^2
+ d2lambda.deta2 <- d2theta.deta2(lambda, .link , earg = .earg )
+ d2l.dlambda2 <- -y / lambda^2 + temp5 / (1 - temp5)^2
-w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
}), list( .link = link, .earg = earg ))))
}
@@ -509,74 +511,74 @@ rzipois <- function(n, lambda, pstr0 = 0) {
y <- temp5$y
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
- mynames1 = if (ncoly == 1) "pobs0" else
- paste("pobs0", 1:ncoly, sep = "")
- mynames2 = if (ncoly == 1) "lambda" else
- paste("lambda", 1:ncoly, sep = "")
+ mynames1 <- if (ncoly == 1) "pobs0" else
+ paste("pobs0", 1:ncoly, sep = "")
+ mynames2 <- if (ncoly == 1) "lambda" else
+ paste("lambda", 1:ncoly, sep = "")
predictors.names <-
c(namesof(mynames1, .lpobs_0, earg = .epobs_0, tag = FALSE),
namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
interleave.VGAM(Musual*NOS, M = Musual)]
if (!length(etastart)) {
- etastart =
+ etastart <-
cbind(theta2eta((0.5 + w*y0) / (1+w),
.lpobs_0, earg = .epobs_0 ),
matrix(1, n, NOS)) # 1 here is any old value
for(spp. in 1:NOS) {
- sthese = skip.these[, spp.]
+ sthese <- skip.these[, spp.]
etastart[!sthese, NOS+spp.] =
theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
.llambda, earg = .elambda )
}
- etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
}
}), list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS = extra$NOS
+ NOS <- extra$NOS
Musual <- 2
- pobs_0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lpobs_0, earg = .epobs_0 ))
- lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
- .llambda, earg = .elambda ))
+ pobs_0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs_0, earg = .epobs_0 ))
+ lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
(1 - pobs_0) * lambda / (-expm1(-lambda))
}, list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
last = eval(substitute(expression({
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
- temp.names = c(rep( .lpobs_0 , len = NOS),
- rep( .llambda , len = NOS))
- temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
- misc$link = temp.names
+ temp.names <- c(rep( .lpobs_0 , len = NOS),
+ rep( .llambda , len = NOS))
+ temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$link <- temp.names
names(misc$link) <-
c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
- misc$earg = vector("list", Musual * NOS)
+ misc$earg <- vector("list", Musual * NOS)
names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = .epobs_0
- misc$earg[[Musual*ii ]] = .elambda
+ misc$earg[[Musual*ii-1]] <- .epobs_0
+ misc$earg[[Musual*ii ]] <- .elambda
}
}), list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
+ NOS <- extra$NOS
Musual <- 2
- pobs0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ pobs0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lpobs_0, earg = .epobs_0))
- lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
.llambda, earg = .elambda ))
if (residuals)
@@ -588,26 +590,26 @@ rzipois <- function(n, lambda, pstr0 = 0) {
vfamily = c("zapoisson"),
deriv = eval(substitute(expression({
Musual <- 2
- NOS = extra$NOS
- y0 = extra$y0
- skip = extra$skip.these
+ NOS <- extra$NOS
+ y0 <- extra$y0
+ skip <- extra$skip.these
- phimat = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lpobs_0, earg = .epobs_0 ))
- lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
- .llambda, earg = .elambda ))
+ phimat <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs_0, earg = .epobs_0 ))
+ lambda <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
- dl.dlambda = y / lambda + 1 / expm1(-lambda)
- dl.dphimat = -1 / (1 - phimat) # For y > 0 obsns
+ dl.dlambda <- y / lambda + 1 / expm1(-lambda)
+ dl.dphimat <- -1 / (1 - phimat) # For y > 0 obsns
for(spp. in 1:NOS) {
- dl.dphimat[skip[, spp.], spp.] = 1 / phimat[skip[, spp.], spp.]
- dl.dlambda[skip[, spp.], spp.] = 0
+ dl.dphimat[skip[, spp.], spp.] <- 1 / phimat[skip[, spp.], spp.]
+ dl.dlambda[skip[, spp.], spp.] <- 0
}
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- mu.phi0 = phimat
+ dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda)
+ mu.phi0 <- phimat
- temp3 = if (.lpobs_0 == "logit") {
+ temp3 <- if (.lpobs_0 == "logit") {
c(w) * (y0 - mu.phi0)
} else {
c(w) * dtheta.deta(mu.phi0, link = .lpobs_0 , earg = .epobs_0 ) *
@@ -616,24 +618,24 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- cbind(temp3,
c(w) * dl.dlambda * dlambda.deta)
- ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
}), list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, Musual * NOS)
+ wz <- matrix(0.0, n, Musual * NOS)
- temp5 = expm1(lambda)
- ed2l.dlambda2 = (1 - phimat) * (temp5 + 1) *
- (1 / lambda - 1 / temp5) / temp5
- wz[, NOS+(1:NOS)] = w * ed2l.dlambda2 * dlambda.deta^2
+ temp5 <- expm1(lambda)
+ ned2l.dlambda2 <- (1 - phimat) * (temp5 + 1) *
+ (1 / lambda - 1 / temp5) / temp5
+ wz[, NOS+(1:NOS)] <- w * ned2l.dlambda2 * dlambda.deta^2
- tmp100 = mu.phi0 * (1.0 - mu.phi0)
- tmp200 = if ( .lpobs_0 == "logit" && is.empty.list( .epobs_0 )) {
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if ( .lpobs_0 == "logit" && is.empty.list( .epobs_0 )) {
cbind(c(w) * tmp100)
} else {
cbind(c(w) * (1 / tmp100) *
@@ -643,16 +645,16 @@ rzipois <- function(n, lambda, pstr0 = 0) {
if (FALSE)
for(ii in 1:NOS) {
- index200 = abs(tmp200[, ii]) < .Machine$double.eps
+ index200 <- abs(tmp200[, ii]) < .Machine$double.eps
if (any(index200)) {
- tmp200[index200, ii] = 10.0 * .Machine$double.eps^(3/4)
+ tmp200[index200, ii] <- 10.0 * .Machine$double.eps^(3/4)
}
}
- wz[, 1:NOS] = tmp200
+ wz[, 1:NOS] <- tmp200
- wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
@@ -665,21 +667,19 @@ rzipois <- function(n, lambda, pstr0 = 0) {
-zanegbinomial.control <- function(save.weight = TRUE, ...)
-{
+zanegbinomial.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
- zanegbinomial =
+ zanegbinomial <-
function(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
ipobs0 = NULL, isize = NULL,
zero = c(-1, -3),
imethod = 1,
nsimEIM = 250,
- shrinkage.init = 0.95)
-{
+ shrinkage.init = 0.95) {
@@ -756,12 +756,12 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
y <- temp5$y
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- M = Musual * ncoly #
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ M <- Musual * ncoly #
- mynames1 = if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
- mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ mynames1 <- if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
+ mynames2 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
@@ -779,47 +779,48 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
index.posy <- (y[, iii] > 0)
if ( .imethod == 1) {
use.this <- weighted.mean(y[index.posy, iii],
- w[index.posy, iii])
- mu.init[ index.posy, iii] = (1 - .sinit ) * y[index.posy, iii] +
- .sinit * use.this
- mu.init[!index.posy, iii] = use.this
+ w[index.posy, iii])
+ mu.init[ index.posy, iii] <- (1 - .sinit ) * y[index.posy, iii] +
+ .sinit * use.this
+ mu.init[!index.posy, iii] <- use.this
} else {
use.this <-
mu.init[, iii] <- (y[, iii] +
- weighted.mean(y[index.posy, iii], w[index.posy, iii])) / 2
+ weighted.mean(y[index.posy, iii],
+ w[index.posy, iii])) / 2
}
if (TRUE) {
- max.use.this = 7 * use.this + 10
- vecTF = (mu.init[, iii] > max.use.this)
+ max.use.this <- 7 * use.this + 10
+ vecTF <- (mu.init[, iii] > max.use.this)
if (any(vecTF))
- mu.init[vecTF, iii] = max.use.this
+ mu.init[vecTF, iii] <- max.use.this
}
}
- pnb0 = matrix(if (length( .ipobs0 )) .ipobs0 else -1,
+ pnb0 <- matrix(if (length( .ipobs0 )) .ipobs0 else -1,
nrow = n, ncol = NOS, byrow = TRUE)
for(spp. in 1:NOS) {
if (any(pnb0[, spp.] < 0)) {
- index.y0 = y[, spp.] < 0.5
- pnb0[, spp.] = max(min(sum(index.y0)/n, 0.97), 0.03)
+ index.y0 <- y[, spp.] < 0.5
+ pnb0[, spp.] <- max(min(sum(index.y0)/n, 0.97), 0.03)
}
}
if ( is.Numeric( .isize )) {
- kmat0 = matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
+ kmat0 <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- munb = extraargs
+ munb <- extraargs
sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
- log = TRUE))
+ log = TRUE))
}
- k.grid = 2^((-6):6)
- kmat0 = matrix(0, nrow = n, ncol = NOS)
+ k.grid <- 2^((-6):6)
+ kmat0 <- matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- index.posy = y[, spp.] > 0
- posy = y[index.posy, spp.]
- kmat0[, spp.] = getMaxMin(k.grid,
+ index.posy <- y[, spp.] > 0
+ posy <- y[index.posy, spp.]
+ kmat0[, spp.] <- getMaxMin(k.grid,
objfun = posnegbinomial.Loglikfun,
y = posy, x = x[index.posy, ],
w = w[index.posy, spp.],
@@ -827,10 +828,10 @@ if (TRUE) {
}
}
- etastart = cbind(theta2eta(pnb0, .lpobs0 , earg = .epobs0 ),
+ etastart <- cbind(theta2eta(pnb0, .lpobs0 , earg = .epobs0 ),
theta2eta(mu.init, .lmunb , earg = .emunb ),
theta2eta(kmat0, .lsize , earg = .esize ))
- etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
} # End of if (!length(etastart))
@@ -854,23 +855,23 @@ if (TRUE) {
rep( .lmunb , length = NOS),
rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
M = Musual)]
- temp.names = c(mynames1,
+ temp.names <- c(mynames1,
mynames2,
mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
- names(misc$link) = temp.names
+ names(misc$link) <- temp.names
- misc$earg = vector("list", Musual*NOS)
- names(misc$earg) = temp.names
+ misc$earg <- vector("list", Musual*NOS)
+ names(misc$earg) <- temp.names
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] = .epobs0
- misc$earg[[Musual*ii-1]] = .emunb
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-2]] <- .epobs0
+ misc$earg[[Musual*ii-1]] <- .emunb
+ misc$earg[[Musual*ii ]] <- .esize
}
- misc$nsimEIM = .nsimEIM
- misc$imethod = .imethod
- misc$ipobs0 = .ipobs0
- misc$isize = .isize
+ misc$nsimEIM <- .nsimEIM
+ misc$imethod <- .imethod
+ misc$ipobs0 <- .ipobs0
+ misc$isize <- .isize
misc$multipleResponses <- TRUE
}), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize,
@@ -879,84 +880,84 @@ if (TRUE) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
+ NOS <- extra$NOS
Musual <- 3
- phi0 = eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
- munb = eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
+ phi0 <- eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
- log = TRUE))
+ log = TRUE))
}
}, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
vfamily = c("zanegbinomial"),
deriv = eval(substitute(expression({
Musual <- 3
- NOS = extra$NOS
- y0 = extra$y0
+ NOS <- extra$NOS
+ y0 <- extra$y0
- phi0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ phi0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
.lpobs0 , earg = .epobs0 )
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
- skip = extra$skip.these
+ skip <- extra$skip.these
- dphi0.deta = dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
- dmunb.deta = dtheta.deta(munb, .lmunb , earg = .emunb )
- dsize.deta = dtheta.deta(kmat, .lsize , earg = .esize )
+ dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
+ dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
- tempk = kmat / (kmat + munb)
- tempm = munb / (kmat + munb)
- prob0 = tempk^kmat
- oneminusf0 = 1 - prob0
- df0.dmunb = -tempk * prob0
- df0.dkmat = prob0 * (tempm + log(tempk))
+ tempk <- kmat / (kmat + munb)
+ tempm <- munb / (kmat + munb)
+ prob0 <- tempk^kmat
+ oneminusf0 <- 1 - prob0
+ df0.dmunb <- -tempk * prob0
+ df0.dkmat <- prob0 * (tempm + log(tempk))
- dl.dphi0 = -1 / (1 - phi0)
- dl.dmunb = y / munb - (y + kmat) / (munb + kmat) +
+ dl.dphi0 <- -1 / (1 - phi0)
+ dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
df0.dmunb / oneminusf0
- dl.dsize = digamma(y + kmat) - digamma(kmat) -
+ dl.dsize <- digamma(y + kmat) - digamma(kmat) -
(y + kmat)/(munb + kmat) + 1 + log(tempk) +
df0.dkmat / oneminusf0
- dl.dphi0[y == 0] = 1 / phi0[y == 0] # Do it in one line
- skip = extra$skip.these
+ dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
+ skip <- extra$skip.these
for(spp. in 1:NOS) {
- dl.dsize[skip[, spp.], spp.] =
- dl.dmunb[skip[, spp.], spp.] = 0
+ dl.dsize[skip[, spp.], spp.] <-
+ dl.dmunb[skip[, spp.], spp.] <- 0
}
- dl.deta23 = c(w) * cbind(dl.dmunb * dmunb.deta,
+ dl.deta23 <- c(w) * cbind(dl.dmunb * dmunb.deta,
dl.dsize * dsize.deta)
- muphi0 = phi0
- dl.deta1 = if ( .lpobs0 == "logit") {
+ muphi0 <- phi0
+ dl.deta1 <- if ( .lpobs0 == "logit") {
c(w) * (y0 - muphi0)
} else {
c(w) * dphi0.deta * (y0 / muphi0 - 1) / (1 - muphi0)
}
- ans = cbind(dl.deta1, dl.deta23)
- ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- cbind(dl.deta1, dl.deta23)
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
}), list( .lpobs0 = lpobs0 , .lmunb = lmunb , .lsize = lsize ,
.epobs0 = epobs0 , .emunb = emunb , .esize = esize ))),
weight = eval(substitute(expression({
- six = dimm(Musual)
+ six <- dimm(Musual)
wz =
- run.varcov = matrix(0.0, n, six*NOS-1)
- Musualm1 = Musual - 1
+ run.varcov <- matrix(0.0, n, six*NOS-1)
+ Musualm1 <- Musual - 1
@@ -964,40 +965,40 @@ if (TRUE) {
- ind2 = iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
+ ind2 <- iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
for(ii in 1:( .nsimEIM )) {
- ysim = rzanegbin(n = n*NOS, pobs0 = phi0,
+ ysim <- rzanegbin(n = n*NOS, pobs0 = phi0,
size = kmat, mu = munb)
- dim(ysim) = c(n, NOS)
+ dim(ysim) <- c(n, NOS)
- dl.dphi0 = -1 / (1 - phi0)
- dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat) +
+ dl.dphi0 <- -1 / (1 - phi0)
+ dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
df0.dmunb / oneminusf0
- dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
+ dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
(ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
df0.dkmat / oneminusf0
- dl.dphi0[ysim == 0] = 1 / phi0[ysim == 0] # Do it in one line
- ysim0 = ifelse(ysim == 0, 1, 0)
- skip.sim = matrix(as.logical(ysim0), n, NOS)
+ dl.dphi0[ysim == 0] <- 1 / phi0[ysim == 0] # Do it in one line
+ ysim0 <- ifelse(ysim == 0, 1, 0)
+ skip.sim <- matrix(as.logical(ysim0), n, NOS)
for(spp. in 1:NOS) {
- dl.dsize[skip.sim[, spp.], spp.] =
- dl.dmunb[skip.sim[, spp.], spp.] = 0
+ dl.dsize[skip.sim[, spp.], spp.] <-
+ dl.dmunb[skip.sim[, spp.], spp.] <- 0
}
for(kk in 1:NOS) {
- temp2 = cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
+ temp2 <- cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
dl.dsize[, kk] * dsize.deta[, kk])
- small.varcov = temp2[, ind2$row.index] *
+ small.varcov <- temp2[, ind2$row.index] *
temp2[, ind2$col.index]
@@ -1013,35 +1014,35 @@ if (TRUE) {
} # ii; end of nsimEIM
- run.varcov = cbind(run.varcov / .nsimEIM )
- run.varcov = if (intercept.only)
+ run.varcov <- cbind(run.varcov / .nsimEIM )
+ run.varcov <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wzind1 = sort(c( Musual*(1:NOS) - 1,
+ wzind1 <- sort(c( Musual*(1:NOS) - 1,
Musual*(1:NOS) - 0,
M + Musual*(1:NOS) - 1))
- wz[, wzind1] = c(w) * run.varcov[, wzind1]
+ wz[, wzind1] <- c(w) * run.varcov[, wzind1]
- tmp100 = muphi0 * (1 - muphi0)
- tmp200 = if ( .lpobs0 == "logit") {
+ tmp100 <- muphi0 * (1 - muphi0)
+ tmp200 <- if ( .lpobs0 == "logit") {
cbind(c(w) * tmp100)
} else {
c(w) * cbind(dphi0.deta^2 / tmp100)
}
for(ii in 1:NOS) {
- index200 = abs(tmp200[, ii]) < .Machine$double.eps
+ index200 <- abs(tmp200[, ii]) < .Machine$double.eps
if (any(index200)) {
- tmp200[index200, ii] = .Machine$double.eps # Diagonal 0's are bad
+ tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad
}
}
- wz[, Musual*(1:NOS)-2] = tmp200
+ wz[, Musual*(1:NOS)-2] <- tmp200
@@ -1069,14 +1070,14 @@ rposnegbin <- function(n, munb, size) {
if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
allowable.length = 1))
stop("argument 'n' must be a positive integer")
- ans = rnbinom(n=n, mu = munb, size=size)
- munb = rep(munb, length = n)
- size = rep(size, length = n)
- index = ans == 0
+ ans <- rnbinom(n = n, mu = munb, size = size)
+ munb <- rep(munb, length = n)
+ size <- rep(size, length = n)
+ index <- ans == 0
while(any(index)) {
- more = rnbinom(n=sum(index), mu = munb[index], size=size[index])
- ans[index] = more
- index = ans == 0
+ more <- rnbinom(n = sum(index), mu = munb[index], size = size[index])
+ ans[index] <- more
+ index <- ans == 0
}
ans
}
@@ -1087,10 +1088,10 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
stop("argument 'size' must be positive")
if (!is.Numeric(munb, positive = TRUE))
stop("argument 'munb' must be positive")
- ans = dnbinom(x = x, mu = munb, size=size, log=log)
- ans0 = dnbinom(x=0, mu = munb, size=size, log = FALSE)
- ans = if (log) ans - log1p(-ans0) else ans/(1-ans0)
- ans[x == 0] = if (log) -Inf else 0
+ ans <- dnbinom(x = x, mu = munb, size = size, log=log)
+ ans0 <- dnbinom(x=0, mu = munb, size = size, log = FALSE)
+ ans <- if (log) ans - log1p(-ans0) else ans/(1-ans0)
+ ans[x == 0] <- if (log) -Inf else 0
ans
}
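
For orientation, a quick sanity check of the two helpers just diffed: dposnegbin() is the positive (zero-truncated) negative binomial density used above for initial values, and rposnegbin() redraws any zeros until none remain. A sketch, assuming the patched package is loaded:

  library(VGAM)
  munb <- 3; size <- 1.5

  ## Zero-truncated NB density: dnbinom() rescaled by 1/(1 - P(Y = 0)),
  ## with mass exactly 0 at y = 0; it should sum to (almost) 1 over y >= 1.
  yy <- 1:200
  sum(dposnegbin(yy, munb = munb, size = size))    # ~ 1
  dposnegbin(0, munb = munb, size = size)          # 0

  set.seed(2)
  y <- rposnegbin(1000, munb = munb, size = size)
  min(y)                                           # >= 1
  mean(y)   # near munb / (1 - dnbinom(0, mu = munb, size = size))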
@@ -1107,13 +1108,10 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
zipoisson <- function(lpstr0 = "logit", llambda = "loge",
ipstr0 = NULL, ilambda = NULL,
imethod = 1,
- shrinkage.init = 0.8, zero = NULL)
-{
+ shrinkage.init = 0.8, zero = NULL) {
ipstr00 <- ipstr0
-
-
lpstr0 <- as.list(substitute(lpstr0))
epstr00 <- link2list(lpstr0)
lpstr00 <- attr(epstr00, "function.name")
@@ -1178,7 +1176,6 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
ncoly <- ncol(y)
-
Musual <- 2
extra$ncoly <- ncoly
extra$Musual <- Musual
@@ -1209,14 +1206,15 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
yvec <- y[, spp.]
Phi.init <- 1 - 0.85 * sum(w[yvec > 0]) / sum(w)
- Phi.init[Phi.init <= 0.02] = 0.02 # Last resort
- Phi.init[Phi.init >= 0.98] = 0.98 # Last resort
+ Phi.init[Phi.init <= 0.02] <- 0.02 # Last resort
+ Phi.init[Phi.init >= 0.98] <- 0.98 # Last resort
if ( length(mustart)) {
mustart <- matrix(mustart, n, ncoly) # Make sure right size
Lambda.init <- mustart / (1 - Phi.init)
} else if ( .imethod == 2) {
- mymean <- weighted.mean(yvec[yvec > 0], w[yvec > 0]) + 1/16
+ mymean <- weighted.mean(yvec[yvec > 0],
+ w[yvec > 0]) + 1/16
Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * mymean
} else {
use.this <- median(yvec[yvec > 0]) + 1 / 16
@@ -1254,8 +1252,8 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
.ipstr00 = ipstr00, .ilambda = ilambda,
.imethod = imethod, .sinit = shrinkage.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- phimat = eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
- lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
(1 - phimat) * lambda
}, list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda ))),
@@ -1270,8 +1268,8 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .epstr00
- misc$earg[[Musual*ii ]] <- .elambda
+ misc$earg[[Musual*ii-1]] <- .epstr00
+ misc$earg[[Musual*ii ]] <- .elambda
}
misc$Musual <- Musual
@@ -1279,39 +1277,43 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
misc$expected <- TRUE
misc$multipleResponses <- TRUE
- misc$pobs0 = phimat + (1 - phimat) * exp(-lambda) # P(Y=0)
+ misc$pobs0 <- phimat + (1 - phimat) * exp(-lambda) # P(Y=0)
if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pobs0) = dimnames(y)
-
+ dimnames(misc$pobs0) <- dimnames(y)
+
+ misc$pstr0 <- phimat
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pstr0) <- dimnames(y)
}), list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- phimat = eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
- lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
- log = TRUE))
+ log = TRUE))
}
- }, list( .lpstr00 = lpstr00, .llambda = llambda,
- .epstr00 = epstr00, .elambda = elambda ))),
+ }, list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
vfamily = c("zipoisson"),
deriv = eval(substitute(expression({
Musual <- 2
phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
- earg = .epstr00 )
+ earg = .epstr00 )
lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
- earg = .elambda )
+ earg = .elambda )
- prob0 <- phimat + (1 - phimat) * exp(-lambda)
+ prob0 <- exp(-lambda)
+ pobs0 <- phimat + (1 - phimat) * prob0
index0 <- as.matrix(y == 0)
- dl.dphimat <- -expm1(-lambda) / prob0
+ dl.dphimat <- -expm1(-lambda) / pobs0
dl.dphimat[!index0] <- -1 / (1 - phimat[!index0])
- dl.dlambda <- -(1 - phimat) * exp(-lambda) / prob0
+ dl.dlambda <- -(1 - phimat) * exp(-lambda) / pobs0
dl.dlambda[!index0] <- (y[!index0] - lambda[!index0]) / lambda[!index0]
dphimat.deta <- dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
@@ -1337,34 +1339,27 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
weight = eval(substitute(expression({
wz <- matrix(0.0, nrow = n, ncol = M + M-1)
- ned2l.dphimat2 <- -expm1(-lambda) / ((1 - phimat) * prob0)
- ned2l.dphimatlambda <- -exp(-lambda) / prob0
+ ned2l.dphimat2 <- -expm1(-lambda) / ((1 - phimat) * pobs0)
+ ned2l.dphimatlambda <- -exp(-lambda) / pobs0
ned2l.dlambda2 <- (1 - phimat) / lambda -
- phimat * (1 - phimat) * exp(-lambda) / prob0
+ phimat * (1 - phimat) * exp(-lambda) / pobs0
+ wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
+ c(w) * ned2l.dlambda2 * dlambda.deta^2,
+ c(w) * ned2l.dphimatlambda * dphimat.deta * dlambda.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
- ned2l.dphimat2 <- as.matrix(ned2l.dphimat2)
- ned2l.dlambda2 <- as.matrix(ned2l.dlambda2)
- ned2l.dphimatlambda <- as.matrix(ned2l.dphimatlambda)
-
- for (ii in 1:(M / Musual)) {
- wz[, iam(Musual * ii - 1, Musual * ii - 1, M)] <-
- ned2l.dphimat2[, ii] * dphimat.deta[, ii]^2
- wz[, iam(Musual * ii , Musual * ii , M)] <-
- ned2l.dlambda2[, ii] * dlambda.deta[, ii]^2
- wz[, iam(Musual * ii - 1, Musual * ii , M)] <-
- ned2l.dphimatlambda[, ii] * dphimat.deta[, ii] * dlambda.deta[, ii]
- }
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ wz
}), list( .llambda = llambda, .elambda = elambda ))))
-} # zipoisson
+} # zipoisson
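
A hedged fitting sketch for the revised zipoisson() family; vglm(), coef(..., matrix = TRUE) and the @misc slot are assumed from the wider package rather than from this hunk, and the data are simulated:

  library(VGAM)
  set.seed(3)
  zdata <- data.frame(x2 = runif(500))
  zdata <- transform(zdata,
                     y1 = rzipois(500, lambda = exp(1 + x2), pstr0 = 0.25))

  fit <- vglm(y1 ~ x2, zipoisson(), data = zdata, trace = TRUE)
  coef(fit, matrix = TRUE)    # eta1 = logit(pstr0), eta2 = loge(lambda)
  head(fit@misc$pstr0)        # stored by the new 'last' slot code above
  head(fit@misc$pobs0)        # P(Y = 0), also stored above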
@@ -1376,8 +1371,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
zibinomial <- function(lpstr0 = "logit", lprob = "logit",
ipstr0 = NULL,
- zero = 1, mv = FALSE, imethod = 1)
-{
+ zero = 1, mv = FALSE, imethod = 1) {
if (as.logical(mv))
stop("argument 'mv' must be FALSE")
@@ -1421,11 +1415,11 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
if (NCOL(y) == 1) {
if (is.factor(y)) y <- y != levels(y)[1]
- nn = rep(1, n)
+ nn <- rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1.0 + w)
+ mustart <- (0.5 + w * y) / (1.0 + w)
no.successes <- y
@@ -1478,9 +1472,9 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
}
}
- phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] <- 0.05 # Last resort
- phi.init[phi.init >= 0.99] <- 0.95 # Last resort
+ phi.init[phi.init <= -0.10] <- 0.10 # Lots of sample variation
+ phi.init[phi.init <= 0.05] <- 0.15 # Last resort
+ phi.init[phi.init >= 0.80] <- 0.80 # Last resort
if ( length(mustart) && !length(etastart))
mustart <- cbind(rep(phi.init, len = n),
@@ -1503,11 +1497,8 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
misc$imethod <- .imethod
- if (intercept.only && all(w == w[1])) {
- phi <- eta2theta(eta[1, 1], .lpstr0 , earg = .epstr0 )
- mubin <- eta2theta(eta[1, 2], .lprob , earg = .eprob )
- misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w[1] # P(Y=0)
- }
+ misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w # [1] # P(Y=0)
+ misc$pstr0 <- phi
}), list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob,
.imethod = imethod ))),
@@ -1533,12 +1524,12 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
prob0 <- (1 - mubin)^w # Actually q^w
- tmp8 <- phi + (1 - phi) * prob0
+ pobs0 <- phi + (1 - phi) * prob0
index <- (y == 0)
- dl.dphi <- (1 - prob0) / tmp8
+ dl.dphi <- (1 - prob0) / pobs0
dl.dphi[!index] <- -1 / (1 - phi[!index])
- dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / tmp8
+ dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / pobs0
dl.dmubin[!index] <- w[!index] *
( y[!index] / mubin[!index] -
(1 - y[!index]) / (1 - mubin[!index]))
@@ -1561,10 +1552,10 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
- ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * tmp8)
+ ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * pobs0)
- ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / tmp8
+ ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / pobs0
@@ -1572,7 +1563,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
- (1 - mubin - w * mubin * (1 - mubin)^w * phi / tmp8)
+ (1 - mubin - w * mubin * (1 - mubin)^w * phi / pobs0)
@@ -1605,16 +1596,16 @@ dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(size), length(prob), length(pstr0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(x), length(size), length(prob), length(pstr0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = dbinom(x = x, size = size, prob = prob, log = TRUE)
+ ans <- dbinom(x = x, size = size, prob = prob, log = TRUE)
- ans = if (log.arg) {
+ ans <- if (log.arg) {
ifelse(x == 0, log(pstr0 + (1-pstr0) * exp(ans)), log1p(-pstr0) + ans)
} else {
ifelse(x == 0, pstr0 + (1-pstr0) * exp(ans) ,
@@ -1622,10 +1613,10 @@ dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
}
- prob0 = (1 - prob)^size
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- (1 - prob)^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
@@ -1635,20 +1626,20 @@ dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
pzibinom <- function(q, size, prob, pstr0 = 0,
lower.tail = TRUE, log.p = FALSE) {
- LLL = max(length(pstr0), length(size), length(prob), length(q))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(pstr0), length(size), length(prob), length(q))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
- ans = ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+ ans <- pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
+ ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
- prob0 = (1 - prob)^size
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- (1 - prob)^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -1656,16 +1647,16 @@ pzibinom <- function(q, size, prob, pstr0 = 0,
qzibinom <- function(p, size, prob, pstr0 = 0,
lower.tail = TRUE, log.p = FALSE) {
- LLL = max(length(p), length(size), length(prob), length(pstr0))
- p = rep(p, length = LLL)
- size = rep(size, length = LLL)
- prob = rep(prob, length = LLL)
- pstr0 = rep(pstr0, length = LLL)
+ LLL <- max(length(p), length(size), length(prob), length(pstr0))
+ p <- rep(p, length = LLL)
+ size <- rep(size, length = LLL)
+ prob <- rep(prob, length = LLL)
+ pstr0 <- rep(pstr0, length = LLL)
- ans = p
- ans[p <= pstr0] = 0
- ans[p > pstr0] =
+ ans <- p
+ ans[p <= pstr0] <- 0
+ ans[p > pstr0] <-
qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
size[p > pstr0],
prob[p > pstr0],
@@ -1673,23 +1664,21 @@ qzibinom <- function(p, size, prob, pstr0 = 0,
- prob0 = (1 - prob)^size
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- (1 - prob)^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[p[ind0] <= pobs0] = 0
- pindex = (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] = qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[p[ind0] <= pobs0] <- 0
+ pindex <- (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+ ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
size = size[pindex],
prob = prob[pindex])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
-
-
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
@@ -1697,32 +1686,31 @@ qzibinom <- function(p, size, prob, pstr0 = 0,
rzibinom <- function(n, size, prob, pstr0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- pstr0 = rep(pstr0, len = use.n)
- size = rep(size, len = use.n)
- prob = rep(prob, len = use.n)
+ pstr0 <- rep(pstr0, len = use.n)
+ size <- rep(size, len = use.n)
+ prob <- rep(prob, len = use.n)
- ans = rbinom(use.n, size, prob)
+ ans <- rbinom(use.n, size, prob)
ans[runif(use.n) < pstr0] <- 0
- prob0 = (1 - prob)^size
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- (1 - prob)^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] = rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
- ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] <- rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
+ ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
-
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
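
The d/p/q/r zero-inflated binomial functions above share the deflation limit -prob0/(1 - prob0) with prob0 = (1 - prob)^size. A short consistency check, assuming the patched package:

  library(VGAM)
  size <- 10; prob <- 0.3; pstr0 <- 0.2

  ## The density sums to 1 and P(Y = 0) has the inflated form.
  sum(dzibinom(0:size, size, prob, pstr0 = pstr0))     # 1
  dzibinom(0, size, prob, pstr0 = pstr0)
  pstr0 + (1 - pstr0) * (1 - prob)^size                # same value

  ## The quantile function is a right-inverse of the distribution function.
  pvec <- c(0.05, 0.5, 0.95)
  kk <- qzibinom(pvec, size, prob, pstr0 = pstr0)
  pzibinom(kk, size, prob, pstr0 = pstr0) >= pvec      # all TRUE

  ## Simulated zero fraction agrees with P(Y = 0) above.
  set.seed(4)
  mean(rzibinom(10000, size, prob, pstr0 = pstr0) == 0)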
@@ -1751,25 +1739,25 @@ dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
rm(log)
- LLL = max(length(pstr0), length(size), length(prob), length(x))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(pstr0), length(size), length(prob), length(x))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = dnbinom(x = x, size = size, prob = prob, log = log.arg)
+ ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
- ans = if (log.arg)
+ ans <- if (log.arg)
ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
ifelse(x == 0, pstr0+(1-pstr0)* ans, (1-pstr0) * ans)
- prob0 = prob^size
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- prob^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
@@ -1783,24 +1771,23 @@ pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
prob <- size / (size + munb)
}
- LLL = max(length(pstr0), length(size), length(prob), length(q))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
-
+ LLL <- max(length(pstr0), length(size), length(prob), length(q))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = pnbinom(q = q, size = size, prob = prob)
- ans = ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+ ans <- pnbinom(q = q, size = size, prob = prob)
+ ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
- prob0 = prob^size
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- prob^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -1812,36 +1799,36 @@ qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
stop("arguments 'prob' and 'munb' both specified")
prob <- size/(size + munb)
}
- LLL = max(length(p), length(prob), length(pstr0), length(size))
- if (length(p) != LLL) p = rep(p, len = LLL)
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL);
-
- ans = p
- ind4 = (p > pstr0)
- ans[!ind4] = 0
- ans[ ind4] = qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
+ LLL <- max(length(p), length(prob), length(pstr0), length(size))
+ if (length(p) != LLL) p <- rep(p, len = LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL)
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+
+ ans <- p
+ ind4 <- (p > pstr0)
+ ans[!ind4] <- 0
+ ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
size = size[ind4], prob = prob[ind4])
- prob0 = prob^size
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- prob^size
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[p[ind0] <= pobs0] = 0
- pindex = (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] = qposnegbin((p[pindex] - Pobs0) / (1 - Pobs0),
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[p[ind0] <= pobs0] <- 0
+ pindex <- (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+ ans[pindex] <- qposnegbin((p[pindex] - Pobs0) / (1 - Pobs0),
size = size[pindex],
prob = prob[pindex])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
@@ -1856,34 +1843,34 @@ rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
prob <- size / (size + munb)
}
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- pstr0 = rep(pstr0, len = use.n)
- size = rep(size, len = use.n)
- prob = rep(prob, len = use.n)
+ pstr0 <- rep(pstr0, len = use.n)
+ size <- rep(size, len = use.n)
+ prob <- rep(prob, len = use.n)
- ans = rnbinom(n = use.n, size = size, prob = prob)
- ans = ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
+ ans <- rnbinom(n = use.n, size = size, prob = prob)
+ ans <- ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
- prob0 = rep(prob^size, len = use.n)
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- rep(prob^size, len = use.n)
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0, na.rm = TRUE)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] = rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] <- rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
prob = prob[ind0])
- ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
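
The zero-inflated negative binomial d/p/q/r functions accept either 'prob' or the mean-type 'munb' parameterisation (prob = size/(size + munb), as in the conversions diffed above), with prob0 = prob^size. A sketch, assuming the patched package:

  library(VGAM)
  size <- 2; munb <- 4; pstr0 <- 0.15
  prob <- size / (size + munb)     # equivalent 'prob' parameterisation

  ## Both parameterisations give the same density.
  all.equal(dzinegbin(0:5, size = size, munb = munb, pstr0 = pstr0),
            dzinegbin(0:5, size = size, prob = prob, pstr0 = pstr0))

  ## P(Y = 0) combines structural zeros and NB zeros.
  dzinegbin(0, size = size, munb = munb, pstr0 = pstr0)
  pstr0 + (1 - pstr0) * prob^size                      # same value

  ## Simulated zero fraction agrees with the theoretical P(Y = 0).
  set.seed(5)
  y <- rzinegbin(20000, size = size, munb = munb, pstr0 = pstr0)
  mean(y == 0)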
@@ -1894,8 +1881,7 @@ rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
-zinegbinomial.control <- function(save.weight = TRUE, ...)
-{
+zinegbinomial.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -1905,9 +1891,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
ipstr0 = NULL, isize = NULL,
zero = c(-1, -3),
imethod = 1, shrinkage.init = 0.95,
- nsimEIM = 250)
-{
-
+ nsimEIM = 250) {
lpstr0 <- as.list(substitute(lpstr0))
@@ -1981,13 +1965,13 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$dimnamesy2 <- dimnames(y)[[2]]
- mynames1 = if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
- mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ mynames1 <- if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
+ mynames2 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
predictors.names <-
c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
@@ -1995,13 +1979,13 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
interleave.VGAM(Musual*NOS, M = Musual)]
if (!length(etastart)) {
- mum.init = if ( .imethod == 3) {
+ mum.init <- if ( .imethod == 3) {
y + 1/16
} else {
- mum.init = y
+ mum.init <- y
for(iii in 1:ncol(y)) {
- index = (y[, iii] > 0)
- mum.init[, iii] = if ( .imethod == 2)
+ index <- (y[, iii] > 0)
+ mum.init[, iii] <- if ( .imethod == 2)
weighted.mean(y[index, iii], w = w[index, iii]) else
median(rep(y[index, iii], times = w[index, iii])) + 1/8
}
@@ -2009,14 +1993,14 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}
- pstr0.init = if (length( .ipstr0 )) {
+ pstr0.init <- if (length( .ipstr0 )) {
matrix( .ipstr0 , n, ncoly, byrow = TRUE)
} else {
- pstr0.init = y
+ pstr0.init <- y
for(iii in 1:ncol(y))
- pstr0.init[, iii] = sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
- pstr0.init[pstr0.init <= 0.02] = 0.02 # Last resort
- pstr0.init[pstr0.init >= 0.98] = 0.98 # Last resort
+ pstr0.init[, iii] <- sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
+ pstr0.init[pstr0.init <= 0.02] <- 0.02 # Last resort
+ pstr0.init[pstr0.init >= 0.98] <- 0.98 # Last resort
pstr0.init
}
@@ -2025,28 +2009,28 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
} else {
zinegbin.Loglikfun <- function(kval, y, x, w, extraargs) {
- index0 = (y == 0)
- pstr0vec = extraargs$pstr0
- muvec = extraargs$mu
+ index0 <- (y == 0)
+ pstr0vec <- extraargs$pstr0
+ muvec <- extraargs$mu
- ans1 = 0.0
+ ans1 <- 0.0
if (any( index0))
- ans1 = ans1 + sum(w[ index0] *
+ ans1 <- ans1 + sum(w[ index0] *
dzinegbin(x = y[ index0], size = kval,
munb = muvec[ index0],
pstr0 = pstr0vec[ index0], log = TRUE))
if (any(!index0))
- ans1 = ans1 + sum(w[!index0] *
+ ans1 <- ans1 + sum(w[!index0] *
dzinegbin(x = y[!index0], size = kval,
munb = muvec[!index0],
pstr0 = pstr0vec[!index0], log = TRUE))
ans1
}
- k.grid = 2^((-6):6)
- kay.init = matrix(0, nrow = n, ncol = NOS)
+ k.grid <- 2^((-6):6)
+ kay.init <- matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kay.init[, spp.] = getMaxMin(k.grid,
+ kay.init[, spp.] <- getMaxMin(k.grid,
objfun = zinegbin.Loglikfun,
y = y[, spp.], x = x, w = w[, spp.],
extraargs = list(pstr0 = pstr0.init[, spp.],
@@ -2055,10 +2039,10 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
kay.init
}
- etastart = cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
+ etastart <- cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
theta2eta(mum.init, .lmunb , earg = .emunb ),
theta2eta(kay.init, .lsize , earg = .esize ))
- etastart =
+ etastart <-
etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
}
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
@@ -2080,7 +2064,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
.epstr0 = epstr0, .esize = esize, .emunb = emunb ))),
last = eval(substitute(expression({
- misc$link =
+ misc$link <-
c(rep( .lpstr0 , length = NOS),
rep( .lmunb , length = NOS),
rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
@@ -2089,31 +2073,28 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
c(mynames1,
mynames2,
mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
- names(misc$link) = temp.names
+ names(misc$link) <- temp.names
- misc$earg = vector("list", Musual*NOS)
- names(misc$earg) = temp.names
+ misc$earg <- vector("list", Musual*NOS)
+ names(misc$earg) <- temp.names
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] = .epstr0
- misc$earg[[Musual*ii-1]] = .emunb
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-2]] <- .epstr0
+ misc$earg[[Musual*ii-1]] <- .emunb
+ misc$earg[[Musual*ii ]] <- .esize
}
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- misc$Musual = Musual
- misc$ipstr0 = .ipstr0
- misc$isize = .isize
+ misc$imethod <- .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
+ misc$Musual <- Musual
+ misc$ipstr0 <- .ipstr0
+ misc$isize <- .isize
misc$multipleResponses <- TRUE
- if (intercept.only) {
- pstr0.val = eta2theta(eta[1, Musual*(1:NOS)-2], .lpstr0 , earg= .epstr0 )
- munb.val = eta2theta(eta[1, Musual*(1:NOS)-1], .lmunb , earg= .emunb )
- kval = eta2theta(eta[1, Musual*(1:NOS) ], .lsize , earg= .esize )
- misc$pobs0 = pstr0.val +
- (1 - pstr0.val) * (kval / (kval + munb.val))^kval # P(Y=0)
- }
+
+
+ misc$pobs0 <- pstr0 + (1 - pstr0) * (kmat / (kmat + munb))^kmat # P(Y=0)
+ misc$pstr0 <- pstr0
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
.ipstr0 = ipstr0, .isize = isize,
@@ -2121,12 +2102,12 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 3
- NOS = extra$NOS
- pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ NOS <- extra$NOS
+ pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
.lpstr0 , earg = .epstr0 )
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
@@ -2138,18 +2119,18 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
vfamily = c("zinegbinomial"),
deriv = eval(substitute(expression({
Musual <- 3
- NOS = extra$NOS
+ NOS <- extra$NOS
- pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
.lpstr0 , earg = .epstr0 )
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
- kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ kmat <- eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
- dpstr0.deta = dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
- dmunb.deta = dtheta.deta(munb , .lmunb , earg = .emunb )
- dsize.deta = dtheta.deta(kmat , .lsize , earg = .esize )
+ dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
+ dmunb.deta <- dtheta.deta(munb , .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat , .lsize , earg = .esize )
dthetas.detas =
(cbind(dpstr0.deta,
dmunb.deta,
@@ -2157,39 +2138,39 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
- dl.dpstr0 = -1 / (1 - pstr0)
- dl.dmunb = y / munb - (y + kmat) / (munb + kmat)
- dl.dsize = digamma(y + kmat) - digamma(kmat) -
+ dl.dpstr0 <- -1 / (1 - pstr0)
+ dl.dmunb <- y / munb - (y + kmat) / (munb + kmat)
+ dl.dsize <- digamma(y + kmat) - digamma(kmat) -
(y + kmat) / (munb + kmat) + 1 +
log(kmat / (kmat + munb))
for(spp. in 1:NOS) {
- index0 = (y[, spp.] == 0)
+ index0 <- (y[, spp.] == 0)
if (!any(index0) || !any(!index0))
stop("must have some 0s AND some positive counts in the data")
- kmat. = kmat[index0, spp.]
- munb. = munb[index0, spp.]
- pstr0. = pstr0[index0, spp.]
+ kmat. <- kmat[index0, spp.]
+ munb. <- munb[index0, spp.]
+ pstr0. <- pstr0[index0, spp.]
- tempk. = kmat. / (kmat. + munb.)
- tempm. = munb. / (kmat. + munb.)
- prob0. = tempk.^kmat.
- df0.dmunb. = -tempk.* prob0.
- df0.dkmat. = prob0. * (tempm. + log(tempk.))
+ tempk. <- kmat. / (kmat. + munb.)
+ tempm. <- munb. / (kmat. + munb.)
+ prob0. <- tempk.^kmat.
+ df0.dmunb. <- -tempk.* prob0.
+ df0.dkmat. <- prob0. * (tempm. + log(tempk.))
- denom. = pstr0. + (1 - pstr0.) * prob0.
- dl.dpstr0[index0, spp.] = (1 - prob0.) / denom.
- dl.dmunb[index0, spp.] = (1 - pstr0.) * df0.dmunb. / denom.
- dl.dsize[index0, spp.] = (1 - pstr0.) * df0.dkmat. / denom.
+ denom. <- pstr0. + (1 - pstr0.) * prob0.
+ dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
+ dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
+ dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
} # of spp.
- dl.dthetas =
+ dl.dthetas <-
cbind(dl.dpstr0,
dl.dmunb,
dl.dsize)[, interleave.VGAM(Musual*NOS, M = Musual)]
@@ -2203,54 +2184,54 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
- wz = matrix(0, n, Musual*M - Musual)
+ wz <- matrix(0, n, Musual*M - Musual)
- ind3 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ ind3 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
- run.varcov = array(0.0, c(n, length(ind3$row.index), NOS))
+ run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
for(ii in 1:( .nsimEIM )) {
- ysim = rzinegbin(n = n*NOS, pstr0 = pstr0,
+ ysim <- rzinegbin(n = n*NOS, pstr0 = pstr0,
size = kmat, mu = munb)
- dim(ysim) = c(n, NOS)
- index0 = (ysim[, spp.] == 0)
+ dim(ysim) <- c(n, NOS)
+ index0 <- (ysim[, spp.] == 0)
- dl.dpstr0 = -1 / (1 - pstr0)
- dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat)
- dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
+ dl.dpstr0 <- -1 / (1 - pstr0)
+ dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat)
+ dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
(ysim + kmat) / (munb + kmat) + 1 +
log(kmat / (kmat + munb))
for(spp. in 1:NOS) {
- index0 = (ysim[, spp.] == 0)
+ index0 <- (ysim[, spp.] == 0)
if (!any(index0) || !any(!index0))
stop("must have some 0s AND some positive counts in the data")
- kmat. = kmat[index0, spp.]
- munb. = munb[index0, spp.]
- pstr0. = pstr0[index0, spp.]
+ kmat. <- kmat[index0, spp.]
+ munb. <- munb[index0, spp.]
+ pstr0. <- pstr0[index0, spp.]
- tempk. = kmat. / (kmat. + munb.)
- tempm. = munb. / (kmat. + munb.)
- prob0. = tempk.^kmat.
- df0.dmunb. = -tempk.* prob0.
- df0.dkmat. = prob0. * (tempm. + log(tempk.))
+ tempk. <- kmat. / (kmat. + munb.)
+ tempm. <- munb. / (kmat. + munb.)
+ prob0. <- tempk.^kmat.
+ df0.dmunb. <- -tempk.* prob0.
+ df0.dkmat. <- prob0. * (tempm. + log(tempk.))
- denom. = pstr0. + (1 - pstr0.) * prob0.
- dl.dpstr0[index0, spp.] = (1 - prob0.) / denom.
- dl.dmunb[index0, spp.] = (1 - pstr0.) * df0.dmunb. / denom.
- dl.dsize[index0, spp.] = (1 - pstr0.) * df0.dkmat. / denom.
+ denom. <- pstr0. + (1 - pstr0.) * prob0.
+ dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
+ dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
+ dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
- sdl.dthetas = cbind(dl.dpstr0[, spp.],
+ sdl.dthetas <- cbind(dl.dpstr0[, spp.],
dl.dmunb[, spp.],
dl.dsize[, spp.])
- temp3 = sdl.dthetas
- run.varcov[,, spp.] = run.varcov[,, spp.] +
+ temp3 <- sdl.dthetas
+ run.varcov[,, spp.] <- run.varcov[,, spp.] +
temp3[, ind3$row.index] *
temp3[, ind3$col.index]
@@ -2258,12 +2239,12 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
} # End of for(spp.) loop
} # End of ii nsimEIM loop
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
- wz1 = if (intercept.only) {
+ wz1 <- if (intercept.only) {
for(spp. in 1:NOS) {
for(jay in 1:length(ind3$row.index)) {
- run.varcov[, jay, spp.] = mean(run.varcov[, jay, spp.])
+ run.varcov[, jay, spp.] <- mean(run.varcov[, jay, spp.])
}
}
run.varcov
@@ -2272,7 +2253,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}
for(spp. in 1:NOS) {
- wz1[,, spp.] = wz1[,, spp.] *
+ wz1[,, spp.] <- wz1[,, spp.] *
dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
dthetas.detas[, Musual * (spp. - 1) + ind3$col]
}
@@ -2280,10 +2261,10 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
for(spp. in 1:NOS) {
for(jay in 1:Musual) {
for(kay in jay:Musual) {
- cptr = iam((spp. - 1) * Musual + jay,
+ cptr <- iam((spp. - 1) * Musual + jay,
(spp. - 1) * Musual + kay, M = M)
- temp.wz1 = wz1[,, spp.]
- wz[, cptr] = temp.wz1[, iam(jay, kay, M = Musual)]
+ temp.wz1 <- wz1[,, spp.]
+ wz[, cptr] <- temp.wz1[, iam(jay, kay, M = Musual)]
}
}
}
@@ -2303,8 +2284,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
zipoissonff <- function(llambda = "loge", lprobp = "logit",
ilambda = NULL, iprobp = NULL, imethod = 1,
- shrinkage.init = 0.8, zero = -2)
-{
+ shrinkage.init = 0.8, zero = -2) {
lprobp. <- lprobp
iprobp. <- iprobp
@@ -2399,14 +2379,15 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
yjay <- y[, jay]
Phi0.init <- 1 - 0.85 * sum(w[yjay > 0]) / sum(w)
- Phi0.init[Phi0.init <= 0.02] = 0.02 # Last resort
- Phi0.init[Phi0.init >= 0.98] = 0.98 # Last resort
+ Phi0.init[Phi0.init <= 0.02] <- 0.02 # Last resort
+ Phi0.init[Phi0.init >= 0.98] <- 0.98 # Last resort
if ( length(mustart)) {
mustart <- matrix(mustart, n, ncoly) # Make sure right size
Lambda.init <- mustart / (1 - Phi0.init)
} else if ( .imethod == 2) {
- mymean <- weighted.mean(yjay[yjay > 0], w[yjay > 0]) + 1/16
+ mymean <- weighted.mean(yjay[yjay > 0],
+ w[yjay > 0]) + 1/16
Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * mymean
} else {
use.this <- median(yjay[yjay > 0]) + 1 / 16
@@ -2471,13 +2452,18 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
misc$Musual <- Musual
misc$imethod <- .imethod
- misc$expected = TRUE
+ misc$expected <- TRUE
misc$multipleResponses <- TRUE
- misc$pobs0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
+ misc$pobs0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
misc$pobs0 <- as.matrix(misc$pobs0)
if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pobs0) = dimnames(y)
+ dimnames(misc$pobs0) <- dimnames(y)
+
+ misc$pstr0 <- (1 - probp.)
+ misc$pstr0 <- as.matrix(misc$pstr0)
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pstr0) <- dimnames(y)
}), list( .lprobp. = lprobp., .llambda = llambda,
.eprobp. = eprobp., .elambda = elambda,
.imethod = imethod ))),
@@ -2494,7 +2480,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzipois(x = y, pstr0 = 1 - probp., lambda = lambda,
- log = TRUE))
+ log = TRUE))
}
}, list( .lprobp. = lprobp., .llambda = llambda,
.eprobp. = eprobp., .elambda = elambda ))),
@@ -2540,39 +2526,19 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
weight = eval(substitute(expression({
- wz <- matrix(0, nrow = n, ncol = M + M-1)
ned2l.dlambda2 <- ( probp.) / lambda -
probp. * (1 - probp.) * exp(-lambda) / denom
ned2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * denom)
ned2l.dphilambda <- +exp(-lambda) / denom
- if (ncoly == 1) { # Make sure these are matrices
- ned2l.dlambda2 <- cbind(ned2l.dlambda2)
- ned2l.dprobp.2 <- cbind(ned2l.dprobp.2)
- dlambda.deta <- cbind(dlambda.deta)
- dprobp..deta <- cbind(dprobp..deta)
- ned2l.dphilambda <- cbind(ned2l.dphilambda)
- }
-
- for (ii in 1:ncoly) {
- wz[, iam(Musual*ii - 1, Musual*ii - 1, M)] <-
- ned2l.dlambda2[, ii] *
- dlambda.deta[, ii]^2
- wz[, iam(Musual*ii , Musual*ii , M)] <-
- ned2l.dprobp.2[, ii] *
- dprobp..deta[, ii]^2
- wz[, iam(Musual*ii - 1, Musual*ii , M)] <-
- ned2l.dphilambda[, ii] *
- dprobp..deta[, ii] *
- dlambda.deta[, ii]
-
-
-
- } # ii
-
+ wz <- array(c(c(w) * ned2l.dlambda2 * dlambda.deta^2,
+ c(w) * ned2l.dprobp.2 * dprobp..deta^2,
+ c(w) * ned2l.dphilambda * dprobp..deta * dlambda.deta),
+ dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ wz
}), list( .llambda = llambda ))))
}
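A minimal usage sketch of the updated zipoissonff() family (not part of the patch; the data and coefficients below are invented for illustration). Note that zipoissonff() models probp = 1 - pstr0, the probability of the non-degenerate Poisson component, which is why misc$pstr0 is stored as 1 - probp. above.
## Illustrative only: simulate zero-inflated Poisson data and fit zipoissonff().
library(VGAM)
set.seed(123)
zdata <- data.frame(x2 = runif(1000))
zdata <- transform(zdata,
                   y1 = rzipois(1000, lambda = exp(1 + 0.5 * x2), pstr0 = 0.3))
fit <- vglm(y1 ~ x2, zipoissonff, data = zdata, trace = TRUE)
coef(fit, matrix = TRUE)   # columns: loge(lambda), logit(probp)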
@@ -2587,16 +2553,16 @@ dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(prob), length(pstr0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(x), length(prob), length(pstr0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = dgeom(x = x, prob = prob, log = TRUE)
+ ans <- dgeom(x = x, prob = prob, log = TRUE)
- ans = if (log.arg) {
+ ans <- if (log.arg) {
ifelse(x == 0, log(pstr0 + (1 - pstr0) * exp(ans)),
log1p(-pstr0) + ans)
} else {
@@ -2606,10 +2572,10 @@ dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
- prob0 = prob
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- prob
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -2619,19 +2585,19 @@ dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
pzigeom <- function(q, prob, pstr0 = 0) {
- LLL = max(length(q), length(prob), length(pstr0))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ LLL <- max(length(q), length(prob), length(pstr0))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans = pgeom(q, prob)
- ans = ifelse(q < 0, 0, pstr0 + (1-pstr0) * ans)
+ ans <- pgeom(q, prob)
+ ans <- ifelse(q < 0, 0, pstr0 + (1-pstr0) * ans)
- prob0 = prob
- deflat_limit = -prob0 / (1 - prob0)
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ prob0 <- prob
+ deflat_limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -2639,31 +2605,31 @@ pzigeom <- function(q, prob, pstr0 = 0) {
qzigeom <- function(p, prob, pstr0 = 0) {
- LLL = max(length(p), length(prob), length(pstr0))
- ans = p = rep(p, len = LLL)
- prob = rep(prob, len = LLL)
- pstr0 = rep(pstr0, len = LLL)
- ans[p <= pstr0] = 0
- ind1 = (p > pstr0)
- ans[ind1] =
+ LLL <- max(length(p), length(prob), length(pstr0))
+ ans <- p <- rep(p, len = LLL)
+ prob <- rep(prob, len = LLL)
+ pstr0 <- rep(pstr0, len = LLL)
+ ans[p <= pstr0] <- 0
+ ind1 <- (p > pstr0)
+ ans[ind1] <-
qgeom((p[ind1] - pstr0[ind1]) / (1 - pstr0[ind1]),
prob = prob[ind1])
- prob0 = prob
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- prob
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[p[ind0] <= pobs0] = 0
- pindex = (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] = 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[p[ind0] <= pobs0] <- 0
+ pindex <- (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+ ans[pindex] <- 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
prob = prob[pindex])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
}
@@ -2671,31 +2637,31 @@ qzigeom <- function(p, prob, pstr0 = 0) {
rzigeom <- function(n, prob, pstr0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- pstr0 = rep(pstr0, len = use.n)
- prob = rep(prob, len = use.n)
+ pstr0 <- rep(pstr0, len = use.n)
+ prob <- rep(prob, len = use.n)
- ans = rgeom(use.n, prob)
- ans[runif(use.n) < pstr0] = 0
+ ans <- rgeom(use.n, prob)
+ ans[runif(use.n) < pstr0] <- 0
- prob0 = prob
- deflat_limit = -prob0 / (1 - prob0)
- ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ prob0 <- prob
+ deflat_limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat_limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
- pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] = 1 + rgeom(sum(ind0), prob = prob[ind0])
- ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] <- 1 + rgeom(sum(ind0), prob = prob[ind0])
+ ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
}
- ans[pstr0 < deflat_limit] = NaN
- ans[pstr0 > 1] = NaN
+ ans[pstr0 < deflat_limit] <- NaN
+ ans[pstr0 > 1] <- NaN
ans
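A small numerical check of the dzigeom()/pzigeom()/qzigeom()/rzigeom() functions above (not part of the patch; the parameter values are arbitrary). The zero-inflated geometric puts mass pstr0 + (1 - pstr0) * dgeom(0, prob) at zero and scales the remaining geometric mass by 1 - pstr0.
library(VGAM)
prob <- 0.4; pstr0 <- 0.25
dzigeom(0, prob = prob, pstr0 = pstr0)
pstr0 + (1 - pstr0) * dgeom(0, prob = prob)       # same value: P(Y = 0)
pzigeom(5, prob = prob, pstr0 = pstr0)
pstr0 + (1 - pstr0) * pgeom(5, prob = prob)       # same value: the mixture cdf
sum(dzigeom(0:5000, prob = prob, pstr0 = pstr0))  # essentially 1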
@@ -2710,11 +2676,10 @@ rzigeom <- function(n, prob, pstr0 = 0) {
iprob = NULL, ipstr0 = NULL,
imethod = 1,
bias.red = 0.5,
- zero = 2)
-{
+ zero = 2) {
- expected = TRUE
+ expected <- TRUE
@@ -2759,64 +2724,88 @@ rzigeom <- function(n, prob, pstr0 = 0) {
namesof("pstr0", lpstr0, earg = epstr0), "\n",
"Mean: (1 - pstr0) * (1 - prob) / prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero , M)
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(Musual = 2,
zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a 1-column matrix")
+ Musual <- 2
if (any(y < 0))
- stop("all responses must be >= 0")
-
+ stop("the response must not have negative values")
+ temp5 <-
w.y.check(w = w, y = y,
- ncol.w.max = 1,
- ncol.y.max = 1,
- Is.integer.y = TRUE)
-
-
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ mynames1 <- if (ncoly == 1) "prob" else
+ paste("prob", 1:ncoly, sep = "")
+ mynames2 <- if (ncoly == 1) "pobs0" else
+ paste("pobs0", 1:ncoly, sep = "")
predictors.names <-
- c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
- namesof("pstr0", .lpstr0, earg = .epstr0, tag = FALSE))
+ c(namesof(mynames1, .lprob, earg = .eprob, tag = FALSE),
+ namesof(mynames2, .lpstr0, earg = .epstr0, tag = FALSE))[
+ interleave.VGAM(Musual*NOS, M = Musual)]
+
if (!length(etastart)) {
- prob.init = if ( .imethod == 3)
- .bias.red / (1 + y + 1/8) else
- if ( .imethod == 2)
- .bias.red / (1 + mean(y) + 1/8) else
- .bias.red / (1 + weighted.mean(y, w) + 1/8)
- prob.init = if (length( .iprob )) {
- rep( .iprob, len = n)
+ prob.init <- if ( .imethod == 3)
+ .bias.red / (1 + y + 1/8) else
+ if ( .imethod == 2)
+ .bias.red / (1 +
+ matrix(colMeans(y) + 1/8,
+ n, ncoly, byrow = TRUE)) else
+ .bias.red / (1 +
+ matrix(colSums(y * w) / colSums(w) + 1/8,
+ n, ncoly, byrow = TRUE))
+
+ prob.init <- if (length( .iprob )) {
+ matrix( .iprob , n, ncoly, byrow = TRUE)
} else {
- rep(prob.init, len = n)
+ prob.init # Already a matrix
}
- prob0.est = sum(w[y == 0]) / sum(w)
- psze.init = if ( .imethod == 3)
- prob0.est / 2 else
- if ( .imethod == 1)
- max(0.05, (prob0.est - median(prob.init))) else
- prob0.est / 5
- psze.init = if (length( .ipstr0 )) {
- rep( .ipstr0 , len = n)
+ prob0.est <- psze.init <- matrix(0, n, NOS)
+ for (jlocal in 1:NOS) {
+ prob0.est[, jlocal] <-
+ sum(w[y[, jlocal] == 0, jlocal]) / sum(w[, jlocal])
+ psze.init[, jlocal] <- if ( .imethod == 3)
+ prob0.est[, jlocal] / 2 else
+ if ( .imethod == 1)
+ pmax(0.05, (prob0.est[, jlocal] -
+ median(prob.init[, jlocal]))) else
+ prob0.est[, jlocal] / 5
+ }
+ psze.init <- if (length( .ipstr0 )) {
+ matrix( .ipstr0 , n, ncoly, byrow = TRUE)
} else {
- rep( psze.init, len = n)
+ psze.init # Already a matrix
}
- etastart =
- cbind(theta2eta(prob.init, .lprob, earg = .eprob),
- theta2eta(psze.init, .lpstr0, earg = .epstr0))
-
+ etastart <-
+ cbind(theta2eta(prob.init, .lprob , earg = .eprob ),
+ theta2eta(psze.init, .lpstr0, earg = .epstr0))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
}
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0,
@@ -2824,21 +2813,42 @@ rzigeom <- function(n, prob, pstr0 = 0) {
.bias.red = bias.red,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , earg = .eprob )
+ pstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
(1 - pstr0) * (1 - prob) / prob
}, list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
last = eval(substitute(expression({
- misc$link = c(prob = .lprob, pstr0 = .lpstr0 )
- misc$earg = list(prob = .eprob, pstr0 = .epstr0 )
- misc$imethod = .imethod
- misc$zero = .zero
- misc$bias.red = .bias.red
- misc$expected = .expected
- misc$ipstr0 = .ipstr0
+ temp.names <- c(rep( .lprob , len = NOS),
+ rep( .lpstr0 , len = NOS))
+ temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$link <- temp.names
+ misc$earg <- vector("list", Musual * NOS)
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] <- .eprob
+ misc$earg[[Musual*ii ]] <- .epstr0
+ }
+
+
+ misc$imethod <- .imethod
+ misc$zero <- .zero
+ misc$bias.red <- .bias.red
+ misc$expected <- .expected
+ misc$ipstr0 <- .ipstr0
+
+
+ misc$pobs0 <- pobs0
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pobs0) <- dimnames(y)
+ misc$pstr0 <- pstr0
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pstr0) <- dimnames(y)
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0,
.ipstr0 = ipstr0,
@@ -2848,8 +2858,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , earg = .eprob )
+ pstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
@@ -2859,64 +2869,65 @@ rzigeom <- function(n, prob, pstr0 = 0) {
vfamily = c("zigeometric"),
deriv = eval(substitute(expression({
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+ Musual <- 2
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , earg = .eprob )
+ pstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lpstr0 , earg = .epstr0 )
- prob0 = prob # P(Y == 0)
- tmp8 = pstr0 + (1 - pstr0) * prob0
- index0 = (y == 0)
+ prob0 <- prob # P(Y == 0)
+ pobs0 <- pstr0 + (1 - pstr0) * prob0
+ index0 <- (y == 0)
- dl.dpstr0 = (1 - prob0) / tmp8
- dl.dpstr0[!index0] = -1 / (1 - pstr0[!index0])
+ dl.dpstr0 <- (1 - prob0) / pobs0
+ dl.dpstr0[!index0] <- -1 / (1 - pstr0[!index0])
- dl.dprob = (1 - pstr0) / tmp8
- dl.dprob[!index0] = 1 / prob[!index0] -
- y[!index0] / (1 - prob[!index0])
+ dl.dprob <- (1 - pstr0) / pobs0
+ dl.dprob[!index0] <- 1 / prob[!index0] -
+ y[!index0] / (1 - prob[!index0])
- dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob )
- dpstr0.deta = dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
+ dprob.deta <- dtheta.deta(prob, .lprob, earg = .eprob )
+ dpstr0.deta <- dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
- dl.deta12 =
- c(w) * cbind(dl.dprob * dprob.deta,
- dl.dpstr0 * dpstr0.deta)
+ dl.deta12 <- c(w) * cbind(dl.dprob * dprob.deta,
+ dl.dpstr0 * dpstr0.deta)
+
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = Musual)]
dl.deta12
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
weight = eval(substitute(expression({
- ed2l.dprob2 = (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
- (1 - pstr0) / tmp8)
- ed2l.dpstr0.prob = 1 / tmp8
- ed2l.dpstr02 = (1 - prob0) / ((1 - pstr0) * tmp8)
-
- od2l.dprob2 = ((1 - pstr0) / tmp8)^2
- od2l.dprob2[!index0] = 1 / (prob[!index0])^2 +
- y[!index0] / (1 - prob[!index0])^2
- od2l.dpstr0.prob = (tmp8 + (1 - prob0) * (1 - pstr0)) / tmp8^2
- od2l.dpstr0.prob[!index0] = 0
-
-
- od2l.dpstr02 = ((1 - prob0) / tmp8)^2
- od2l.dpstr02[!index0] = 1 / (1 - pstr0[!index0])^2
-
-
- wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
if ( .expected ) {
- wz[,iam(1, 1, M)] = ed2l.dprob2 * dprob.deta^2
- wz[,iam(2, 2, M)] = ed2l.dpstr02 * dpstr0.deta^2
- wz[,iam(1, 2, M)] = ed2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ ned2l.dprob2 <- (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
+ (1 - pstr0) / pobs0)
+ ned2l.dpstr0.prob <- 1 / pobs0
+ ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
} else {
- wz[,iam(1, 1, M)] = od2l.dprob2 * dprob.deta^2
- wz[,iam(2, 2, M)] = od2l.dpstr02 * dpstr0.deta^2
- wz[,iam(1, 2, M)] = od2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ od2l.dprob2 <- ((1 - pstr0) / pobs0)^2
+ od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
+ y[!index0] / (1 - prob[!index0])^2
+ od2l.dpstr0.prob <- (pobs0 + (1 - prob0) * (1 - pstr0)) / pobs0^2
+ od2l.dpstr0.prob[!index0] <- 0
+
+ od2l.dpstr02 <- ((1 - prob0) / pobs0)^2
+ od2l.dpstr02[!index0] <- 1 / (1 - pstr0[!index0])^2
}
+ allvals <- if ( .expected )
+ c(c(w) * ned2l.dprob2 * dprob.deta^2,
+ c(w) * ned2l.dpstr02 * dpstr0.deta^2,
+ c(w) * ned2l.dpstr0.prob * dprob.deta * dpstr0.deta) else
+ c(c(w) * od2l.dprob2 * dprob.deta^2,
+ c(w) * od2l.dpstr02 * dpstr0.deta^2,
+ c(w) * od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
+ wz <- array(allvals, dim = c(n, M / Musual, 3))
+ wz <- arwz2wz(wz, M = M, Musual = Musual)
+
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = 1)
+ wz
}), list( .lprob = lprob, .lpstr0 = lpstr0,
- .expected = expected,
- .eprob = eprob, .epstr0 = epstr0 ))))
+ .eprob = eprob, .epstr0 = epstr0,
+ .expected = expected ))))
}
@@ -2929,23 +2940,23 @@ dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(prob), length(pobs0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(x), length(prob), length(pobs0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- index0 = (x == 0)
+ index0 <- (x == 0)
if (log.arg) {
- ans[ index0] = log(pobs0[index0])
- ans[!index0] = log1p(-pobs0[!index0]) +
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
dposgeom(x[!index0],
prob = prob[!index0], log = TRUE)
} else {
- ans[ index0] = pobs0[index0]
- ans[!index0] = (1-pobs0[!index0]) *
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1-pobs0[!index0]) *
dposgeom(x[!index0],
prob = prob[!index0])
}
@@ -2956,50 +2967,51 @@ dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
pzageom <- function(q, prob, pobs0 = 0) {
- LLL = max(length(q), length(prob), length(pobs0))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(q), length(prob), length(pobs0))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- ans[q > 0] = pobs0[q > 0] +
+ ans[q > 0] <- pobs0[q > 0] +
(1 - pobs0[q > 0]) *
pposgeom(q[q > 0], prob = prob[q > 0])
- ans[q < 0] = 0
- ans[q == 0] = pobs0[q == 0]
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
ans
}
qzageom <- function(p, prob, pobs0 = 0) {
- LLL = max(length(p), length(prob), length(pobs0))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ LLL <- max(length(p), length(prob), length(pobs0))
+ if (length(p) != LLL) p <- rep(p, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- ans = p
- ind4 = (p > pobs0)
- ans[!ind4] = 0.0
- ans[ ind4] = qposgeom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ ans <- p
+ ind4 <- (p > pobs0)
+ ans[!ind4] <- 0.0
+ ans[ ind4] <- qposgeom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
prob = prob[ind4])
ans
}
rzageom <- function(n, prob, pobs0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- ans = rposgeom(use.n, prob)
- if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ ans <- rposgeom(use.n, prob)
+ if (length(pobs0) != use.n)
+ pobs0 <- rep(pobs0, len = use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
ifelse(runif(use.n) < pobs0, 0, ans)
@@ -3019,24 +3031,24 @@ dzabinom <- function(x, size, prob, pobs0 = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(size), length(prob), length(pobs0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(x), length(size), length(prob), length(pobs0))
+ if (length(x) != LLL) x <- rep(x, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- index0 = (x == 0)
+ index0 <- (x == 0)
if (log.arg) {
- ans[ index0] = log(pobs0[index0])
- ans[!index0] = log1p(-pobs0[!index0]) +
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
dposbinom(x[!index0], size = size[!index0],
prob = prob[!index0], log = TRUE)
} else {
- ans[ index0] = pobs0[index0]
- ans[!index0] = (1-pobs0[!index0]) *
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1-pobs0[!index0]) *
dposbinom(x[!index0], size = size[!index0],
prob = prob[!index0])
}
@@ -3047,40 +3059,40 @@ dzabinom <- function(x, size, prob, pobs0 = 0, log = FALSE) {
pzabinom <- function(q, size, prob, pobs0 = 0) {
- LLL = max(length(q), length(size), length(prob), length(pobs0))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- ans = rep(0.0, len = LLL)
+ LLL <- max(length(q), length(size), length(prob), length(pobs0))
+ if (length(q) != LLL) q <- rep(q, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) ||
any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- ans[q > 0] = pobs0[q > 0] +
+ ans[q > 0] <- pobs0[q > 0] +
(1 - pobs0[q > 0]) *
pposbinom(q[q > 0], size = size[q > 0], prob = prob[q > 0])
- ans[q < 0] = 0
- ans[q == 0] = pobs0[q == 0]
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
ans
}
qzabinom <- function(p, size, prob, pobs0 = 0) {
- LLL = max(length(p), length(size), length(prob), length(pobs0))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ LLL <- max(length(p), length(size), length(prob), length(pobs0))
+ if (length(p) != LLL) p <- rep(p, len = LLL);
+ if (length(size) != LLL) size <- rep(size, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- ans = p
- ind4 = (p > pobs0)
- ans[!ind4] = 0.0
- ans[ ind4] = qposbinom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ ans <- p
+ ind4 <- (p > pobs0)
+ ans[!ind4] <- 0.0
+ ans[ ind4] <- qposbinom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
size = size[ind4],
prob = prob[ind4])
ans
@@ -3088,13 +3100,14 @@ qzabinom <- function(p, size, prob, pobs0 = 0) {
rzabinom <- function(n, size, prob, pobs0 = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- ans = rposbinom(use.n, size, prob)
- if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ ans <- rposbinom(use.n, size, prob)
+ if (length(pobs0) != use.n)
+ pobs0 <- rep(pobs0, len = use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
ifelse(runif(use.n) < pobs0, 0, ans)
@@ -3108,8 +3121,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
lpobs0 = "logit",
iprob = NULL, ipobs0 = NULL,
imethod = 1,
- zero = 2)
-{
+ zero = 2) {
@@ -3168,20 +3180,20 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
{
- NCOL = function (x)
+ NCOL <- function (x)
if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
if (is.factor(y)) y <- y != levels(y)[1]
- nn = rep(1, n)
+ nn <- rep(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1.0 + w)
+ mustart <- (0.5 + w * y) / (1.0 + w)
- no.successes = y
+ no.successes <- y
if (min(y) < 0)
stop("Negative data not allowed!")
if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
@@ -3192,12 +3204,12 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
stop("Negative data not allowed!")
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else {
stop("for the binomialff family, response 'y' must be a ",
"vector of 0 and 1's\n",
@@ -3209,13 +3221,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
if (!all(w == 1))
- extra$new.w = w
+ extra$new.w <- w
- y = as.matrix(y)
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+ y <- as.matrix(y)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
@@ -3228,8 +3240,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
new.w <- if (length(extra$new.w)) extra$new.w else 1
Size <- new.w / orig.w
- phi.init = if (length( .ipobs0 )) .ipobs0 else {
- prob0.est = sum(Size[y == 0]) / sum(Size)
+ phi.init <- if (length( .ipobs0 )) .ipobs0 else {
+ prob0.est <- sum(Size[y == 0]) / sum(Size)
if ( .imethod == 1) {
(prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
} else
@@ -3240,15 +3252,15 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
}
- phi.init[phi.init <= -0.10] = 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] = 0.05 # Last resort
- phi.init[phi.init >= 0.99] = 0.95 # Last resort
+ phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] <- 0.05 # Last resort
+ phi.init[phi.init >= 0.99] <- 0.95 # Last resort
if (!length(etastart)) {
- etastart =
+ etastart <-
cbind(theta2eta( mustart, .lprob, earg = .eprob ),
theta2eta(phi.init, .lpobs0, earg = .epobs0 ))
@@ -3272,6 +3284,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
last = eval(substitute(expression({
misc$link <- c(prob = .lprob, pobs0 = .lpobs0 )
misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
+
misc$imethod <- .imethod
misc$zero <- .zero
misc$expected <- TRUE
@@ -3311,21 +3324,21 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
- df0.dprob = -Size * (1 - prob)^(Size - 1)
- df02.dprob2 = Size * (Size - 1) * (1 - prob)^(Size - 2)
- prob0 = (1 - prob)^(Size)
- oneminusf0 = 1 - prob0
+ df0.dprob <- -Size * (1 - prob)^(Size - 1)
+ df02.dprob2 <- Size * (Size - 1) * (1 - prob)^(Size - 2)
+ prob0 <- (1 - prob)^(Size)
+ oneminusf0 <- 1 - prob0
- dl.dprob = c(w) * (y / prob - (1 - y) / (1 - prob)) +
+ dl.dprob <- c(w) * (y / prob - (1 - y) / (1 - prob)) +
c(orig.w) * df0.dprob / oneminusf0
- dl.dphi0 = -1 / (1 - phi0)
+ dl.dphi0 <- -1 / (1 - phi0)
- dl.dphi0[y == 0] = 1 / phi0[y == 0] # Do it in one line
- skip = extra$skip.these
+ dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
+ skip <- extra$skip.these
for(spp. in 1:NOS) {
- dl.dprob[skip[, spp.], spp.] = 0
+ dl.dprob[skip[, spp.], spp.] <- 0
}
@@ -3338,30 +3351,30 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
weight = eval(substitute(expression({
- wz = matrix(0.0, n, Musual)
+ wz <- matrix(0.0, n, Musual)
- usualmeanY = prob
- meanY = (1 - phi0) * usualmeanY / oneminusf0
+ usualmeanY <- prob
+ meanY <- (1 - phi0) * usualmeanY / oneminusf0
- term1 = c(Size) * (meanY / prob^2 -
+ term1 <- c(Size) * (meanY / prob^2 -
meanY / (1 - prob)^2) +
c(Size) * (1 - phi0) / (1 - prob)^2
- term2 = -(1 - phi0) * df02.dprob2 / oneminusf0
- term3 = -(1 - phi0) * (df0.dprob / oneminusf0)^2
- ed2l.dprob2 = term1 + term2 + term3
- wz[, iam(1, 1, M)] = ed2l.dprob2 * dprob.deta^2
+ term2 <- -(1 - phi0) * df02.dprob2 / oneminusf0
+ term3 <- -(1 - phi0) * (df0.dprob / oneminusf0)^2
+ ned2l.dprob2 <- term1 + term2 + term3
+ wz[, iam(1, 1, M)] <- ned2l.dprob2 * dprob.deta^2
- mu.phi0 = phi0
- tmp100 = mu.phi0 * (1.0 - mu.phi0)
- tmp200 = if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ mu.phi0 <- phi0
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
tmp100
} else {
(dphi0.deta^2) / tmp100
}
- wz[, iam(2, 2, M)] = tmp200
+ wz[, iam(2, 2, M)] <- tmp200
c(orig.w) * wz
@@ -3409,8 +3422,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
blurb = c("Zero-altered geometric ",
"(Bernoulli and positive-geometric conditional model)\n\n",
"Links: ",
- namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
- namesof("prob" , lprob , earg = eprob , tag = FALSE), "\n",
+ namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
+ namesof("prob" , lprob , earg = eprob , tag = FALSE), "\n",
"Mean: (1 - pobs0) / prob"),
constraints = eval(substitute(expression({
@@ -3426,8 +3439,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
temp5 <-
w.y.check(w = w, y = y,
- ncol.w.max = 1,
- ncol.y.max = 1,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
@@ -3438,9 +3451,9 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
mynames1 <- if (ncoly == 1) "pobs0" else
paste("pobs0", 1:ncoly, sep = "")
@@ -3454,78 +3467,83 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
if (!length(etastart)) {
foo <- function(x) mean(as.numeric(x == 0))
- phi0.init = matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
+ phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
if (length( .ipobs0 ))
- phi0.init = matrix( .ipobs0 , n, ncoly, byrow = TRUE)
+ phi0.init <- matrix( .ipobs0 , n, ncoly, byrow = TRUE)
prob.init =
if ( .imethod == 2)
1 / (1 + y + 1/16) else
if ( .imethod == 1)
- (1 - phi0.init) / (1 + matrix(apply(y, 2, weighted.mean, w = w),
- n, ncoly, byrow = TRUE) + 1/16) else
- (1 - phi0.init) / (1 + matrix(apply(y, 2, median),
- n, ncoly, byrow = TRUE) + 1/16)
+ (1 - phi0.init) / (1 +
+ matrix(colSums(y * w) / colSums(w) + 1/16,
+ n, ncoly, byrow = TRUE)) else
+ (1 - phi0.init) / (1 +
+ matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+
+
if (length( .iprob ))
- prob.init = matrix( .iprob , n, ncoly, byrow = TRUE)
+ prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
- etastart = cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
+ etastart <- cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
}
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob,
.ipobs0 = ipobs0, .iprob = iprob,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS = extra$NOS
+ NOS <- extra$NOS
Musual <- 2
- phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lpobs0 , earg = .epobs0 ))
- prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
.lprob , earg = .eprob ))
(1 - phi0) / prob
}, list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
last = eval(substitute(expression({
- temp.names = c(rep( .lpobs0 , len = NOS),
- rep( .lprob , len = NOS))
- temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
- misc$link = temp.names
-
- misc$expected = TRUE
- misc$earg = vector("list", Musual * NOS)
- misc$imethod = .imethod
- misc$ipobs0 = .ipobs0
- misc$iprob = .iprob
- misc$multipleResponses <- TRUE
+ temp.names <- c(rep( .lpobs0 , len = NOS),
+ rep( .lprob , len = NOS))
+ temp.names <- temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$link <- temp.names
+
+ misc$earg <- vector("list", Musual * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = .epobs0
- misc$earg[[Musual*ii ]] = .eprob
+ misc$earg[[Musual*ii-1]] <- .epobs0
+ misc$earg[[Musual*ii ]] <- .eprob
}
+
+
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ misc$ipobs0 <- .ipobs0
+ misc$iprob <- .iprob
+ misc$multipleResponses <- TRUE
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob,
.ipobs0 = ipobs0, .iprob = iprob,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
+ NOS <- extra$NOS
Musual <- 2
- phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
- .lprob , earg = .eprob ))
+ phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
@@ -3536,61 +3554,61 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
vfamily = c("zageometric"),
deriv = eval(substitute(expression({
Musual <- 2
- NOS = extra$NOS
- y0 = extra$y0
- skip = extra$skip.these
+ NOS <- extra$NOS
+ y0 <- extra$y0
+ skip <- extra$skip.these
- phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
- .lprob , earg = .eprob ))
+ phi0 <- cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
- dl.dprob = 1 / prob - (y - 1) / (1 - prob)
- dl.dphi0 = -1 / (1 - phi0)
+ dl.dprob <- 1 / prob - (y - 1) / (1 - prob)
+ dl.dphi0 <- -1 / (1 - phi0)
for(spp. in 1:NOS) {
- dl.dphi0[skip[, spp.], spp.] = 1 / phi0[skip[, spp.], spp.]
- dl.dprob[skip[, spp.], spp.] = 0
+ dl.dphi0[skip[, spp.], spp.] <- 1 / phi0[skip[, spp.], spp.]
+ dl.dprob[skip[, spp.], spp.] <- 0
}
- dphi0.deta = dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
- dprob.deta = dtheta.deta(prob, .lprob , earg = .eprob )
+ dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
dl.dprob * dprob.deta)
- ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, Musual*NOS)
+ wz <- matrix(0.0, n, Musual*NOS)
- ed2l.dprob2 = (1 - phi0) / (prob^2 * (1 - prob))
+ ned2l.dprob2 <- (1 - phi0) / (prob^2 * (1 - prob))
- wz[, NOS+(1:NOS)] = c(w) * ed2l.dprob2 * dprob.deta^2
+ wz[, NOS+(1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
- mu.phi0 = phi0
- tmp100 = mu.phi0 * (1.0 - mu.phi0)
- tmp200 = if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ mu.phi0 <- phi0
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
cbind(c(w) * tmp100)
} else {
cbind(c(w) * (dphi0.deta^2) / tmp100)
}
- wz[, 1:NOS] = tmp200
+ wz[, 1:NOS] <- tmp200
- wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M = Musual)]
wz
}), list( .lpobs0 = lpobs0,
.epobs0 = epobs0 ))))
-} # End of zageometric
+} # End of zageometric
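Closing out family.zeroinf.R, a quick check (not from the patch; arbitrary values) of the hurdle decomposition that dzageom() above implements: mass pobs0 at zero and (1 - pobs0) times the positive-geometric probabilities elsewhere.
library(VGAM)
prob <- 0.3; pobs0 <- 0.2
dzageom(0, prob = prob, pobs0 = pobs0)            # equals pobs0
dzageom(3, prob = prob, pobs0 = pobs0)
(1 - pobs0) * dposgeom(3, prob = prob)            # same value
sum(dzageom(0:2000, prob = prob, pobs0 = pobs0))  # essentially 1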
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 228ed5b..a1b2213 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 610a68a..96b7a0b 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/generic.q b/R/generic.q
index 8832317..7b71020 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/links.q b/R/links.q
index 6f30c4d..ffb0b97 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -19,7 +19,10 @@ ToString <- function(x)
TypicalVGAMfamilyFunction <-
function(lsigma = "loge",
- isigma = NULL, parallel = TRUE,
+ isigma = NULL,
+ gsigma = exp(-5:5),
+ parallel = TRUE,
+ apply.parint = FALSE,
shrinkage.init = 0.95,
nointercept = NULL, imethod = 1,
probs.x = c(0.15, 0.85),
@@ -45,6 +48,17 @@ TypicalVGAMlinkFunction <-
+care.exp <- function(x,
+ thresh = -log( sqrt( .Machine$double.xmin ) )
+ ) {
+ x[x > thresh] <- thresh
+ x[x < (-thresh)] <- -thresh
+ exp(x)
+}
+
+
+
+
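The care.exp() helper added above clamps its argument to +/- thresh (about 354.2 with the default) before exponentiating, so the exp() calls in mlogit()'s inverse link further down can neither overflow to Inf nor underflow to exactly zero. A small illustration (care.exp() is internal to the package, so this assumes the definition above has been sourced):
thresh <- -log(sqrt(.Machine$double.xmin))  # approximately 354.2
exp(c(-800, 800))                           # 0 and Inf
care.exp(c(-800, 800))                      # sqrt(.Machine$double.xmin) and its reciprocal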
@@ -57,8 +71,8 @@ TypicalVGAMlinkFunction <-
if (is.character(theta)) {
string <- if (short)
- paste("log(", theta, ")", sep = "") else
- paste("log(", theta, ")", sep = "")
+ paste("log(", theta, ")", sep = "") else
+ paste("log(", theta, ")", sep = "")
if (tag)
string <- paste("Log:", string)
return(string)
@@ -94,8 +108,8 @@ TypicalVGAMlinkFunction <-
stop("bad input for argument 'offset'")
if (is.character(theta)) {
- string <- if (short)
- paste("Logoff(", theta,
+ string <- if (short)
+ paste("logoff(", theta,
", offset = ", as.character(offset),
")", sep = "") else
paste("log(",
@@ -130,6 +144,8 @@ TypicalVGAMlinkFunction <-
identity <- function(theta,
inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
+
+
if (is.character(theta)) {
string <- theta
if (tag)
@@ -192,7 +208,7 @@ TypicalVGAMlinkFunction <-
if (is.character(theta)) {
string <- if (short)
paste("logit(", theta, ")", sep = "") else
- paste("log(", theta, "/(1-", theta, "))", sep = "")
+ paste("log(", theta, "/(1-", theta, "))", sep = "")
if (tag)
string <- paste("Logit:", string)
return(string)
@@ -234,7 +250,7 @@ TypicalVGAMlinkFunction <-
{
if (is.character(theta)) {
string <- if (short)
- paste("loglog(", theta, ")", sep = "") else
+ paste("loglog(", theta, ")", sep = "") else
paste("log(log(", theta, "))", sep = "")
if (tag)
string <- paste("Log-Log:", string)
@@ -274,7 +290,7 @@ TypicalVGAMlinkFunction <-
{
if (is.character(theta)) {
string <- if (short)
- paste("cloglog(", theta, ")", sep = "") else
+ paste("cloglog(", theta, ")", sep = "") else
paste("log(-log(1-", theta, "))", sep = "")
if (tag)
string <- paste("Complementary log-log:", string)
@@ -317,7 +333,7 @@ TypicalVGAMlinkFunction <-
if (is.character(theta)) {
string <- if (short)
paste("probit(", theta, ")", sep = "") else
- paste("qnorm(", theta, ")", sep = "")
+ paste("qnorm(", theta, ")", sep = "")
if (tag)
string <- paste("Probit:", string)
return(string)
@@ -383,7 +399,7 @@ TypicalVGAMlinkFunction <-
{
if (is.character(theta)) {
string <- if (short)
- paste("exp(", theta, ")", sep = "") else
+ paste("explink(", theta, ")", sep = "") else
paste("exp(", theta, ")", sep = "")
if (tag)
string <- paste("Exp:", string)
@@ -452,8 +468,8 @@ TypicalVGAMlinkFunction <-
short = TRUE, tag = FALSE) {
if (is.character(theta)) {
string <- if (short)
- paste("-log(", theta, ")", sep = "") else
- paste("-log(", theta, ")", sep = "")
+ paste("nloge(", theta, ")", sep = "") else
+ paste("-log(", theta, ")", sep = "")
if (tag)
string <- paste("Negative log:", string)
return(string)
@@ -601,7 +617,7 @@ TypicalVGAMlinkFunction <-
if (is.character(theta)) {
string <- if (short)
paste("fisherz(", theta, ")", sep = "") else
- paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep = "")
+ paste("(1/2) * log((1+", theta, ")/(1-", theta, "))", sep = "")
if (tag)
string <- paste("Fisher's Z transformation:", string)
return(string)
@@ -619,12 +635,11 @@ TypicalVGAMlinkFunction <-
bmaxvalue = bmaxvalue,
inverse = FALSE, deriv = deriv)
} else {
- junk <- exp(2*theta)
- expm1(2*theta) / (junk+1.0)
+ tanh(theta)
}
} else {
switch(deriv+1,
- 0.5 * log1p(theta) - log1p(-theta),
+ atanh(theta),
1.0 - theta^2,
(1.0 - theta^2)^2 / (2*theta))
}
@@ -643,8 +658,7 @@ TypicalVGAMlinkFunction <-
whitespace = FALSE,
bvalue = NULL,
inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-{
+ short = TRUE, tag = FALSE) {
fillerChar <- ifelse(whitespace, " ", "")
@@ -667,8 +681,8 @@ TypicalVGAMlinkFunction <-
stop("could not coerce 'refLevel' into a single positive integer")
} else
if (!is.Numeric(refLevel, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("'refLevel' must be a single positive integer")
+ integer.valued = TRUE))
+ stop("'refLevel' must be a single (positive?) integer")
@@ -683,7 +697,7 @@ TypicalVGAMlinkFunction <-
theta, "[,",
ifelse(is.M, M+1, "M+1"),
"]), j = 1:",
- M, sep = ""),
+ ifelse(is.M, M, "M"), sep = ""),
paste("log(", theta, "[,j]/",
theta, "[,",
ifelse(is.M, M+1, "M+1"),
@@ -716,12 +730,17 @@ TypicalVGAMlinkFunction <-
+ M.orig <- M
M <- if (inverse) ncol(cbind(theta)) else
ncol(cbind(theta)) - 1
if (M < 1)
ifelse(inverse,
stop("argument 'eta' should have at least one column"),
stop("argument 'theta' should have at least two columns"))
+ if (is.numeric(M.orig) && M != M.orig) {
+ warning("argument 'M' does not seem right but using it")
+ M <- M.orig
+ }
@@ -734,14 +753,15 @@ TypicalVGAMlinkFunction <-
foo <- function(eta, refLevel = -1, M) {
phat <- if ((refLevel < 0) || (refLevel == M+1)) {
- cbind(exp(eta), 1)
+ cbind(care.exp(eta), 1.0)
} else if ( refLevel == 1) {
- cbind(1, exp(eta))
+ cbind(1.0, care.exp(eta))
} else {
use.refLevel <- if ( refLevel < 0) M+1 else refLevel
- etamat <- cbind(eta[, 1:( refLevel - 1)], 0,
+ etamat <- cbind(eta[, 1:( refLevel - 1)],
+ 0.0,
eta[, ( refLevel ):M])
- exp(etamat)
+ care.exp(etamat)
}
ans <- phat / rowSums(phat)
colnames(ans) <- NULL
@@ -756,22 +776,24 @@ TypicalVGAMlinkFunction <-
bvalue = bvalue,
inverse = FALSE, deriv = deriv)
} else {
- ans <- if ( refLevel < 0) {
+ foo(theta, refLevel, M = M) # log(theta[, -jay] / theta[, jay])
+ }
+ } else {
+ switch(deriv + 1, {
+ ans <- if (refLevel < 0) {
log(theta[, -ncol(theta)] / theta[, ncol(theta)])
} else {
- use.refLevel <- if ( refLevel < 0) ncol(theta) else refLevel
+ use.refLevel <- if (refLevel < 0) ncol(theta) else refLevel
log(theta[, -( use.refLevel )] / theta[, use.refLevel ])
}
colnames(ans) <- NULL
ans
- }
- } else {
- switch(deriv + 1,
- foo(theta, refLevel, M = M), # log(theta[, -jay] / theta[, jay])
- exp(log(theta) + log1p(-theta)),
- exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+ },
+ care.exp(log(theta) + log1p(-theta)),
+ care.exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
}
-}
+} # end of mlogit
+
@@ -1001,10 +1023,11 @@ fsqrt <- function(theta, # = NA , = NULL,
0.5 + atan(theta) / pi
}
} else {
- switch(deriv+1, {
- tan(pi * (theta-0.5))},
+ switch(deriv+1,
+ tan(pi * (theta-0.5)),
cos(pi * (theta-0.5))^2 / pi,
- -sin(pi * (theta-0.5) * 2))
+ -sin(pi * (theta-0.5) * 2)
+ )
}
}
@@ -1316,7 +1339,7 @@ fsqrt <- function(theta, # = NA , = NULL,
inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
- kay = k
+ kay <- k
if (!is.Numeric(kay, positive = TRUE))
stop("could not determine argument 'k' or ",
"it is not positive-valued")
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 93fdda9..300e962 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,7 +9,7 @@
logLik.vlm <- function(object, ...)
- object@criterion$loglikelihood
+ object@criterion$loglikelihood
if (!isGeneric("logLik"))
@@ -34,9 +34,14 @@ setMethod("logLik", "vgam", function(object, ...)
+
+
+
constraints.vlm <- function(object,
type = c("lm", "term"),
- all = TRUE, which, ...) {
+ all = TRUE, which,
+ matrix.out = FALSE,
+ ...) {
type <- match.arg(type, c("lm", "term"))[1]
@@ -60,7 +65,21 @@ constraints.vlm <- function(object,
names(ans) <- names.att.x.LM
} # End of "term"
- if (all) ans else ans[[which]]
+ if (matrix.out) {
+ if (all) {
+ M <- npred(object)
+ mat.ans <- matrix(unlist(ans), nrow = M)
+ if (length(object@misc$predictors.names) == M)
+ rownames(mat.ans) <- object@misc$predictors.names
+ if (length(object@misc$colnames.X_vlm) == ncol(mat.ans))
+ colnames(mat.ans) <- object@misc$colnames.X_vlm
+ mat.ans
+ } else {
+ ans[[which]]
+ }
+ } else {
+ if (all) ans else ans[[which]]
+ }
}
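A minimal sketch of the new 'matrix.out' argument handled above (not from the patch; the pneumo/propodds fit is just a convenient example). With matrix.out = TRUE the per-term constraint matrices are cbind()ed into one M-row matrix, labelled by the predictor and column names when those are available.
library(VGAM)
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
constraints(fit)                      # a named list, one constraint matrix per term
constraints(fit, matrix.out = TRUE)   # the same matrices side by side, M rows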
@@ -69,8 +88,9 @@ if (!isGeneric("constraints"))
setGeneric("constraints", function(object, ...)
standardGeneric("constraints"))
+
setMethod("constraints", "vlm", function(object, ...)
- constraints.vlm(object, ...))
+ constraints.vlm(object, ...))
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index 2ec1b39..4b4991c 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -256,8 +256,8 @@ setMethod("lrtest", "vglm",
-use.S3.lrtest = TRUE
-use.S3.lrtest = FALSE
+use.S3.lrtest <- TRUE
+use.S3.lrtest <- FALSE
if (use.S3.lrtest)
@@ -271,7 +271,7 @@ if (use.S3.lrtest)
lrtest.formula <- function(object, ..., data = list()) {
object <- if (length(data) < 1)
eval(call("lm", formula = as.formula(deparse(substitute(object))),
- environment(object))) else
+ environment(object))) else
eval(call("lm", formula = as.formula(deparse(substitute(object))),
data = as.name(deparse(substitute(data))),
environment(data)))
@@ -286,7 +286,6 @@ lrtest.default <- function(object, ..., name = NULL) {
-print("hi S3 20111224")
cls <- class(object)[1]
@@ -335,20 +334,20 @@ print("hi S3 20111224")
nmodels <- length(objects)
if (nmodels < 2) {
objects <- c(objects, . ~ 1)
-print("objects 1")
-print( objects )
+ print("objects 1")
+ print( objects )
nmodels <- 2
}
no.update <- sapply(objects, function(obj) inherits(obj, cls))
-print("no.update")
-print( no.update )
+ print("no.update")
+ print( no.update )
for(i in 2:nmodels)
objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
-print("objects i")
-print( objects )
+ print("objects i")
+ print( objects )
ns <- sapply(objects, nobs)
if (any(ns != ns[1])) {
@@ -359,8 +358,8 @@ print( objects )
"the same size of dataset") else {
commonobs <- row.names(model.frame(objects[[i]])) %in%
row.names(model.frame(objects[[i-1]]))
-print("commonobs")
-print( commonobs )
+ print("commonobs")
+ print( commonobs )
objects[[i]] <- eval(substitute(update(objects[[i]],
subset = commonobs),
list(commonobs = commonobs)))
@@ -411,12 +410,15 @@ waldtest <- function(object, ...) {
waldtest_formula <- function(object, ..., data = list()) {
+
+ stop("cannot find waldtest_lm()")
+
object <- if (length(data) < 1)
eval(call("lm", formula = as.formula(deparse(substitute(object))),
environment(object))) else
eval(call("lm", formula = as.formula(deparse(substitute(object))),
data = as.name(deparse(substitute(data))), environment(data)))
- waldtest_lm(object, ...)
+
}
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index d2292c4..c45807a 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,47 +12,87 @@
- attrassigndefault = function(mmat, tt) {
+ attrassigndefault <- function(mmat, tt) {
if (!inherits(tt, "terms"))
stop("need terms object")
- aa = attr(mmat, "assign")
+ aa <- attr(mmat, "assign")
if (is.null(aa))
stop("argument is not really a model matrix")
- ll = attr(tt, "term.labels")
+ ll <- attr(tt, "term.labels")
if (attr(tt, "intercept") > 0)
- ll = c("(Intercept)", ll)
- aaa = factor(aa, labels = ll)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
split(order(aa), aaa)
}
- attrassignlm = function(object, ...)
+ attrassignlm <- function(object, ...)
attrassigndefault(model.matrix(object), object@terms)
- vlabel = function(xn, ncolBlist, M, separator = ":") {
+ vlabel <- function(xn, ncolBlist, M, separator = ":") {
if (length(xn) != length(ncolBlist))
stop("length of first two arguments not equal")
- n1 = rep(xn, ncolBlist)
+ n1 <- rep(xn, ncolBlist)
if (M == 1)
return(n1)
- n2 = as.list(ncolBlist)
- n2 = lapply(n2, seq)
- n2 = unlist(n2)
- n2 = as.character(n2)
- n2 = paste(separator, n2, sep = "")
- n3 = rep(ncolBlist, ncolBlist)
- n2[n3 == 1] = ""
- n1n2 = paste(n1, n2, sep = "")
+ n2 <- as.list(ncolBlist)
+ n2 <- lapply(n2, seq)
+ n2 <- unlist(n2)
+ n2 <- as.character(n2)
+ n2 <- paste(separator, n2, sep = "")
+ n3 <- rep(ncolBlist, ncolBlist)
+ n2[n3 == 1] <- ""
+ n1n2 <- paste(n1, n2, sep = "")
n1n2
}
- lm2vlm.model.matrix = function(x, Blist = NULL, assign.attributes = TRUE,
- M = NULL, xij = NULL, Xm2 = NULL) {
+
+
+ vlm2lm.model.matrix <-
+ function(x_vlm, Blist = NULL,
+ which.lp = 1,
+ M = NULL) {
+
+
+
+
+
+
+
+ if (is.numeric(M)) {
+ if (M != nrow(Blist[[1]]))
+ stop("argument 'M' does not match argument 'Blist'")
+ } else {
+ M <- nrow(Blist[[1]])
+ }
+
+
+ Hmatrices <- matrix(c(unlist(Blist)), nrow = M)
+ if (ncol(Hmatrices) != ncol(x_vlm))
+ stop("ncol(Hmatrices) != ncol(x_vlm)")
+
+
+ n_lm <- nrow(x_vlm) / M
+ if (round(n_lm) != n_lm)
+ stop("'n_lm' does not seem to be an integer")
+ lapred.index <- which.lp
+ vecTF <- Hmatrices[lapred.index, ] != 0
+ X_lm_jay <- x_vlm[(0:(n_lm - 1)) * M + lapred.index, vecTF,
+ drop = FALSE]
+ X_lm_jay
+}
+
+
+
+
+
+ lm2vlm.model.matrix <- function(x, Blist = NULL, assign.attributes = TRUE,
+ M = NULL, xij = NULL, Xm2 = NULL) {
@@ -62,70 +102,70 @@
if (length(xij)) {
if (inherits(xij, "formula"))
- xij = list(xij)
+ xij <- list(xij)
if (!is.list(xij))
stop("'xij' is not a list of formulae")
}
if (!is.numeric(M))
- M = nrow(Blist[[1]])
+ M <- nrow(Blist[[1]])
- nrow_X_lm = nrow(x)
+ nrow_X_lm <- nrow(x)
if (all(trivial.constraints(Blist) == 1)) {
- X_vlm = if (M > 1) kronecker(x, diag(M)) else x
- ncolBlist = rep(M, ncol(x))
+ X_vlm <- if (M > 1) kronecker(x, diag(M)) else x
+ ncolBlist <- rep(M, ncol(x))
} else {
- allB = matrix(unlist(Blist), nrow = M)
- ncolBlist = unlist(lapply(Blist, ncol))
- Rsum = sum(ncolBlist)
-
- X1 = rep(c(t(x)), rep(ncolBlist, nrow_X_lm))
- dim(X1) = c(Rsum, nrow_X_lm)
- X_vlm = kronecker(t(X1), matrix(1, M, 1)) *
- kronecker(matrix(1, nrow_X_lm, 1), allB)
+ allB <- matrix(unlist(Blist), nrow = M)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ Rsum <- sum(ncolBlist)
+
+ X1 <- rep(c(t(x)), rep(ncolBlist, nrow_X_lm))
+ dim(X1) <- c(Rsum, nrow_X_lm)
+ X_vlm <- kronecker(t(X1), matrix(1, M, 1)) *
+ kronecker(matrix(1, nrow_X_lm, 1), allB)
rm(X1)
}
- dn = labels(x)
- yn = dn[[1]]
- xn = dn[[2]]
- dimnames(X_vlm) = list(vlabel(yn, rep(M, nrow_X_lm), M),
- vlabel(xn, ncolBlist, M))
+ dn <- labels(x)
+ yn <- dn[[1]]
+ xn <- dn[[2]]
+ dimnames(X_vlm) <- list(vlabel(yn, rep(M, nrow_X_lm), M),
+ vlabel(xn, ncolBlist, M))
if (assign.attributes) {
- attr(X_vlm, "contrasts") = attr(x, "contrasts")
- attr(X_vlm, "factors") = attr(x, "factors")
- attr(X_vlm, "formula") = attr(x, "formula")
- attr(X_vlm, "class") = attr(x, "class")
- attr(X_vlm, "order") = attr(x, "order")
- attr(X_vlm, "term.labels") = attr(x, "term.labels")
+ attr(X_vlm, "contrasts") <- attr(x, "contrasts")
+ attr(X_vlm, "factors") <- attr(x, "factors")
+ attr(X_vlm, "formula") <- attr(x, "formula")
+ attr(X_vlm, "class") <- attr(x, "class")
+ attr(X_vlm, "order") <- attr(x, "order")
+ attr(X_vlm, "term.labels") <- attr(x, "term.labels")
- nasgn = oasgn = attr(x, "assign")
- lowind = 0
+ nasgn <- oasgn <- attr(x, "assign")
+ lowind <- 0
for(ii in 1:length(oasgn)) {
- mylen = length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
- nasgn[[ii]] = (lowind+1):(lowind+mylen)
- lowind = lowind + mylen
+ mylen <- length(oasgn[[ii]]) * ncolBlist[oasgn[[ii]][1]]
+ nasgn[[ii]] <- (lowind+1):(lowind+mylen)
+ lowind <- lowind + mylen
} # End of ii
if (lowind != ncol(X_vlm))
stop("something gone wrong")
- attr(X_vlm, "assign") = nasgn
+ attr(X_vlm, "assign") <- nasgn
- fred = unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
- vasgn = vector("list", sum(fred))
- kk = 0
+ fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
+ vasgn <- vector("list", sum(fred))
+ kk <- 0
for(ii in 1:length(oasgn)) {
- temp = matrix(nasgn[[ii]], ncol = length(oasgn[[ii]]))
+ temp <- matrix(nasgn[[ii]], ncol = length(oasgn[[ii]]))
for(jloc in 1:nrow(temp)) {
- kk = kk + 1
- vasgn[[kk]] = temp[jloc,]
+ kk <- kk + 1
+ vasgn[[kk]] <- temp[jloc,]
}
}
- names(vasgn) = vlabel(names(oasgn), fred, M)
- attr(X_vlm, "vassign") = vasgn
+ names(vasgn) <- vlabel(names(oasgn), fred, M)
+ attr(X_vlm, "vassign") <- vasgn
- attr(X_vlm, "constraints") = Blist
+ attr(X_vlm, "constraints") <- Blist
} # End of if (assign.attributes)
@@ -139,50 +179,50 @@
- at.x = attr(x, "assign")
- at.vlmx = attr(X_vlm, "assign")
- at.Xm2 = attr(Xm2, "assign")
+ at.x <- attr(x, "assign")
+ at.vlmx <- attr(X_vlm, "assign")
+ at.Xm2 <- attr(Xm2, "assign")
for(ii in 1:length(xij)) {
- form.xij = xij[[ii]]
+ form.xij <- xij[[ii]]
if (length(form.xij) != 3)
stop("xij[[", ii, "]] is not a formula with a response")
- tform.xij = terms(form.xij)
- aterm.form = attr(tform.xij, "term.labels") # Does not include response
+ tform.xij <- terms(form.xij)
+ aterm.form <- attr(tform.xij, "term.labels") # Does not include response
if (length(aterm.form) != M)
stop("xij[[", ii, "]] does not contain ", M, " terms")
- name.term.y = as.character(form.xij)[2]
- cols.X_vlm = at.vlmx[[name.term.y]] # May be > 1 in length.
+ name.term.y <- as.character(form.xij)[2]
+ cols.X_vlm <- at.vlmx[[name.term.y]] # May be > 1 in length.
- x.name.term.2 = aterm.form[1] # Choose the first one
- One.such.term = at.Xm2[[x.name.term.2]]
+ x.name.term.2 <- aterm.form[1] # Choose the first one
+ One.such.term <- at.Xm2[[x.name.term.2]]
for(bbb in 1:length(One.such.term)) {
- use.cols.Xm2 = NULL
+ use.cols.Xm2 <- NULL
for(sss in 1:M) {
- x.name.term.2 = aterm.form[sss]
- one.such.term = at.Xm2[[x.name.term.2]]
- use.cols.Xm2 = c(use.cols.Xm2, one.such.term[bbb])
+ x.name.term.2 <- aterm.form[sss]
+ one.such.term <- at.Xm2[[x.name.term.2]]
+ use.cols.Xm2 <- c(use.cols.Xm2, one.such.term[bbb])
} # End of sss
- allXk = Xm2[,use.cols.Xm2,drop=FALSE]
- cmat.no = (at.x[[name.term.y]])[1] # First one will do (all the same).
- cmat = Blist[[cmat.no]]
- Rsum.k = ncol(cmat)
- tmp44 = kronecker(matrix(1, nrow_X_lm, 1), t(cmat)) *
+ allXk <- Xm2[,use.cols.Xm2,drop=FALSE]
+ cmat.no <- (at.x[[name.term.y]])[1] # First one will do (all the same).
+ cmat <- Blist[[cmat.no]]
+ Rsum.k <- ncol(cmat)
+ tmp44 <- kronecker(matrix(1, nrow_X_lm, 1), t(cmat)) *
kronecker(allXk, matrix(1,ncol(cmat), 1)) # n*Rsum.k x M
- tmp44 = array(t(tmp44), c(M, Rsum.k, nrow_X_lm))
- tmp44 = aperm(tmp44, c(1,3,2)) # c(M, n, Rsum.k)
- rep.index = cols.X_vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
- X_vlm[,rep.index] = c(tmp44)
+ tmp44 <- array(t(tmp44), c(M, Rsum.k, nrow_X_lm))
+ tmp44 <- aperm(tmp44, c(1,3,2)) # c(M, n, Rsum.k)
+ rep.index <- cols.X_vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
+ X_vlm[,rep.index] <- c(tmp44)
} # End of bbb
} # End of for(ii in 1:length(xij))
if (assign.attributes) {
- attr(X_vlm, "vassign") = vasgn
- attr(X_vlm, "assign") = nasgn
- attr(X_vlm, "xij") = xij
+ attr(X_vlm, "vassign") <- vasgn
+ attr(X_vlm, "assign") <- nasgn
+ attr(X_vlm, "xij") <- xij
}
X_vlm
}
@@ -192,7 +232,7 @@
- model.matrixvlm = function(object,
+ model.matrixvlm <- function(object,
type = c("vlm", "lm", "lm2", "bothlmlm2"),
lapred.index = NULL,
...) {
@@ -200,8 +240,8 @@
if (mode(type) != "character" && mode(type) != "name")
- type = as.character(substitute(type))
- type = match.arg(type, c("vlm", "lm", "lm2", "bothlmlm2"))[1]
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("vlm", "lm", "lm2", "bothlmlm2"))[1]
if (length(lapred.index) &&
type != "lm")
@@ -213,35 +253,35 @@
"assigned a value")
- x = slot(object, "x")
+ x <- slot(object, "x")
- Xm2 = if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
+ Xm2 <- if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
numeric(0)
if (!length(x)) {
- data = model.frame(object, xlev = object@xlevels, ...)
+ data <- model.frame(object, xlev = object at xlevels, ...)
- kill.con = if (length(object at contrasts)) object at contrasts else NULL
+ kill.con <- if (length(object at contrasts)) object at contrasts else NULL
- x = vmodel.matrix.default(object, data = data,
+ x <- vmodel.matrix.default(object, data = data,
contrasts.arg = kill.con)
- tt = terms(object)
- attr(x, "assign") = attrassigndefault(x, tt)
+ tt <- terms(object)
+ attr(x, "assign") <- attrassigndefault(x, tt)
}
if ((type == "lm2" || type == "bothlmlm2") &&
!length(Xm2)) {
- object.copy2 = object
- data = model.frame(object.copy2, xlev = object.copy2@xlevels, ...)
+ object.copy2 <- object
+ data <- model.frame(object.copy2, xlev = object.copy2@xlevels, ...)
- kill.con = if (length(object.copy2@contrasts))
+ kill.con <- if (length(object.copy2@contrasts))
object.copy2@contrasts else NULL
- Xm2 = vmodel.matrix.default(object.copy2, data = data,
+ Xm2 <- vmodel.matrix.default(object.copy2, data = data,
contrasts.arg = kill.con)
- ttXm2 = terms(object.copy2@misc$form2)
- attr(Xm2, "assign") = attrassigndefault(Xm2, ttXm2)
+ ttXm2 <- terms(object.copy2@misc$form2)
+ attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
}
@@ -257,8 +297,8 @@
}
- M = object@misc$M
- Blist = object@constraints # == constraints(object, type = "lm")
+ M <- object@misc$M
+ Blist <- object@constraints # == constraints(object, type = "lm")
X_vlm <- lm2vlm.model.matrix(x = x, Blist = Blist,
xij = object@control$xij, Xm2 = Xm2)
@@ -272,16 +312,16 @@
stop("argument 'lapred.index' should have ",
"a single value from the set 1:", M)
- Hlist = Blist
- n_lm = nobs(object) # Number of rows of the LM matrix
- M = object@misc$M # Number of linear/additive predictors
- Hmatrices = matrix(c(unlist(Hlist)), nrow = M)
- jay = lapred.index
- index0 = Hmatrices[jay, ] != 0
- X_lm_jay = X_vlm[(0:(n_lm - 1)) * M + jay, index0, drop = FALSE]
+ Hlist <- Blist
+ n_lm <- nobs(object) # Number of rows of the LM matrix
+ M <- object@misc$M # Number of linear/additive predictors
+ Hmatrices <- matrix(c(unlist(Hlist)), nrow = M)
+ jay <- lapred.index
+ index0 <- Hmatrices[jay, ] != 0
+ X_lm_jay <- X_vlm[(0:(n_lm - 1)) * M + jay, index0, drop = FALSE]
X_lm_jay
} else {
- stop("am confused. Don't know what to return")
+ stop("am confused. Do not know what to return")
}
}
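
A usage sketch for the reworked model.matrixvlm(), assuming the pneumo data and multinomial() family that ship with VGAM and the usual log-exposure transform (the fit itself is illustrative only):

  library(VGAM)
  pneumo <- transform(pneumo, let = log(exposure.time))
  fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
  X.lm  <- model.matrix(fit, type = "lm")   # small n x p LM matrix
  X.vlm <- model.matrix(fit, type = "vlm")  # big (n*M) x p_vlm VLM matrix
  X.j1  <- model.matrix(fit, type = "lm", lapred.index = 1)  # columns acting on eta_1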
@@ -297,26 +337,26 @@ setMethod("model.matrix", "vlm", function(object, ...)
- model.framevlm = function(object,
- setupsmart = TRUE, wrapupsmart = TRUE, ...) {
+ model.framevlm <- function(object,
+ setupsmart = TRUE, wrapupsmart = TRUE, ...) {
- dots = list(...)
- nargs = dots[match(c("data", "na.action", "subset"), names(dots), 0)]
+ dots <- list(...)
+ nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0)]
if (length(nargs) || !length(object@model)) {
- fcall = object@call
- fcall$method = "model.frame"
- fcall[[1]] = as.name("vlm")
+ fcall <- object@call
+ fcall$method <- "model.frame"
+ fcall[[1]] <- as.name("vlm")
- fcall$smart = FALSE
+ fcall$smart <- FALSE
if (setupsmart && length(object@smart.prediction)) {
setup.smart("read", smart.prediction=object@smart.prediction)
}
- fcall[names(nargs)] = nargs
- env = environment(object@terms$terms) # @terms or @terms$terms ??
+ fcall[names(nargs)] <- nargs
+ env <- environment(object@terms$terms) # @terms or @terms$terms ??
if (is.null(env))
- env = parent.frame()
- ans = eval(fcall, env, parent.frame())
+ env <- parent.frame()
+ ans <- eval(fcall, env, parent.frame())
if (wrapupsmart && length(object@smart.prediction)) {
wrapup.smart()
@@ -337,63 +377,63 @@ setMethod("model.frame", "vlm", function(formula, ...)
- vmodel.matrix.default = function(object, data = environment(object),
- contrasts.arg = NULL, xlev = NULL, ...) {
+ vmodel.matrix.default <- function(object, data = environment(object),
+ contrasts.arg = NULL, xlev = NULL, ...) {
- t <- if (missing(data)) terms(object) else terms(object, data = data)
- if (is.null(attr(data, "terms")))
- data <- model.frame(object, data, xlev = xlev) else {
- reorder <- match(sapply(attr(t, "variables"), deparse,
- width.cutoff = 500)[-1], names(data))
- if (any(is.na(reorder)))
- stop("model frame and formula mismatch in model.matrix()")
- if (!identical(reorder, seq_len(ncol(data))))
- data <- data[, reorder, drop = FALSE]
+ t <- if (missing(data)) terms(object) else terms(object, data = data)
+ if (is.null(attr(data, "terms")))
+ data <- model.frame(object, data, xlev = xlev) else {
+ reorder <- match(sapply(attr(t, "variables"), deparse,
+ width.cutoff = 500)[-1], names(data))
+ if (any(is.na(reorder)))
+ stop("model frame and formula mismatch in model.matrix()")
+ if (!identical(reorder, seq_len(ncol(data))))
+ data <- data[, reorder, drop = FALSE]
+ }
+ int <- attr(t, "response")
+ if (length(data)) {
+ contr.funs <- as.character(getOption("contrasts"))
+ namD <- names(data)
+ for (i in namD) if (is.character(data[[i]])) {
+ data[[i]] <- factor(data[[i]])
+ warning(gettextf("variable '%s' converted to a factor", i),
+ domain = NA)
}
- int <- attr(t, "response")
- if (length(data)) {
- contr.funs <- as.character(getOption("contrasts"))
- namD <- names(data)
- for (i in namD) if (is.character(data[[i]])) {
- data[[i]] <- factor(data[[i]])
- warning(gettextf("variable '%s' converted to a factor",
- i), domain = NA)
+ isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
+ isF[int] <- FALSE
+ isOF <- sapply(data, is.ordered)
+ for (nn in namD[isF]) if (is.null(attr(data[[nn]], "contrasts")))
+ contrasts(data[[nn]]) <- contr.funs[1 + isOF[nn]]
+ if (!is.null(contrasts.arg) && is.list(contrasts.arg)) {
+ if (is.null(namC <- names(contrasts.arg)))
+ stop("invalid 'contrasts.arg' argument")
+ for (nn in namC) {
+ if (is.na(ni <- match(nn, namD)))
+ warning(gettextf(
+ "variable '%s' is absent, its contrast will be ignored",
+ nn), domain = NA) else {
+ ca <- contrasts.arg[[nn]]
+ if (is.matrix(ca))
+ contrasts(data[[ni]], ncol(ca)) <- ca else
+ contrasts(data[[ni]]) <- contrasts.arg[[nn]]
}
- isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
- isF[int] <- FALSE
- isOF <- sapply(data, is.ordered)
- for (nn in namD[isF]) if (is.null(attr(data[[nn]], "contrasts")))
- contrasts(data[[nn]]) <- contr.funs[1 + isOF[nn]]
- if (!is.null(contrasts.arg) && is.list(contrasts.arg)) {
- if (is.null(namC <- names(contrasts.arg)))
- stop("invalid 'contrasts.arg' argument")
- for (nn in namC) {
- if (is.na(ni <- match(nn, namD)))
- warning(gettextf(
- "variable '%s' is absent, its contrast will be ignored",
- nn), domain = NA) else {
- ca <- contrasts.arg[[nn]]
- if (is.matrix(ca))
- contrasts(data[[ni]], ncol(ca)) <- ca else
- contrasts(data[[ni]]) <- contrasts.arg[[nn]]
- }
- }
- }
- } else {
- isF <- FALSE
- data <- list(x = rep(0, nrow(data)))
+ }
}
+ } else {
+ isF <- FALSE
+ data <- list(x = rep(0, nrow(data)))
+ }
- ans <- (model.matrix(t, data))
+ ans <- (model.matrix(t, data))
- cons <- if (any(isF))
- lapply(data[isF], function(x) attr(x, "contrasts")) else NULL
- attr(ans, "contrasts") <- cons
- ans
+ cons <- if (any(isF))
+ lapply(data[isF], function(x) attr(x, "contrasts")) else NULL
+ attr(ans, "contrasts") <- cons
+ ans
}
@@ -513,13 +553,13 @@ hatvaluesvlm <- function(model,
} else {
ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
MM12 <- M * (M + 1) / 2
- all.rows.index = rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$row.index
- all.cols.index = rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$col.index
+ all.rows.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$row.index
+ all.cols.index <- rep((0:(nn-1)) * M, rep(MM12, nn)) + ind1$col.index
- H_ss = rowSums(Q.S3[all.rows.index, ] *
+ H_ss <- rowSums(Q.S3[all.rows.index, ] *
Q.S3[all.cols.index, ])
- H_ss = matrix(H_ss, nn, MM12, byrow = TRUE)
+ H_ss <- matrix(H_ss, nn, MM12, byrow = TRUE)
H_ss
}
}
@@ -582,7 +622,7 @@ hatplot.vlm <-
}
if (is.null(ylim))
- ylim = c(0, max(hatval))
+ ylim <- c(0, max(hatval))
for (jay in 1:M) {
plot(hatval[, jay], type = "n", main = predictors.names[jay],
ylim = ylim, xlab = xlab, ylab = ylab,
@@ -634,15 +674,15 @@ dfbetavlm <-
if (!is(model, "vlm"))
stop("argument 'model' does not seem to be a vglm() object")
- n_lm = nobs(model, type = "lm")
- X_lm = model.matrix(model, type = "lm")
- X_vlm = model.matrix(model, type = "vlm")
- p_vlm = ncol(X_vlm) # nvar(model, type = "vlm")
- M = npred(model)
- wz = weights(model, type = "work") # zz unused!!!!!!!
- etastart = predict(model)
- offset = matrix(model@offset, n_lm, M)
- new.control = model@control
+ n_lm <- nobs(model, type = "lm")
+ X_lm <- model.matrix(model, type = "lm")
+ X_vlm <- model.matrix(model, type = "vlm")
+ p_vlm <- ncol(X_vlm) # nvar(model, type = "vlm")
+ M <- npred(model)
+ wz <- weights(model, type = "work") # zz unused!!!!!!!
+ etastart <- predict(model)
+ offset <- matrix(model@offset, n_lm, M)
+ new.control <- model@control
pweights <- weights(model, type = "prior")
orig.w <- if (is.numeric(model@extra$orig.w))
model@extra$orig.w else 1
@@ -650,8 +690,8 @@ dfbetavlm <-
model@extra$y.integer else FALSE
- new.control$trace = trace.new
- new.control$maxit = maxit.new
+ new.control$trace <- trace.new
+ new.control$maxit <- maxit.new
dfbeta <- matrix(0, n_lm, p_vlm)
@@ -667,10 +707,10 @@ dfbetavlm <-
flush.console()
}
- w.orig = if (length(orig.w) != n_lm)
+ w.orig <- if (length(orig.w) != n_lm)
rep(orig.w, length.out = n_lm) else
orig.w
- w.orig[ii] = w.orig[ii] * smallno # Relative
+ w.orig[ii] <- w.orig[ii] * smallno # Relative
fit <- vglm.fit(x = X_lm,
X_vlm_arg = X_vlm, # Should be more efficient
@@ -732,28 +772,27 @@ hatvaluesbasic <- function(X_vlm,
M = 1) {
-
if (M > 1)
stop("currently argument 'M' must be 1")
nn <- nrow(X_vlm)
- ncol_X_vlm = ncol(X_vlm)
+ ncol_X_vlm <- ncol(X_vlm)
- XtW = t(c(diagWm) * X_vlm)
+ XtW <- t(c(diagWm) * X_vlm)
UU <- sqrt(diagWm) # Only for M == 1
- UU.X_vlm <- UU * X_vlm
+ UU.X_vlm <- c(UU) * X_vlm # c(UU) okay for M==1
qrSlot <- qr(UU.X_vlm)
Rmat <- qr.R(qrSlot)
- rinv = diag(ncol_X_vlm)
- rinv = backsolve(Rmat, rinv)
+ rinv <- diag(ncol_X_vlm)
+ rinv <- backsolve(Rmat, rinv)
Diag.Hat <- if (FALSE) {
- covun = rinv %*% t(rinv)
+ covun <- rinv %*% t(rinv)
rhs.mat <- covun %*% XtW
colSums(t(X_vlm) * rhs.mat)
} else {
diff --git a/R/mux.q b/R/mux.q
index 6b4620c..892c26c 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,17 +8,19 @@
mux34 <- function(xmat, cc, symmetric = FALSE) {
- if (!is.matrix(xmat))
- xmat <- as.matrix(xmat)
- d <- dim(xmat)
- nnn <- d[1]
- RRR <- d[2]
- if (length(cc) == 1) cc = matrix(cc, 1, 1)
- if (!is.matrix(cc)) stop("'cc' is not a matrix")
- c(dotC(name = "VGAM_C_mux34", as.double(xmat), as.double(cc),
- as.integer(nnn), as.integer(RRR),
- as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
- NAOK = TRUE)$ans)
+ if (!is.matrix(xmat))
+ xmat <- as.matrix(xmat)
+ d <- dim(xmat)
+ nnn <- d[1]
+ RRR <- d[2]
+ if (length(cc) == 1)
+ cc <- matrix(cc, 1, 1)
+ if (!is.matrix(cc))
+ stop("'cc' is not a matrix")
+ c(dotC(name = "VGAM_C_mux34", as.double(xmat), as.double(cc),
+ as.integer(nnn), as.integer(RRR),
+ as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
+ NAOK = TRUE)$ans)
}
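
A plain-R cross-check for the reindented mux34() wrapper. It assumes, though the diff does not state it, that the C routine returns the n quadratic forms xmat[i, ] %*% cc %*% xmat[i, ]:

  mux34_ref <- function(xmat, cc) {
    xmat <- as.matrix(xmat)
    if (length(cc) == 1)
      cc <- matrix(cc, 1, 1)
    rowSums((xmat %*% cc) * xmat)  # element i equals t(x_i) %*% cc %*% x_i
  }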
@@ -27,14 +29,14 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
if(FALSE)
mux34 <- function(xmat, cc, symmetric = FALSE) {
- if (!is.matrix(xmat))
- xmat <- as.matrix(xmat)
- d <- dim(xmat)
- n <- d[1]
- R <- d[2]
- if (length(cc) == 1) cc = matrix(cc, 1, 1)
- if (!is.matrix(cc)) stop("'cc' is not a matrix")
- c(dotFortran(name = "vgamf90mux34", as.double(xmat), as.double(cc),
+ if (!is.matrix(xmat))
+ xmat <- as.matrix(xmat)
+ d <- dim(xmat)
+ n <- d[1]
+ R <- d[2]
+ if (length(cc) == 1) cc = matrix(cc, 1, 1)
+ if (!is.matrix(cc)) stop("'cc' is not a matrix")
+ c(dotFortran(name = "vgamf90mux34", as.double(xmat), as.double(cc),
as.integer(n), as.integer(R),
as.integer(symmetric), ans = as.double(rep(0.0, n)),
NAOK = TRUE)$ans)
@@ -47,22 +49,22 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
mux2 <- function(cc, xmat) {
- if (!is.matrix(xmat))
- xmat <- as.matrix(xmat)
- d <- dim(xmat)
- n <- d[1]
- p <- d[2]
- if (is.matrix(cc))
- cc <- array(cc, c(dim(cc),n))
- d <- dim(cc)
- M <- d[1]
- if (d[2] != p || d[3] != n)
- stop("dimension size inconformable")
- ans <- rep(as.numeric(NA), n*M)
- fred <- dotC(name = "mux2", as.double(cc), as.double(t(xmat)),
+ if (!is.matrix(xmat))
+ xmat <- as.matrix(xmat)
+ d <- dim(xmat)
+ n <- d[1]
+ p <- d[2]
+ if (is.matrix(cc))
+ cc <- array(cc, c(dim(cc), n))
+ d <- dim(cc)
+ M <- d[1]
+ if (d[2] != p || d[3] != n)
+ stop("dimension size inconformable")
+ ans <- rep(as.numeric(NA), n*M)
+ fred <- dotC(name = "mux2", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(p), as.integer(n),
as.integer(M), NAOK = TRUE)
- matrix(fred$ans,n,M,byrow = TRUE)
+ matrix(fred$ans, n, M, byrow = TRUE)
}
@@ -72,21 +74,21 @@ mux2 <- function(cc, xmat) {
mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE) {
- n <- ncol(cc)
+ n <- ncol(cc)
- index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- dimm.value <- nrow(cc) # Usually M or M(M+1)/2
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ dimm.value <- nrow(cc) # Usually M or M(M+1)/2
- ans <- rep(as.numeric(NA), n*M)
- fred <- dotC(name = "mux22", as.double(cc), as.double(t(xmat)),
+ ans <- rep(as.numeric(NA), n*M)
+ fred <- dotC(name = "mux22", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(dimm.value),
as.integer(index$row), as.integer(index$col),
as.integer(n), as.integer(M), wk = double(M*M),
as.integer(as.numeric(upper)), NAOK = TRUE)
- if (!as.matrix) fred$ans else {
- dim(fred$ans) <- c(M, n)
- t(fred$ans)
- }
+ if (!as.matrix) fred$ans else {
+ dim(fred$ans) <- c(M, n)
+ t(fred$ans)
+ }
}
@@ -107,9 +109,9 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
cc <- t(cc)
} else {
n <- dimcc[3]
- if (dimcc[1]!= dimcc[2] ||
- dimx[1]!= dimcc[1] ||
- (length(dimx) == 3 && dimx[3]!= dimcc[3]))
+ if (dimcc[1] != dimcc[2] ||
+ dimx[1] != dimcc[1] ||
+ (length(dimx) == 3 && dimx[3] != dimcc[3]))
stop('input nonconformable')
neltscci <- M*(M+1)/2
}
@@ -121,15 +123,15 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
size <- if (matrix.arg) dimm(r)*n else r*r*n
fred <- dotC(name = "mux5", as.double(cc), as.double(x),
- ans = double(size),
- as.integer(M), as.integer(n), as.integer(r),
- as.integer(neltscci),
- as.integer(dimm(r)),
- as.integer(as.numeric(matrix.arg)),
- double(M*M), double(r*r),
- as.integer(index.M$row), as.integer(index.M$col),
- as.integer(index.r$row), as.integer(index.r$col),
- ok3 = as.integer(1), NAOK = TRUE)
+ ans = double(size),
+ as.integer(M), as.integer(n), as.integer(r),
+ as.integer(neltscci),
+ as.integer(dimm(r)),
+ as.integer(as.numeric(matrix.arg)),
+ double(M*M), double(r*r),
+ as.integer(index.M$row), as.integer(index.M$col),
+ as.integer(index.r$row), as.integer(index.r$col),
+ ok3 = as.integer(1), NAOK = TRUE)
if (fred$ok3 == 0)
stop("can only handle matrix.arg == 1")
@@ -148,21 +150,21 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
mux55 <- function(evects, evals, M) {
- d <- dim(evects)
- n <- ncol(evals)
- if (d[1]!= M || d[2]!= M || d[3]!= n ||
- nrow(evals)!= M || ncol(evals)!= n)
- stop("input nonconformable")
- MM12 <- M*(M+1)/2 # The answer is a full-matrix
- index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ d <- dim(evects)
+ n <- ncol(evals)
+ if (d[1] != M || d[2] != M || d[3] != n ||
+ nrow(evals)!= M || ncol(evals) != n)
+ stop("input nonconformable")
+ MM12 <- M*(M+1)/2 # The answer is a full-matrix
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- fred <- dotC(name = "mux55", as.double(evects), as.double(evals),
+ fred <- dotC(name = "mux55", as.double(evects), as.double(evals),
ans = double(MM12 * n),
double(M*M), double(M*M),
as.integer(index$row), as.integer(index$col),
as.integer(M), as.integer(n), NAOK = TRUE)
- dim(fred$ans) <- c(MM12, n)
- fred$ans
+ dim(fred$ans) <- c(MM12, n)
+ fred$ans
}
@@ -170,24 +172,24 @@ mux55 <- function(evects, evals, M) {
mux7 <- function(cc, x) {
- dimx <- dim(x)
- dimcc <- dim(cc)
- if (dimx[1]!= dimcc[2] ||
- (length(dimx) == 3 && dimx[3]!= dimcc[3]))
- stop('input nonconformable')
- M <- dimcc[1]
- qq <- dimcc[2]
- n <- dimcc[3]
- r <- dimx[2]
- if (is.matrix(x))
- x <- array(x,c(qq,r, n))
-
- ans <- array(NA, c(M, r, n))
- fred <- dotC(name = "mux7", as.double(cc), as.double(x),
+ dimx <- dim(x)
+ dimcc <- dim(cc)
+ if (dimx[1]!= dimcc[2] ||
+ (length(dimx) == 3 && dimx[3]!= dimcc[3]))
+ stop('input nonconformable')
+ M <- dimcc[1]
+ qq <- dimcc[2]
+ n <- dimcc[3]
+ r <- dimx[2]
+ if (is.matrix(x))
+ x <- array(x,c(qq,r, n))
+
+ ans <- array(NA, c(M, r, n))
+ fred <- dotC(name = "mux7", as.double(cc), as.double(x),
ans = as.double(ans),
as.integer(M), as.integer(qq), as.integer(n),
as.integer(r), NAOK = TRUE)
- array(fred$ans,c(M, r, n))
+ array(fred$ans, c(M, r, n))
}
@@ -197,7 +199,7 @@ mux7 <- function(cc, x) {
mux9 <- function(cc, xmat) {
if (is.vector(xmat))
- xmat <- cbind(xmat)
+ xmat <- cbind(xmat)
dimxmat <- dim(xmat)
dimcc <- dim(cc)
@@ -221,20 +223,20 @@ mux9 <- function(cc, xmat) {
mux11 <- function(cc, xmat) {
- dcc <- dim(cc)
- d <- dim(xmat)
- M <- dcc[1]
- R <- d[2]
- n <- dcc[3]
- if (M != dcc[2] || d[1] != n*M)
- stop("input inconformable")
-
- Xmat <- array(c(t(xmat)), c(R, M, n))
- Xmat <- aperm(Xmat, c(2, 1, 3)) # Xmat becomes M x R x n
- mat <- mux7(cc, Xmat) # mat is M x R x n
- mat <- aperm(mat, c(2, 1, 3)) # mat becomes R x M x n
- mat <- matrix(c(mat), n*M, R, byrow = TRUE)
- mat
+ dcc <- dim(cc)
+ d <- dim(xmat)
+ M <- dcc[1]
+ R <- d[2]
+ n <- dcc[3]
+ if (M != dcc[2] || d[1] != n*M)
+ stop("input inconformable")
+
+ Xmat <- array(c(t(xmat)), c(R, M, n))
+ Xmat <- aperm(Xmat, c(2, 1, 3)) # Xmat becomes M x R x n
+ mat <- mux7(cc, Xmat) # mat is M x R x n
+ mat <- aperm(mat, c(2, 1, 3)) # mat becomes R x M x n
+ mat <- matrix(c(mat), n*M, R, byrow = TRUE)
+ mat
}
@@ -248,12 +250,12 @@ mux111 <- function(cc, xmat, M, upper = TRUE) {
dimm.value <- nrow(cc) # M or M(M+1)/2
fred <- dotC(name = "mux111", as.double(cc),
- b = as.double(t(xmat)),
- as.integer(M),
- as.integer(R), as.integer(n), wk = double(M * M),
- wk2 = double(M * R), as.integer(index$row),
- as.integer(index$col), as.integer(dimm.value),
- as.integer(as.numeric(upper)), NAOK = TRUE)
+ b = as.double(t(xmat)),
+ as.integer(M),
+ as.integer(R), as.integer(n), wk = double(M * M),
+ wk2 = double(M * R), as.integer(index$row),
+ as.integer(index$col), as.integer(dimm.value),
+ as.integer(as.numeric(upper)), NAOK = TRUE)
ans <- fred$b
dim(ans) <- c(R, nrow(xmat))
@@ -268,6 +270,7 @@ mux111 <- function(cc, xmat, M, upper = TRUE) {
mux15 <- function(cc, xmat) {
+
n <- nrow(xmat)
M <- ncol(xmat)
if (nrow(cc) != M || ncol(cc) != M)
@@ -275,107 +278,107 @@ mux15 <- function(cc, xmat) {
if (max(abs(t(cc)-cc))>0.000001)
stop("argument 'cc' is not symmetric")
- ans <- rep(as.numeric(NA),n*M*M)
+ ans <- rep(as.numeric(NA), n*M*M)
fred <- dotC(name = "mux15", as.double(cc), as.double(t(xmat)),
- ans = as.double(ans), as.integer(M),
- as.integer(n), NAOK = TRUE)
- array(fred$ans,c(M,M,n))
+ ans = as.double(ans), as.integer(M),
+ as.integer(n), NAOK = TRUE)
+ array(fred$ans, c(M, M, n))
}
-
-
vforsub <- function(cc, b, M, n) {
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- dimm.value <- nrow(cc) # M or M(M+1)/2
+ dimm.value <- nrow(cc) # M or M(M+1)/2
- fred <- dotC(name = "vforsub", as.double(cc), b = as.double(t(b)),
+ fred <- dotC(name = "vforsub", as.double(cc), b = as.double(t(b)),
as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
as.integer(dimm.value), NAOK = TRUE)
- dim(fred$b) <- c(M, n)
- fred$b
+ dim(fred$b) <- c(M, n)
+ fred$b
}
vbacksub <- function(cc, b, M, n) {
- index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- dimm.value <- nrow(cc)
- if (nrow(b)!= M || ncol(b)!= n)
- stop("dimension size inconformable")
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ dimm.value <- nrow(cc)
+ if (nrow(b) != M || ncol(b) != n)
+ stop("dimension size inconformable")
- fred <- dotC(name = "vbacksub", as.double(cc), b = as.double(b),
+ fred <- dotC(name = "vbacksub", as.double(cc), b = as.double(b),
as.integer(M), as.integer(n), wk = double(M*M),
as.integer(index$row), as.integer(index$col),
as.integer(dimm.value), NAOK = TRUE)
- if (M == 1) fred$b else {
- dim(fred$b) <- c(M,n)
- t(fred$b)
- }
+ if (M == 1) {
+ fred$b
+ } else {
+ dim(fred$b) <- c(M, n)
+ t(fred$b)
+ }
}
-
-vchol <- function(cc, M, n, silent = FALSE, callno = 0)
-{
+vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
- index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- cc <- t(cc)
- MM <- nrow(cc) # cc is big enough to hold its Cholesky decom.
- fred <- dotC(name = "vchol", cc = as.double(cc), as.integer(M),
+
+ index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ cc <- t(cc)
+ MM <- nrow(cc) # cc is big enough to hold its Cholesky decom.
+
+ fred <- dotC(name = "vchol", cc = as.double(cc), as.integer(M),
as.integer(n), ok = integer(n),
wk = double(M*M), as.integer(index$row),
as.integer(index$col),
as.integer(MM),
NAOK = TRUE)
- failed <- (fred$ok != 1)
- if ((correction.needed <- any(failed))) {
- index <- (1:n)[failed]
- if (!silent) {
- if (length(index) < 11)
- warning("weight matri",
- ifelse(length(index) > 1, "ces ","x "),
- paste(index, collapse = ", "),
- " not positive-definite")
- }
+ failed <- (fred$ok != 1)
+ if ((correction.needed <- any(failed))) {
+ index <- (1:n)[failed]
+ if (!silent) {
+ if (length(index) < 11)
+ warning("weight matri",
+ ifelse(length(index) > 1, "ces ","x "),
+ paste(index, collapse = ", "),
+ " not positive-definite")
}
+ }
- ans <- fred$cc
- dim(ans) <- c(MM, n)
+ ans <- fred$cc
+ dim(ans) <- c(MM, n)
- if (correction.needed) {
- temp <- cc[, index, drop = FALSE]
- tmp777 <- vchol.greenstadt(temp, M = M, silent = silent,
- callno = callno + 1)
+ if (correction.needed) {
+ temp <- cc[, index, drop = FALSE]
+ tmp777 <- vchol.greenstadt(temp, M = M, silent = silent,
+ callno = callno + 1)
- if (length(index) == n) {
- ans = tmp777[1:nrow(ans),,drop = FALSE]
- } else {
+ if (length(index) == n) {
+ ans <- tmp777[1:nrow(ans),,drop = FALSE]
+ } else {
- ans[,index] <- tmp777 # restored 16/10/03
- }
- }
- dim(ans) <- c(MM, n) # Make sure
+ ans[,index] <- tmp777 # restored 16/10/03
+ }
+ }
+ dim(ans) <- c(MM, n) # Make sure
- ans
+ ans
}
@@ -387,65 +390,66 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
- MM <- dim(cc)[1]
- n <- dim(cc)[2]
+ MM <- dim(cc)[1]
+ n <- dim(cc)[2]
- if (!silent)
- cat(paste("Applying Greenstadt modification to ", n, " matri",
- ifelse(n > 1, "ces", "x"), "\n", sep = ""))
+ if (!silent)
+ cat(paste("Applying Greenstadt modification to ", n, " matri",
+ ifelse(n > 1, "ces", "x"), "\n", sep = ""))
- temp <- veigen(cc, M = M) # , mat = TRUE)
- dim(temp$vectors) <- c(M, M, n) # Make sure (when M = 1) for mux5
- dim(temp$values) <- c(M, n) # Make sure (when M = 1) for mux5
+ temp <- veigen(cc, M = M) # , mat = TRUE)
+ dim(temp$vectors) <- c(M, M, n) # Make sure (when M = 1) for mux5
+ dim(temp$values) <- c(M, n) # Make sure (when M = 1) for mux5
- is.neg <- (temp$values < .Machine$double.eps)
- is.pos <- (temp$values > .Machine$double.eps)
- zilch <- (!is.pos & !is.neg)
+ is.neg <- (temp$values < .Machine$double.eps)
+ is.pos <- (temp$values > .Machine$double.eps)
+ zilch <- (!is.pos & !is.neg)
- temp$values <- abs(temp$values)
+ temp$values <- abs(temp$values)
- temp.small.value <- quantile(temp$values[!zilch], prob = 0.15)
- if (callno > 2) {
- temp.small.value <- abs(temp.small.value) * 1.50^callno
+ temp.small.value <- quantile(temp$values[!zilch], prob = 0.15)
+ if (callno > 2) {
+ temp.small.value <- abs(temp.small.value) * 1.50^callno
- small.value <- temp.small.value
+ small.value <- temp.small.value
- temp$values[zilch] <- small.value
+ temp$values[zilch] <- small.value
- }
+ }
- if (callno > 9) {
- warning("taking drastic action; setting all wz to ",
- "scaled versions of the order-M identity matrix")
+ if (callno > 9) {
+ warning("taking drastic action; setting all wz to ",
+ "scaled versions of the order-M identity matrix")
- cc2mean <- abs(colMeans(cc[1:M, , drop = FALSE]))
- temp$values <- matrix(cc2mean, M, n, byrow = TRUE)
- temp$vectors <- array(c(diag(M)), c(M, M, n))
- }
+ cc2mean <- abs(colMeans(cc[1:M, , drop = FALSE]))
+ temp$values <- matrix(cc2mean, M, n, byrow = TRUE)
+ temp$vectors <- array(c(diag(M)), c(M, M, n))
+ }
- temp3 <- mux55(temp$vectors, temp$values, M = M) #, matrix.arg = TRUE)
- ans <- vchol(t(temp3), M = M, n = n, silent = silent,
- callno = callno + 1) #, matrix.arg = TRUE)
+ temp3 <- mux55(temp$vectors, temp$values, M = M) #, matrix.arg = TRUE)
+ ans <- vchol(t(temp3), M = M, n = n, silent = silent,
+ callno = callno + 1) #, matrix.arg = TRUE)
- if (nrow(ans) == MM) ans else ans[1:MM, , drop = FALSE]
+ if (nrow(ans) == MM) ans else ans[1:MM, , drop = FALSE]
}
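
The modification applied above replaces the eigenvalues of a non-positive-definite working-weight matrix by their absolute values, padding near-zero ones, before retrying the Cholesky factorisation. A simplified single-matrix sketch of that idea, ignoring the matrix-band storage and the callno escalation used by the real code:

  greenstadt1 <- function(W, eps = .Machine$double.eps) {
    ed   <- eigen(W, symmetric = TRUE)
    vals <- abs(ed$values)                   # flip any negative eigenvalues
    if (any(vals <= eps))                    # pad (near-)zero eigenvalues
      vals[vals <= eps] <- quantile(vals[vals > eps], probs = 0.15)
    Wfix <- ed$vectors %*% diag(vals, nrow = length(vals)) %*% t(ed$vectors)
    chol(Wfix)                               # Cholesky of the repaired matrix
  }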
-myf = function(x) {
- dotFortran("VGAM_F90_fill9",
+if (FALSE)
+myf <- function(x) {
+ dotFortran("VGAM_F90_fill9",
x = as.double(x), lenx = as.integer(length(x)),
answer = as.double(x),
NAOK = TRUE)$answer
diff --git a/R/nobs.R b/R/nobs.R
index 583b3b3..79407c2 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,19 +1,14 @@
-# These functions are Copyright (C) 1998-2012 T. W. Yee All rights reserved.
+# These functions are
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
+# All rights reserved.
+
-# nobs.R
-# Notes.
-# 1. 20110711 Looked at "NEWS" and found out about nobs().
-# Adding nvar() too while I am at it.
-# ======================================================================
-# 20110711
nobs.vlm <- function(object, type = c("lm", "vlm"), ...) {
-# Notes:
-# 1. with type = "vlm" this is n * M.
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -30,9 +25,6 @@ nobs.vlm <- function(object, type = c("lm", "vlm"), ...) {
-# 20120216; if I have the if() commented out then
-# Error in loadNamespace(package, c(which.lib.loc, lib.loc)) :
-# cyclic namespace dependency detected when loading ‘VGAM’, already loading ‘VGAM’
if (!isGeneric("nobs"))
setGeneric("nobs", function(object, ...)
standardGeneric("nobs"),
@@ -44,24 +36,14 @@ setMethod("nobs", "vlm",
nobs.vlm(object, ...))
-# setMethod("nobs", "vglm",
-# function(object, ...)
-# nobs.vlm(object, ...))
-# setMethod("nobs", "vgam",
-# function(object, ...)
-# nobs.vlm(object, ...))
-# ======================================================================
-# 20110711
-# Here is the 'nvar' methods functions.
-# Tricky for "vgam", "rrvglm", "qrrvglm", "cao", "rcim" objects?
nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
@@ -81,8 +63,6 @@ nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
nvar.vgam <- function(object, type = c("vgam", "zz"), ...) {
-# 20110711
-# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -100,8 +80,6 @@ nvar.vgam <- function(object, type = c("vgam", "zz"), ...) {
nvar.rrvglm <- function(object, type = c("rrvglm", "zz"), ...) {
-# 20110711
-# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -120,8 +98,6 @@ nvar.rrvglm <- function(object, type = c("rrvglm", "zz"), ...) {
nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
-# 20110711
-# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -140,8 +116,6 @@ nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
nvar.cao <- function(object, type = c("cao", "zz"), ...) {
-# 20110711
-# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -160,8 +134,6 @@ nvar.cao <- function(object, type = c("cao", "zz"), ...) {
nvar.rcim <- function(object, type = c("rcim", "zz"), ...) {
-# 20110711
-# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
@@ -221,7 +193,6 @@ setMethod("nvar", "rcim",
nvar.rcim(object, ...))
-# ======================================================================
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 2e18dbe..e193459 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,91 +14,91 @@
-plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRUE,
- se = FALSE, scale = 0,
- raw = TRUE, offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- type.residuals = c("deviance", "working", "pearson", "response"),
- plot.arg = TRUE, which.term = NULL, which.cf = NULL,
- control = plotvgam.control(...),
- varxij = 1, ...)
-{
+plotvgam <-
+ function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRUE,
+ se = FALSE, scale = 0,
+ raw = TRUE, offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ type.residuals = c("deviance", "working", "pearson", "response"),
+ plot.arg = TRUE, which.term = NULL, which.cf = NULL,
+ control = plotvgam.control(...),
+ varxij = 1, ...) {
- missing.control = missing(control)
+ missing.control <- missing(control)
- na.act = x@na.action
- x@na.action = list() # Don't want NAs returned from predict() or resid()
+ na.act <- x@na.action
+ x@na.action <- list() # Don't want NAs returned from predict() or resid()
- if (!is.Numeric(varxij, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for the 'varxij' argument")
- if (any(slotNames(x) == "control")) {
- x@control$varxij = varxij
- }
+ if (!is.Numeric(varxij, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for the 'varxij' argument")
+ if (any(slotNames(x) == "control")) {
+ x@control$varxij <- varxij
+ }
- missing.type.residuals = missing(type.residuals)
- if (mode(type.residuals) != "character" && mode(type.residuals) != "name")
- type.residuals <- as.character(substitute(type.residuals))
- if (!missing.type.residuals)
- type.residuals <- match.arg(type.residuals,
- c("deviance","working","pearson","response"))[1]
+ missing.type.residuals <- missing(type.residuals)
+ if (mode(type.residuals) != "character" && mode(type.residuals) != "name")
+ type.residuals <- as.character(substitute(type.residuals))
+ if (!missing.type.residuals)
+ type.residuals <- match.arg(type.residuals,
+ c("deviance", "working", "pearson", "response"))[1]
- if (!is.Numeric(deriv.arg, integer.valued = TRUE,
- allowable.length = 1) ||
- deriv.arg < 0)
- stop("bad input for the 'deriv' argument")
+ if (!is.Numeric(deriv.arg, integer.valued = TRUE,
+ allowable.length = 1) ||
+ deriv.arg < 0)
+ stop("bad input for the 'deriv' argument")
- if (se && deriv.arg > 0) {
- warning("standard errors not available with derivatives. ",
- "Setting 'se = FALSE'")
- se = FALSE
- }
+ if (se && deriv.arg > 0) {
+ warning("standard errors not available with derivatives. ",
+ "Setting 'se = FALSE'")
+ se <- FALSE
+ }
- preplot.object <- x@preplot
- if (!length(preplot.object)) {
- preplot.object <- preplotvgam(x, newdata = newdata,
- raw = raw,
- deriv.arg = deriv.arg, se = se,
- varxij = varxij)
- }
+ preplot.object <- x@preplot
+ if (!length(preplot.object)) {
+ preplot.object <- preplotvgam(x, newdata = newdata,
+ raw = raw,
+ deriv.arg = deriv.arg, se = se,
+ varxij = varxij)
+ }
- x@preplot = preplot.object
+ x@preplot <- preplot.object
- if (!is.null(residuals) && length(residuals) == 1) {
- if (residuals) {
- if (missing.type.residuals) {
- for(rtype in type.residuals)
- if (!is.null(residuals <- resid(x, type=rtype))) break
- } else {
- residuals=resid(x,typ=type.residuals) #Get the prespecified type
- if (!length(residuals))
- warning("residuals are NULL. Ignoring 'residuals = TRUE'")
- }
- } else {
- residuals <- NULL
- }
- }
-
- if (!missing.control) {
- control = c(plotvgam.control(.include.dots= FALSE, ...),
- control, plotvgam.control(...))
+ if (!is.null(residuals) && length(residuals) == 1) {
+ if (residuals) {
+ if (missing.type.residuals) {
+ for (rtype in type.residuals)
+ if (!is.null(residuals <- resid(x, type = rtype))) break
+ } else {
+ residuals = resid(x, typ = type.residuals)# Get the prespecified type
+ if (!length(residuals))
+ warning("residuals are NULL. Ignoring 'residuals = TRUE'")
+ }
+ } else {
+ residuals <- NULL
}
-
- x@post$plotvgam.control = control # Add it to the object
-
- if (plot.arg)
- plotpreplotvgam(preplot.object, residuals = residuals,
- rugplot = rugplot, scale = scale, se = se,
- offset.arg = offset.arg,
- deriv.arg = deriv.arg,
- overlay = overlay,
- which.term = which.term, which.cf = which.cf,
- control = control)
-
- x@na.action = na.act # Restore it's original value
- invisible(x)
+ }
+
+ if (!missing.control) {
+ control <- c(plotvgam.control(.include.dots = FALSE, ...),
+ control, plotvgam.control(...))
+ }
+
+ x@post$plotvgam.control <- control # Add it to the object
+
+ if (plot.arg)
+ plotpreplotvgam(preplot.object, residuals = residuals,
+ rugplot = rugplot, scale = scale, se = se,
+ offset.arg = offset.arg,
+ deriv.arg = deriv.arg,
+ overlay = overlay,
+ which.term = which.term, which.cf = which.cf,
+ control = control)
+
+ x@na.action <- na.act # Restore its original value
+ invisible(x)
}
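
A hypothetical call sequence for the reformatted plotvgam(); the data are simulated, and vgam(), s() and binomialff() are assumed from VGAM itself:

  library(VGAM)
  set.seed(1)
  dat <- data.frame(x2 = runif(200))
  dat$y <- rbinom(200, size = 1, prob = plogis(sin(4 * dat$x2)))
  fit <- vgam(y ~ s(x2, df = 3), binomialff, data = dat)
  plotvgam(fit, se = TRUE, rugplot = TRUE)  # fitted s(x2) with +/- 2*SE bands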
@@ -117,100 +117,103 @@ ylim.scale <- function(ylim, scale = 0) {
-getallresponses = function(xij) {
- if (!is.list(xij)) return("")
+getallresponses <- function(xij) {
+ if (!is.list(xij))
+ return("")
- allterms = lapply(xij, terms)
- allres = NULL
- for(ii in 1:length(xij))
- allres = c(allres, as.character(attr(allterms[[ii]],"variables"))[2])
- allres
+ allterms <- lapply(xij, terms)
+ allres <- NULL
+ for(ii in 1:length(xij))
+ allres <- c(allres,
+ as.character(attr(allterms[[ii]],"variables"))[2])
+ allres
}
-headpreplotvgam = function(object, newdata = NULL,
- terms = attr((object@terms)$terms, "term.labels"),
- raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
- varxij = 1) {
- Terms <- terms(object) # 11/8/03; object@terms$terms
- aa <- attributes(Terms)
- all.terms <- labels(Terms)
- xvars <- parse(text=all.terms)
+ headpreplotvgam <-
+ function(object, newdata = NULL,
+ terms = attr((object@terms)$terms, "term.labels"),
+ raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
+ varxij = 1) {
+ Terms <- terms(object) # 11/8/03; object@terms$terms
+ aa <- attributes(Terms)
+ all.terms <- labels(Terms)
+ xvars <- parse(text = all.terms)
- names(xvars) <- all.terms
- terms <- sapply(terms, match.arg, all.terms)
-
- Interactions <- aa$order > 1
- if (any(Interactions)) {
- stop("cannot handle interactions")
- }
-
- xvars <- xvars[terms]
- xnames <- as.list(terms)
- names(xnames) <- terms
- modes <- sapply(xvars, mode)
- for(term in terms[modes != "name"]) {
- evars <- all.names(xvars[term], functions= FALSE, unique= TRUE)
- if (!length(evars))
- next
- xnames[[term]] <- evars
- evars <- parse(text=evars)
- if (length(evars) == 1) {
- evars <- evars[[1]]
- } else if ( length(evars) > 1 &&
- any(getallresponses(object@control$xij) == names(xnames)) ) {
-
-
-
-
- evars <- evars[[varxij]]
- } else {
- evars <- c(as.name("list"), evars)
- mode(evars) <- "call"
- }
- xvars[[term]] <- evars
- }
+ names(xvars) <- all.terms
+ terms <- sapply(terms, match.arg, all.terms)
+
+ Interactions <- aa$order > 1
+ if (any(Interactions)) {
+ stop("cannot handle interactions")
+ }
+
+ xvars <- xvars[terms]
+ xnames <- as.list(terms)
+ names(xnames) <- terms
+ modes <- sapply(xvars, mode)
+ for(term in terms[modes != "name"]) {
+ evars <- all.names(xvars[term], functions= FALSE, unique = TRUE)
+ if (!length(evars))
+ next
+ xnames[[term]] <- evars
+ evars <- parse(text=evars)
+ if (length(evars) == 1) {
+ evars <- evars[[1]]
+ } else if (length(evars) > 1 &&
+ any(getallresponses(object@control$xij) == names(xnames)) ) {
+
+
+
+
+ evars <- evars[[varxij]]
+ } else {
+ evars <- c(as.name("list"), evars)
+ mode(evars) <- "call"
+ }
+ xvars[[term]] <- evars
+ }
- xvars <- c(as.name("list"), xvars)
- mode(xvars) <- "call"
- if (length(newdata)) {
- xvars <- eval(xvars, newdata)
- } else {
- Call <- object@call
- if (!is.null(Call$subset) | !is.null(Call$na.action) |
- !is.null(options("na.action")[[1]])) {
- Rownames <- names(fitted(object))
- if (!(Rl <- length(Rownames)))
- Rownames <- dimnames(fitted(object))[[1]]
-
- if (length(object@x) && !(Rl <- length(Rownames)))
- Rownames <- (dimnames(object@x))[[1]]
- if (length(object@y) && !(Rl <- length(Rownames)))
- Rownames <- (dimnames(object@y))[[1]]
-
- if (!(Rl <- length(Rownames)))
- stop("need to have names for fitted.values ",
- "when call has a 'subset' or 'na.action' argument")
-
- form <- paste("~", unlist(xnames), collapse = "+")
- Mcall <- c(as.name("model.frame"), list(formula =
- terms(as.formula(form)),
- subset = Rownames, na.action = function(x) x))
- mode(Mcall) <- "call"
- Mcall$data <- Call$data
- xvars <- eval(xvars, eval(Mcall))
- } else {
- ecall <- substitute(eval(expression(xvars)))
- ecall$local <- Call$data
- xvars <- eval(ecall)
- }
- }
- list(xnames=xnames, xvars=xvars)
+ xvars <- c(as.name("list"), xvars)
+ mode(xvars) <- "call"
+ if (length(newdata)) {
+ xvars <- eval(xvars, newdata)
+ } else {
+ Call <- object@call
+ if (!is.null(Call$subset) | !is.null(Call$na.action) |
+ !is.null(options("na.action")[[1]])) {
+ Rownames <- names(fitted(object))
+ if (!(Rl <- length(Rownames)))
+ Rownames <- dimnames(fitted(object))[[1]]
+
+ if (length(object@x) && !(Rl <- length(Rownames)))
+ Rownames <- (dimnames(object@x))[[1]]
+ if (length(object@y) && !(Rl <- length(Rownames)))
+ Rownames <- (dimnames(object@y))[[1]]
+
+ if (!(Rl <- length(Rownames)))
+ stop("need to have names for fitted.values ",
+ "when call has a 'subset' or 'na.action' argument")
+
+ form <- paste("~", unlist(xnames), collapse = "+")
+ Mcall <- c(as.name("model.frame"), list(formula =
+ terms(as.formula(form)),
+ subset = Rownames, na.action = function(x) x))
+ mode(Mcall) <- "call"
+ Mcall$data <- Call$data
+ xvars <- eval(xvars, eval(Mcall))
+ } else {
+ ecall <- substitute(eval(expression(xvars)))
+ ecall$local <- Call$data
+ xvars <- eval(ecall)
+ }
+ }
+ list(xnames = xnames, xvars = xvars)
}
@@ -220,17 +223,17 @@ headpreplotvgam = function(object, newdata = NULL,
-preplotvgam = function(object, newdata = NULL,
+preplotvgam <- function(object, newdata = NULL,
terms = attr((object@terms)$terms, "term.labels"),
raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
- varxij=1) {
+ varxij = 1) {
- result1 = headpreplotvgam(object, newdata = newdata, terms = terms,
+ result1 <- headpreplotvgam(object, newdata = newdata, terms = terms,
raw = raw, deriv.arg = deriv.arg, se = se,
varxij = varxij)
- xvars = result1$xvars
- xnames = result1$xnames
+ xvars <- result1$xvars
+ xnames <- result1$xnames
@@ -239,198 +242,206 @@ preplotvgam = function(object, newdata = NULL,
- myxij = object@control$xij
- if (length(myxij)) {
- }
+ myxij <- object@control$xij
+ if (length(myxij)) {
+ }
- }
+ }
pred <- if (length(newdata)) {
- predict(object, newdata, type = "terms",
- raw = raw, se.fit = se, deriv.arg = deriv.arg)
+ predict(object, newdata, type = "terms",
+ raw = raw, se.fit = se, deriv.arg = deriv.arg)
} else {
- predict(object, type = "terms",
- raw = raw, se.fit = se, deriv.arg = deriv.arg)
+ predict(object, type = "terms",
+ raw = raw, se.fit = se, deriv.arg = deriv.arg)
}
fits <- if (is.atomic(pred)) NULL else pred$fit
se.fit <- if (is.atomic(pred)) NULL else pred$se.fit
if (is.null(fits))
- fits <- pred
+ fits <- pred
fred <- attr(fits, "vterm.assign") # NULL for M==1
- Constant = attr(fits, "constant") # NULL if se = TRUE
+ Constant <- attr(fits, "constant") # NULL if se = TRUE
gamplot <- xnames
- loop.var = names(fred)
+ loop.var <- names(fred)
for(term in loop.var) {
- .VGAM.x <- xvars[[term]]
-
- myylab = if (all(substring(term, 1:nchar(term), 1:nchar(term)) != "("))
- paste("partial for", term) else term
-
- TT <- list(x = .VGAM.x,
- y = fits[,(if(is.null(fred)) term else fred[[term]])],
- se.y = if (is.null(se.fit)) NULL else
- se.fit[,(if(is.null(fred)) term else fred[[term]])],
- xlab = xnames[[term]],
- ylab = myylab)
- class(TT) <- "preplotvgam"
- gamplot[[term]] <- TT
- }
- attr(gamplot, "Constant") = Constant
- invisible(gamplot)
+ .VGAM.x <- xvars[[term]]
+
+ myylab <- if (all(substring(term, 1:nchar(term), 1:nchar(term)) != "("))
+ paste("partial for", term) else term
+
+ TT <- list(x = .VGAM.x,
+ y = fits[,(if(is.null(fred)) term else fred[[term]])],
+ se.y = if (is.null(se.fit)) NULL else
+ se.fit[,(if(is.null(fred)) term else fred[[term]])],
+ xlab = xnames[[term]],
+ ylab = myylab)
+ class(TT) <- "preplotvgam"
+ gamplot[[term]] <- TT
+ }
+ attr(gamplot, "Constant") <- Constant
+ invisible(gamplot)
}
+
plotvlm <- function(object, residuals = NULL, rugplot= FALSE, ...) {
- stop("sorry, this function hasn't been written yet")
+ stop("sorry, this function hasn't been written yet")
}
+
plotvglm <- function(x, residuals = NULL, smooths= FALSE,
rugplot= FALSE, id.n= FALSE, ...) {
- stop("this function hasn't been written yet")
+ stop("this function hasn't been written yet")
}
-plotpreplotvgam <- function(x, y = NULL, residuals = NULL,
- rugplot= TRUE, se= FALSE, scale = 0,
- offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- which.term = NULL, which.cf = NULL,
- control = NULL)
-{
- listof <- inherits(x[[1]], "preplotvgam")
- if (listof) {
- TT <- names(x)
- if (is.null(which.term))
- which.term = TT # Plot them all
- plot.no = 0
- for(ii in TT) {
- plot.no = plot.no + 1
- if ((is.character(which.term) && any(which.term == ii)) ||
- (is.numeric(which.term) && any(which.term == plot.no)))
- plotpreplotvgam(x[[ii]], y = NULL,
- residuals, rugplot = rugplot, se = se, scale = scale,
- offset.arg = offset.arg,
- deriv.arg = deriv.arg, overlay = overlay,
- which.cf = which.cf,
- control = control)
- }
- } else {
- dummy <- function(residuals = NULL, rugplot= TRUE, se= FALSE, scale = 0,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.cf = NULL, control=plotvgam.control())
- c(list(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
- offset.arg = offset.arg, deriv.arg = deriv.arg, overlay=overlay,
- which.cf=which.cf), control)
-
- d <- dummy(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
- offset.arg = offset.arg, deriv.arg = deriv.arg,
- overlay=overlay,
- which.cf=which.cf,
- control=control)
-
- uniq.comps <- unique(c(names(x), names(d)))
- Call <- c(as.name("vplot"), c(d, x)[uniq.comps])
- mode(Call) <- "call"
- invisible(eval(Call))
+ plotpreplotvgam <-
+ function(x, y = NULL, residuals = NULL,
+ rugplot= TRUE, se= FALSE, scale = 0,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ which.term = NULL, which.cf = NULL,
+ control = NULL) {
+ listof <- inherits(x[[1]], "preplotvgam")
+ if (listof) {
+ TT <- names(x)
+ if (is.null(which.term))
+ which.term <- TT # Plot them all
+ plot.no <- 0
+ for(ii in TT) {
+ plot.no <- plot.no + 1
+ if ((is.character(which.term) && any(which.term == ii)) ||
+ (is.numeric(which.term) && any(which.term == plot.no)))
+ plotpreplotvgam(x[[ii]], y = NULL,
+ residuals, rugplot = rugplot, se = se,
+ scale = scale,
+ offset.arg = offset.arg,
+ deriv.arg = deriv.arg, overlay = overlay,
+ which.cf = which.cf,
+ control = control)
}
+ } else {
+ dummy <- function(residuals = NULL, rugplot = TRUE, se = FALSE, scale = 0,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ which.cf = NULL, control = plotvgam.control())
+ c(list(residuals=residuals, rugplot = rugplot, se = se, scale = scale,
+ offset.arg = offset.arg, deriv.arg = deriv.arg, overlay = overlay,
+ which.cf = which.cf), control)
+
+ d <- dummy(residuals = residuals, rugplot = rugplot,
+ se = se, scale = scale,
+ offset.arg = offset.arg, deriv.arg = deriv.arg,
+ overlay = overlay,
+ which.cf = which.cf,
+ control = control)
+
+ uniq.comps <- unique(c(names(x), names(d)))
+ Call <- c(as.name("vplot"), c(d, x)[uniq.comps])
+ mode(Call) <- "call"
+ invisible(eval(Call))
+ }
}
vplot.default <- function(x, y, se.y = NULL, xlab = "", ylab = "",
- residuals = NULL, rugplot= FALSE,
- scale = 0, se= FALSE,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
+ residuals = NULL, rugplot = FALSE,
+ scale = 0, se = FALSE,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
which.cf = NULL, ...) {
- switch(data.class(x)[1],
- logical=vplot.factor(factor(x), y, se.y, xlab, ylab, residuals,
+ switch(data.class(x)[1],
+ logical = vplot.factor(factor(x), y, se.y, xlab, ylab, residuals,
rugplot, scale, se,
- offset.arg = offset.arg, overlay=overlay, ...),
- if (is.numeric(x)) {
- vplot.numeric(as.vector(x), y, se.y, xlab, ylab,
- residuals, rugplot, scale, se,
- offset.arg = offset.arg, overlay=overlay, ...)
- } else {
- warning("The \"x\" component of \"", ylab, "\" has class \"",
- class(x), "\"; no vplot() methods available")
- }
- )
+ offset.arg = offset.arg,
+ overlay = overlay, ...),
+ if (is.numeric(x)) {
+ vplot.numeric(as.vector(x), y, se.y, xlab, ylab,
+ residuals, rugplot, scale, se,
+ offset.arg = offset.arg, overlay = overlay, ...)
+ } else {
+ warning("The \"x\" component of \"", ylab, "\" has class \"",
+ class(x), "\"; no vplot() methods available")
+ }
+ ) # End of switch
}
-vplot.list <- function(x, y, se.y = NULL, xlab, ylab,
- residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE,
- offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- which.cf = NULL, ...)
-{
+vplot.list <-
+ function(x, y, se.y = NULL, xlab, ylab,
+ residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ which.cf = NULL, ...) {
- if (is.numeric(x[[1]])) {
- vplot.numeric(x[[1]], y, se.y, xlab, ylab,
- residuals, rugplot, scale, se,
- offset.arg = offset.arg, deriv.arg = deriv.arg,
- overlay=overlay, ...)
- } else
- stop("this function hasn't been written yet")
+ if (is.numeric(x[[1]])) {
+ vplot.numeric(x[[1]], y, se.y, xlab, ylab,
+ residuals, rugplot, scale, se,
+ offset.arg = offset.arg, deriv.arg = deriv.arg,
+ overlay = overlay, ...)
+ } else {
+ stop("this function has not been written yet")
+ }
}
- plotvgam.control = function(
- which.cf = NULL,
- xlim = NULL, ylim = NULL,
- llty = par()$lty,
- slty = "dashed",
- pcex = par()$cex,
- pch = par()$pch,
- pcol = par()$col,
- lcol = par()$col,
- rcol = par()$col,
- scol = par()$col,
- llwd = par()$lwd,
- slwd = par()$lwd,
- add.arg= FALSE,
- one.at.a.time= FALSE,
- .include.dots= TRUE,
- noxmean= FALSE,
- ...) {
-
-
- ans =
- list(which.cf=which.cf,
- xlim=xlim, ylim=ylim,
- llty=llty, slty=slty,
- pcex=pcex, pch=pch,
- pcol=pcol, lcol=lcol, rcol=rcol, scol=scol,
- llwd=llwd, slwd=slwd,
- add.arg=add.arg,
- noxmean=noxmean,
- one.at.a.time=one.at.a.time)
-
- if (.include.dots) {
- c(list(...), ans)
+ plotvgam.control <-
+ function(which.cf = NULL,
+ xlim = NULL, ylim = NULL,
+ llty = par()$lty,
+ slty = "dashed",
+ pcex = par()$cex,
+ pch = par()$pch,
+ pcol = par()$col,
+ lcol = par()$col,
+ rcol = par()$col,
+ scol = par()$col,
+ llwd = par()$lwd,
+ slwd = par()$lwd,
+ add.arg = FALSE,
+ one.at.a.time = FALSE,
+ .include.dots = TRUE,
+ noxmean = FALSE,
+ ...) {
+
+
+ ans <-
+ list(which.cf = which.cf,
+ xlim = xlim, ylim = ylim,
+ llty = llty, slty = slty,
+ pcex = pcex, pch = pch,
+ pcol = pcol, lcol = lcol, rcol = rcol, scol = scol,
+ llwd = llwd, slwd = slwd,
+ add.arg = add.arg,
+ noxmean = noxmean,
+ one.at.a.time = one.at.a.time)
+
+ if (.include.dots) {
+ c(list(...), ans)
+ } else {
+ default.vals <- plotvgam.control()
+ return.list <- list()
+ for(ii in names(default.vals)) {
+ replace.val <- !((length(ans[[ii]]) == length(default.vals[[ii]])) &&
+ (length(default.vals[[ii]]) > 0) &&
+ (is.logical(all.equal(ans[[ii]], default.vals[[ii]]))) &&
+ all.equal(ans[[ii]], default.vals[[ii]]))
+
+ if (replace.val)
+ return.list[[ii]] <- ans[[ii]]
+ }
+ if (length(return.list)) {
+ names(return.list) <- names(return.list)
+ return.list
} else {
- default.vals = plotvgam.control()
- return.list = list()
- for(ii in names(default.vals)) {
- replace.val = !((length(ans[[ii]]) == length(default.vals[[ii]])) &&
- (length(default.vals[[ii]]) > 0) &&
- (is.logical(all.equal(ans[[ii]], default.vals[[ii]]))) &&
- all.equal(ans[[ii]], default.vals[[ii]]))
-
- if (replace.val)
- return.list[[ii]] = ans[[ii]]
- }
- if (length(return.list)) {
- names(return.list) = names(return.list)
- return.list
- } else NULL
+ NULL
}
+ }
}
@@ -457,8 +468,7 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
one.at.a.time= FALSE,
noxmean = FALSE,
separator = ":",
- ...)
-{
+ ...) {
@@ -469,7 +479,7 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
stop("length of 'x' and 'y' do not seem to match")
y <- as.matrix(y)
if (!length(which.cf))
- which.cf = 1:ncol(y) # Added 7/8/04
+ which.cf <- 1:ncol(y) # Added 7/8/04
if (!is.null(se.y))
se.y <- as.matrix(se.y)
@@ -484,355 +494,366 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
}
}
- offset.arg <- matrix(offset.arg, nrow(y), ncol(y), byrow= TRUE)
+ offset.arg <- matrix(offset.arg, nrow(y), ncol(y), byrow = TRUE)
y <- y + offset.arg
ylab <- add.hookey(ylab, deriv.arg)
- if (xmeanAdded <- (se && !is.null(se.y) && !noxmean &&
- all(substring(ylab, 1:nchar(ylab), 1:nchar(ylab)) != "("))) {
- x = c(x, mean(x))
- y = rbind(y, 0 * y[1,])
- se.y = rbind(se.y, 0 * se.y[1,])
- if (!is.null(residuals))
- residuals = rbind(residuals, NA*residuals[1,]) # NAs not plotted
+ if (xmeanAdded <-
+ (se && !is.null(se.y) && !noxmean &&
+ all(substring(ylab, 1:nchar(ylab), 1:nchar(ylab)) != "("))) {
+ x <- c(x, mean(x))
+ y <- rbind(y, 0 * y[1,])
+ se.y <- rbind(se.y, 0 * se.y[1,])
+ if (!is.null(residuals))
+ residuals <- rbind(residuals, NA*residuals[1,]) # NAs not plotted
}
ux <- unique(sort(x))
o <- match(ux, x)
- uy <- y[o,,drop= FALSE]
+ uy <- y[o, , drop = FALSE]
xlim <- range(xlim, ux)
- ylim <- range(ylim, uy[,which.cf], na.rm= TRUE)
+ ylim <- range(ylim, uy[, which.cf], na.rm = TRUE)
if (rugplot) {
- usex = if (xmeanAdded) x[-length(x)] else x
- jx <- jitter(usex[!is.na(usex)])
- xlim <- range(c(xlim, jx))
+ usex <- if (xmeanAdded) x[-length(x)] else x
+ jx <- jitter(usex[!is.na(usex)])
+ xlim <- range(c(xlim, jx))
}
if (se && !is.null(se.y)) {
- se.upper <- uy + 2 * se.y[o,,drop= FALSE]
- se.lower <- uy - 2 * se.y[o,,drop= FALSE]
- ylim <- range(c(ylim, se.upper[,which.cf], se.lower[,which.cf]))
+ se.upper <- uy + 2 * se.y[o,,drop = FALSE]
+ se.lower <- uy - 2 * se.y[o,,drop = FALSE]
+ ylim <- range(c(ylim, se.upper[,which.cf], se.lower[,which.cf]))
}
if (!is.null(residuals)) {
- if (length(residuals) == length(y)) {
- residuals <- as.matrix(y + residuals)
- ylim <- range(c(ylim, residuals[,which.cf]), na.rm= TRUE)
- } else {
- residuals <- NULL
- warning("Residuals do not match 'x' in \"", ylab,
- "\" preplot object")
- }
+ if (length(residuals) == length(y)) {
+ residuals <- as.matrix(y + residuals)
+ ylim <- range(c(ylim, residuals[,which.cf]), na.rm = TRUE)
+ } else {
+ residuals <- NULL
+ warning("Residuals do not match 'x' in \"", ylab,
+ "\" preplot object")
+ }
}
- all.missingy <- all(is.na(y))
+ all.missingy <- all(is.na(y))
- if (all.missingy)
- return()
+ if (all.missingy)
+ return()
- ylim <- ylim.scale(ylim, scale)
+ ylim <- ylim.scale(ylim, scale)
- if (overlay) {
- if (!length(which.cf)) which.cf = 1:ncol(uy) # Added 7/8/04
- if (!add.arg) {
- matplot(ux, uy[,which.cf], type = "n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=ylab, ...)
- }
- matlines(ux, uy[,which.cf],
- lwd=llwd, col=lcol, lty=llty)
- if (!is.null(residuals))
- if (ncol(y) == 1) {
- points(x, residuals, pch=pch, col=pcol, cex=pcex)
- } else {
- matpoints(x, residuals[,which.cf],
- pch=pch, col=pcol, cex=pcex) # add.arg = TRUE,
- }
- if (rugplot)
- rug(jx, col=rcol)
- if (se && !is.null(se.y)) {
- matlines(ux, se.upper[,which.cf], lty= slty, lwd=slwd, col=scol)
- matlines(ux, se.lower[,which.cf], lty= slty, lwd=slwd, col=scol)
- }
- } else {
- YLAB <- ylab
-
- pcex = rep(pcex, len=ncol(uy))
- pch = rep(pch , len=ncol(uy))
- pcol = rep(pcol, len=ncol(uy))
- lcol = rep(lcol, len=ncol(uy))
- llty = rep(llty, len=ncol(uy))
- llwd = rep(llwd, len=ncol(uy))
- slty = rep(slty, len=ncol(uy))
- rcol = rep(rcol, len=ncol(uy))
- scol = rep(scol, len=ncol(uy))
- slwd = rep(slwd, len=ncol(uy))
-
- for(ii in 1:ncol(uy)) {
- if (!length(which.cf) ||
- (length(which.cf) && any(which.cf == ii))) {
-
- if (is.Numeric(ylim0, allowable.length = 2)) {
- ylim = ylim0
- } else {
- ylim <- range(ylim0, uy[,ii], na.rm= TRUE)
- if (se && !is.null(se.y))
- ylim <- range(ylim0, se.lower[,ii], se.upper[,ii],
+ if (overlay) {
+ if (!length(which.cf))
+ which.cf <- 1:ncol(uy) # Added 7/8/04
+ if (!add.arg) {
+ matplot(ux, uy[,which.cf], type = "n",
+ xlim = xlim, ylim = ylim,
+ xlab = xlab, ylab = ylab, ...)
+ }
+ matlines(ux, uy[,which.cf],
+ lwd = llwd, col = lcol, lty = llty)
+ if (!is.null(residuals))
+ if (ncol(y) == 1) {
+ points(x, residuals, pch = pch, col = pcol, cex = pcex)
+ } else {
+ matpoints(x, residuals[,which.cf],
+ pch = pch, col = pcol, cex = pcex) # add.arg = TRUE,
+ }
+ if (rugplot)
+ rug(jx, col = rcol)
+ if (se && !is.null(se.y)) {
+ matlines(ux, se.upper[,which.cf], lty = slty, lwd = slwd, col = scol)
+ matlines(ux, se.lower[,which.cf], lty = slty, lwd = slwd, col = scol)
+ }
+ } else {
+ YLAB <- ylab
+
+ pcex <- rep(pcex, len = ncol(uy))
+ pch <- rep(pch , len = ncol(uy))
+ pcol <- rep(pcol, len = ncol(uy))
+ lcol <- rep(lcol, len = ncol(uy))
+ llty <- rep(llty, len = ncol(uy))
+ llwd <- rep(llwd, len = ncol(uy))
+ slty <- rep(slty, len = ncol(uy))
+ rcol <- rep(rcol, len = ncol(uy))
+ scol <- rep(scol, len = ncol(uy))
+ slwd <- rep(slwd, len = ncol(uy))
+
+ for(ii in 1:ncol(uy)) {
+ if (!length(which.cf) ||
+ (length(which.cf) && any(which.cf == ii))) {
+
+ if (is.Numeric(ylim0, allowable.length = 2)) {
+ ylim <- ylim0
+ } else {
+ ylim <- range(ylim0, uy[, ii], na.rm = TRUE)
+ if (se && !is.null(se.y))
+ ylim <- range(ylim0, se.lower[, ii], se.upper[, ii],
na.rm = TRUE)
- if (!is.null(residuals))
- ylim <- range(c(ylim, residuals[,ii]), na.rm= TRUE)
- ylim <- ylim.scale(ylim, scale)
- }
- if (ncol(uy)>1 && length(separator))
- YLAB <- paste(ylab, separator, ii, sep = "")
- if (!add.arg) {
- if (one.at.a.time) {
- readline("Hit return for the next plot ")
- }
- plot(ux, uy[,ii], type = "n",
- xlim=xlim, ylim=ylim,
- xlab=xlab, ylab=YLAB, ...)
- }
- lines(ux, uy[,ii],
- lwd=llwd[ii], col=lcol[ii], lty=llty[ii])
- if (!is.null(residuals))
- points(x, residuals[,ii], pch=pch[ii],
- col=pcol[ii], cex=pcex[ii])
- if (rugplot)
- rug(jx, col=rcol[ii])
-
- if (se && !is.null(se.y)) {
- lines(ux, se.upper[,ii], lty=slty[ii], lwd=slwd[ii],
- col=scol[ii])
- lines(ux, se.lower[,ii], lty=slty[ii], lwd=slwd[ii],
- col=scol[ii])
- }
+ if (!is.null(residuals))
+ ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
+ ylim <- ylim.scale(ylim, scale)
+ }
+ if (ncol(uy)>1 && length(separator))
+ YLAB <- paste(ylab, separator, ii, sep = "")
+ if (!add.arg) {
+ if (one.at.a.time) {
+ readline("Hit return for the next plot ")
+ }
+ plot(ux, uy[, ii], type = "n",
+ xlim = xlim, ylim = ylim,
+ xlab = xlab, ylab = YLAB, ...)
+ }
+ lines(ux, uy[, ii],
+ lwd = llwd[ii], col = lcol[ii], lty = llty[ii])
+ if (!is.null(residuals))
+ points(x, residuals[, ii], pch = pch[ii],
+ col = pcol[ii], cex = pcex[ii])
+ if (rugplot)
+ rug(jx, col = rcol[ii])
+
+ if (se && !is.null(se.y)) {
+ lines(ux, se.upper[, ii], lty = slty[ii], lwd = slwd[ii],
+ col = scol[ii])
+ lines(ux, se.lower[, ii], lty = slty[ii], lwd = slwd[ii],
+ col = scol[ii])
}
}
}
+ }
}
-vplot.matrix <- function(x, y, se.y = NULL, xlab, ylab,
- residuals = NULL, rugplot= FALSE, scale = 0, se= FALSE,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.cf = NULL, ...) {
- stop("You shouldn't ever call this function!")
+vplot.matrix <-
+ function(x, y, se.y = NULL, xlab, ylab,
+ residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ which.cf = NULL, ...) {
+ stop("You shouldn't ever call this function!")
}
add.hookey <- function(ch, deriv.arg = 0) {
- if (!is.Numeric(deriv.arg, integer.valued = TRUE,
- allowable.length = 1) ||
- deriv.arg < 0)
- stop("bad input for the 'deriv' argument")
-
- if (deriv.arg == 0)
- return(ch)
-
- hookey <- switch(deriv.arg, "'", "''", "'''", "''''",
- "'''''", stop("too high a derivative"))
- nc <- nchar(ch)
- sub <- substring(ch, 1:nc, 1:nc)
- if (nc >= 2 && sub[1] == "s" && sub[2] == "(") {
- paste("s", hookey, substring(ch, 2, nc), sep = "", coll = "")
- } else {
- paste(ch, hookey, sep = "", collapse = "")
- }
+ if (!is.Numeric(deriv.arg, integer.valued = TRUE,
+ allowable.length = 1) ||
+ deriv.arg < 0)
+ stop("bad input for the 'deriv' argument")
+
+ if (deriv.arg == 0)
+ return(ch)
+
+ hookey <- switch(deriv.arg, "'", "''", "'''", "''''",
+ "'''''", stop("too high a derivative"))
+ nc <- nchar(ch)
+ sub <- substring(ch, 1:nc, 1:nc)
+ if (nc >= 2 && sub[1] == "s" && sub[2] == "(") {
+ paste("s", hookey, substring(ch, 2, nc), sep = "", coll = "")
+ } else {
+ paste(ch, hookey, sep = "", collapse = "")
+ }
}
-vplot.factor <- function(x, y, se.y = NULL, xlab, ylab,
- residuals = NULL, rugplot= FALSE, scale = 0,
- se= FALSE, xlim = NULL, ylim = NULL,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.cf = NULL, ...)
-{
- if (deriv.arg>0)
- return(NULL)
+vplot.factor <-
+ function(x, y, se.y = NULL, xlab, ylab,
+ residuals = NULL, rugplot = FALSE, scale = 0,
+ se = FALSE, xlim = NULL, ylim = NULL,
+ offset.arg = 0, deriv.arg = 0, overlay= FALSE,
+ which.cf = NULL, ...) {
+ if (deriv.arg > 0)
+ return(NULL)
- if (length(y)/length(x) != round(length(y)/length(x)))
- stop("length of 'x' and 'y' do not seem to match")
- y <- as.matrix(y)
+ if (length(y)/length(x) != round(length(y)/length(x)))
+ stop("length of 'x' and 'y' do not seem to match")
+ y <- as.matrix(y)
- if (!is.null(se.y))
- se.y <- as.matrix(se.y)
- if (!is.null(se.y) && any(is.na(se.y)))
- se.y <- NULL
+ if (!is.null(se.y))
+ se.y <- as.matrix(se.y)
+ if (!is.null(se.y) && any(is.na(se.y)))
+ se.y <- NULL
- if (!is.null(residuals)) {
- residuals <- as.matrix(residuals)
- if (ncol(residuals) != ncol(y)) {
- warning("ncol(residuals) != ncol(y) so residuals are not plotted")
- residuals <- NULL
- }
+ if (!is.null(residuals)) {
+ residuals <- as.matrix(residuals)
+ if (ncol(residuals) != ncol(y)) {
+ warning("ncol(residuals) != ncol(y) so residuals are not plotted")
+ residuals <- NULL
}
+ }
+
if (overlay) {
- vvplot.factor(x, y,
- se.y = if (is.null(se.y)) NULL else se.y,
- xlab=xlab, ylab=ylab,
- residuals=residuals,
- rugplot=rugplot, scale=scale,
- se=se, xlim=xlim, ylim=ylim, ...)
- } else {
- for(ii in 1:ncol(y)) {
- ylab <- rep(ylab, len=ncol(y))
- if (ncol(y) > 1)
- ylab <- dimnames(y)[[2]]
- vvplot.factor(x, y[,ii,drop= FALSE],
- se.y = if (is.null(se.y)) NULL else se.y[,ii,drop= FALSE],
- xlab=xlab, ylab=ylab[ii],
- residuals= if (is.null(residuals))
- NULL else residuals[,ii,drop= FALSE],
- rugplot=rugplot, scale=scale,
- se=se, xlim=xlim, ylim=ylim, ...)
+ vvplot.factor(x, y,
+ se.y = if (is.null(se.y)) NULL else se.y,
+ xlab = xlab, ylab = ylab,
+ residuals = residuals,
+ rugplot = rugplot, scale = scale,
+ se = se, xlim = xlim, ylim = ylim, ...)
+ } else {
+ for(ii in 1:ncol(y)) {
+ ylab <- rep(ylab, len = ncol(y))
+ if (ncol(y) > 1)
+ ylab <- dimnames(y)[[2]]
+ vvplot.factor(x, y[, ii,drop = FALSE],
+ se.y = if (is.null(se.y)) NULL else
+ se.y[, ii,drop = FALSE],
+ xlab = xlab, ylab = ylab[ii],
+ residuals = if (is.null(residuals))
+ NULL else residuals[, ii,drop = FALSE],
+ rugplot = rugplot, scale = scale,
+ se = se, xlim = xlim, ylim = ylim, ...)
- }
- }
- invisible(NULL)
+ }
+ }
+ invisible(NULL)
}
-vvplot.factor <- function(x, y, se.y = NULL, xlab, ylab,
- residuals = NULL, rugplot= FALSE, scale = 0,
- se= FALSE, xlim = NULL, ylim = NULL,
- ...)
-{
+vvplot.factor <-
+ function(x, y, se.y = NULL, xlab, ylab,
+ residuals = NULL, rugplot = FALSE, scale = 0,
+ se = FALSE, xlim = NULL, ylim = NULL,
+ ...) {
- M <- ncol(y)
- nn <- as.numeric(table(x))
- codex <- as.numeric(x)
- ucodex <- seq(nn)[nn > 0]
- o <- match(ucodex, codex, 0)
+ M <- ncol(y)
+ nn <- as.numeric(table(x))
+ codex <- as.numeric(x)
+ ucodex <- seq(nn)[nn > 0]
+ o <- match(ucodex, codex, 0)
- uy <- y[o,,drop= FALSE]
- ylim <- range(ylim, uy)
- xlim <- range(c(0, sum(nn), xlim))
- rightx <- cumsum(nn)
- leftx <- c(0, rightx[ - length(nn)])
- ux <- (leftx + rightx)/2
- delta <- (rightx - leftx)/8
+ uy <- y[o,,drop = FALSE]
+ ylim <- range(ylim, uy)
+ xlim <- range(c(0, sum(nn), xlim))
+ rightx <- cumsum(nn)
+ leftx <- c(0, rightx[ - length(nn)])
+ ux <- (leftx + rightx)/2
+ delta <- (rightx - leftx)/8
- jx <- runif(length(codex), (ux - delta)[codex], (ux + delta)[codex])
- nnajx <- jx[!is.na(jx)]
+ jx <- runif(length(codex), (ux - delta)[codex], (ux + delta)[codex])
+ nnajx <- jx[!is.na(jx)]
- if (rugplot)
- xlim <- range(c(xlim, nnajx))
- if (se && !is.null(se.y)) {
- se.upper <- uy + 2 * se.y[o,,drop= FALSE]
- se.lower <- uy - 2 * se.y[o,,drop= FALSE]
- ylim <- range(c(ylim, se.upper, se.lower))
- }
- if (!is.null(residuals)) {
- if (length(residuals) == length(y)) {
- residuals <- y + residuals
- ylim <- range(c(ylim, residuals))
- } else {
- residuals <- NULL
- warning("Residuals do not match 'x' in \"", ylab,
- "\" preplot object")
- }
- }
- ylim <- ylim.scale(ylim, scale)
- Levels <- levels(x)
- if (!all(nn)) {
- keep <- nn > 0
- nn <- nn[keep]
- ux <- ux[keep]
- delta <- delta[keep]
- leftx <- leftx[keep]
- rightx <- rightx[keep]
- Levels <- Levels[keep]
+ if (rugplot)
+ xlim <- range(c(xlim, nnajx))
+ if (se && !is.null(se.y)) {
+ se.upper <- uy + 2 * se.y[o,, drop = FALSE]
+ se.lower <- uy - 2 * se.y[o,, drop = FALSE]
+ ylim <- range(c(ylim, se.upper, se.lower))
+ }
+ if (!is.null(residuals)) {
+ if (length(residuals) == length(y)) {
+ residuals <- y + residuals
+ ylim <- range(c(ylim, residuals))
+ } else {
+ residuals <- NULL
+ warning("Residuals do not match 'x' in \"", ylab,
+ "\" preplot object")
}
-
-
- about <- function(ux, M, Delta=1/M) {
- if (M == 1) return(cbind(ux))
- ans <- matrix(as.numeric(NA), length(ux), M)
- grid <- seq(-Delta, Delta, len=M)
- for(ii in 1:M) {
- ans[,ii] <- ux + grid[ii]
- }
- ans
+ }
+ ylim <- ylim.scale(ylim, scale)
+ Levels <- levels(x)
+ if (!all(nn)) {
+ keep <- nn > 0
+ nn <- nn[keep]
+ ux <- ux[keep]
+ delta <- delta[keep]
+ leftx <- leftx[keep]
+ rightx <- rightx[keep]
+ Levels <- Levels[keep]
+ }
+
+
+ about <- function(ux, M, Delta = 1 / M) {
+ if (M == 1) return(cbind(ux))
+ ans <- matrix(as.numeric(NA), length(ux), M)
+ grid <- seq(-Delta, Delta, len = M)
+ for(ii in 1:M) {
+ ans[, ii] <- ux + grid[ii]
}
-
- uxx <- about(ux, M, Delta=min(delta))
- xlim <- range(c(xlim, uxx))
-
- matplot(ux, uy, ylim=ylim, xlim=xlim, xlab = "", type = "n",
- ylab=ylab, axes= FALSE, frame.plot = TRUE, ...)
- mtext(xlab, 1, 2, adj = 0.5)
- axis(side=2)
- lpos <- par("mar")[3]
- mtext(Levels, side=3, line=lpos/2, at=ux, adj = 0.5, srt=45)
-
- for(ii in 1:M)
- segments(uxx[,ii] - 1.0 * delta, uy[,ii],
- uxx[,ii] + 1.0 * delta, uy[,ii])
- if (!is.null(residuals)) {
- for(ii in 1:M) {
- jux <- uxx[,ii]
- jux <- jux[codex]
- jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
- if (M == 1) points(jux, residuals[,ii]) else
- points(jux, residuals[,ii], pch=as.character(ii))
- }
+ ans
+ }
+
+ uxx <- about(ux, M, Delta = min(delta))
+ xlim <- range(c(xlim, uxx))
+
+ matplot(ux, uy, ylim = ylim, xlim = xlim, xlab = "", type = "n",
+ ylab = ylab, axes = FALSE, frame.plot = TRUE, ...)
+ mtext(xlab, 1, 2, adj = 0.5)
+ axis(side = 2)
+ lpos <- par("mar")[3]
+ mtext(Levels, side = 3, line = lpos/2, at = ux, adj = 0.5, srt = 45)
+
+ for(ii in 1:M)
+ segments(uxx[, ii] - 1.0 * delta, uy[, ii],
+ uxx[, ii] + 1.0 * delta, uy[, ii])
+ if (!is.null(residuals)) {
+ for(ii in 1:M) {
+ jux <- uxx[, ii]
+ jux <- jux[codex]
+ jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
+ if (M == 1) points(jux, residuals[, ii]) else
+ points(jux, residuals[, ii], pch = as.character(ii))
}
- if (rugplot)
- rug(nnajx)
- if (se) {
- for(ii in 1:M) {
- segments(uxx[,ii]+0.5*delta, se.upper[,ii],
- uxx[,ii]-0.5*delta, se.upper[,ii])
- segments(uxx[,ii]+0.5*delta, se.lower[,ii],
- uxx[,ii]-0.5*delta, se.lower[,ii])
- segments(uxx[,ii], se.lower[,ii], uxx[,ii], se.upper[,ii], lty=2)
- }
+ }
+ if (rugplot)
+ rug(nnajx)
+ if (se) {
+ for(ii in 1:M) {
+ segments(uxx[, ii]+0.5*delta, se.upper[, ii],
+ uxx[, ii]-0.5*delta, se.upper[, ii])
+ segments(uxx[, ii]+0.5*delta, se.lower[, ii],
+ uxx[, ii]-0.5*delta, se.lower[, ii])
+ segments(uxx[, ii], se.lower[, ii],
+ uxx[, ii], se.upper[, ii], lty = 2)
}
- invisible(diff(ylim))
+ }
+ invisible(diff(ylim))
}
if(!isGeneric("vplot"))
-setGeneric("vplot", function(x, ...) standardGeneric("vplot"))
+ setGeneric("vplot", function(x, ...) standardGeneric("vplot"))
+
+
setMethod("vplot", "factor", function(x, ...)
- vplot.factor(x, ...))
+ vplot.factor(x, ...))
setMethod("vplot", "list", function(x, ...)
- vplot.list(x, ...))
+ vplot.list(x, ...))
setMethod("vplot", "matrix", function(x, ...)
- vplot.matrix(x, ...))
+ vplot.matrix(x, ...))
setMethod("vplot", "numeric", function(x, ...)
- vplot.numeric(x, ...))
+ vplot.numeric(x, ...))
setMethod("plot", "vlm",
function(x, y, ...) {
- if (!missing(y)) stop("cannot process the 'y' argument")
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
invisible(plotvlm(x, y, ...))})
setMethod("plot", "vglm",
function(x, y, ...) {
- if (!missing(y)) stop("cannot process the 'y' argument")
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
invisible(plotvglm(x, y, ...))})
setMethod("plot", "vgam",
function(x, y, ...) {
- if (!missing(y)) stop("cannot process the 'y' argument")
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
invisible(plotvgam(x, ...))})
-plotqrrvglm = function(object,
+plotqrrvglm <- function(object,
rtype = c("response", "pearson", "deviance", "working"),
ask = FALSE,
main = paste(Rtype, "residuals vs latent variable(s)"),
@@ -848,14 +869,14 @@ plotqrrvglm = function(object,
res <- resid(object, type = rtype)
my.ylab <- if (length(object@misc$ynames)) object@misc$ynames else
- rep(" ", len = M)
+ rep(" ", len = M)
Rtype <- switch(rtype, pearson = "Pearson", response = "Response",
- deviance = "Deviance", working = "Working")
+ deviance = "Deviance", working = "Working")
done <- 0
for(rr in 1:Rank)
for(ii in 1:M) {
- plot(Coef.object@lv[,rr], res[,ii],
+ plot(Coef.object@lv[, rr], res[, ii],
xlab = paste(xlab, if (Rank == 1) "" else rr, sep = ""),
ylab = my.ylab[ii],
main = main, ...)
@@ -877,9 +898,9 @@ setMethod("plot", "qrrvglm", function(x, y, ...)
-put.caption = function(text.arg = "(a)",
- w.x = c(0.50, 0.50),
- w.y = c(0.07, 0.93), ...) {
+put.caption <- function(text.arg = "(a)",
+ w.x = c(0.50, 0.50),
+ w.y = c(0.07, 0.93), ...) {
text(text.arg,
x = weighted.mean(par()$usr[1:2], w = w.x),
y = weighted.mean(par()$usr[3:4], w = w.y), ...)
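As a quick orientation for the plot.vglm.q changes above: put.caption() simply calls text() at a point defined by weighted means of the current plot region, so the default weights place the label at the top centre. A minimal sketch of a call (the plot itself is arbitrary and not part of this commit):

    plot(cars$speed, cars$dist)              # any existing base-graphics plot
    put.caption("(a)")                       # default weights: top centre of the plot region
    put.caption("(b)", w.x = c(0.9, 0.1),    # heavier weight on usr[1] pulls the label
                w.y = c(0.5, 0.5))           # towards the left edge, at mid height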
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index d2569c4..7a5434e 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,163 +12,164 @@ predict.vgam <- function(object, newdata = NULL,
raw = FALSE,
all = TRUE, offset = 0,
untransform = FALSE,
- dispersion = NULL, ...)
-{
- if (missing(newdata)) {
- newdata <- NULL
- } else {
- newdata <- as.data.frame(newdata)
+ dispersion = NULL, ...) {
+ newdata <- if (missing(newdata)) {
+ NULL
+ } else {
+ as.data.frame(newdata)
+ }
+ no.newdata <- (length(newdata) == 0)
+
+ na.act <- object@na.action
+ object@na.action <- list()
+
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("link", "response", "terms"))[1]
+
+
+ if (untransform &&
+ (type != "link" || se.fit || deriv.arg != 0 || offset != 0))
+ stop("argument 'untransform = TRUE' only if type='link', ",
+ "se.fit = FALSE, deriv = 0")
+
+ if (raw && type!="terms")
+ stop("'raw = TRUE' only works when 'type = \"terms\"'")
+
+ if (!is.numeric(deriv.arg) || deriv.arg < 0 ||
+ deriv.arg != round(deriv.arg) || length(deriv.arg) > 1)
+ stop("bad input for the 'deriv' argument")
+
+ if (deriv.arg > 0 && type!="terms")
+ stop("'deriv>0' can only be specified if 'type=\"terms\"'")
+
+ if (deriv.arg != 0 && !(type != "response" && !se.fit))
+ stop("argument 'deriv' only works with type != 'response' and ",
+ "se.fit = FALSE")
+
+ if (se.fit && length(newdata))
+ stop("cannot specify 'se.fit = TRUE' when argument 'newdata' ",
+ "is assigned")
+
+
+ tt <- terms(object) # 11/8/03; object@terms$terms
+
+ ttf <- attr(tt, "factors")
+ tto <- attr(tt, "order")
+ intercept <- attr(tt, "intercept")
+ if (!intercept)
+ stop("an intercept is assumed")
+
+ M <- object@misc$M
+ Blist <- object@constraints
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if (intercept)
+ ncolBlist <- ncolBlist[-1]
+ if (raw) {
+ Blist <- canonical.Blist(Blist)
+ object@constraints <- Blist
+ }
+
+ if (!length(newdata)) {
+ if (type == "link") {
+ if (se.fit) {
+ stop("cannot handle this option (se.fit = TRUE) currently")
+ } else {
+ answer <- if (length(na.act)) {
+ napredict(na.act[[1]], object@predictors)
+ } else {
+ object@predictors
+ }
+ if (untransform)
+ return(untransformVGAM(object, answer)) else
+ return(answer)
+ }
+ } else
+ if (type == "response") {
+ if (se.fit) {
+ stop("cannot handle this option (se.fit = TRUE) currently")
+ } else {
+ if (length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
+ }
+ }
}
- no.newdata = length(newdata) == 0
- na.act = object at na.action
- object at na.action = list()
+ predictor <- predict.vlm(object,
+ type = "terms",
+ se.fit = se.fit,
+ terms.arg = terms.arg,
+ raw = raw,
+ all = all, offset = offset,
+ dispersion = dispersion, ...) # deriv.arg = deriv.arg,
- if (mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type, c("link", "response", "terms"))[1]
+ newdata <- model.matrixvlm(object, type = "lm")
- if (untransform &&
- (type != "link" || se.fit || deriv.arg != 0 || offset != 0))
- stop("argument 'untransform = TRUE' only if type='link', ",
- "se.fit = FALSE, deriv = 0")
+ } else {
- if (raw && type!="terms")
- stop("'raw = TRUE' only works when 'type = \"terms\"'")
+ temp.type <- if (type == "link") "response" else type
- if (!is.numeric(deriv.arg) || deriv.arg < 0 ||
- deriv.arg != round(deriv.arg) || length(deriv.arg) > 1)
- stop("bad input for the 'deriv' argument")
- if (deriv.arg > 0 && type!="terms")
- stop("'deriv>0' can only be specified if 'type=\"terms\"'")
-
- if (deriv.arg != 0 && !(type!="response" && !se.fit))
- stop("'deriv=' only works with type!='response' and se.fit = FALSE")
-
- if (se.fit && length(newdata))
- stop("cannot specify 'se.fit = TRUE' when 'newdata' is assigned")
-
-
- tt <- terms(object) # 11/8/03; object at terms$terms
-
- ttf <- attr(tt, "factors")
- tto <- attr(tt, "order")
- intercept <- attr(tt, "intercept")
- if (!intercept)
- stop("an intercept is assumed")
-
- M <- object at misc$M
- Blist <- object at constraints
- ncolBlist <- unlist(lapply(Blist, ncol))
- if (intercept)
- ncolBlist <- ncolBlist[-1]
- if (raw) {
- Blist <- canonical.Blist(Blist)
- object at constraints <- Blist
- }
-
- if (!length(newdata)) {
- if (type=="link") {
- if (se.fit) {
- stop("cannot handle this option (se.fit = TRUE) currently")
- } else {
- if (length(na.act)) {
- answer = napredict(na.act[[1]], object at predictors)
- } else {
- answer = object at predictors
- }
- if (untransform)
- return(untransformVGAM(object, answer)) else
- return(answer)
- }
- } else
- if (type=="response") {
- if (se.fit) {
- stop("cannot handle this option (se.fit = TRUE) currently")
- } else {
- if (length(na.act)) {
- return(napredict(na.act[[1]], object at fitted.values))
- } else {
- return(object at fitted.values)
- }
- }
- }
-
- predictor <- predict.vlm(object,
- type = "terms",
- se.fit = se.fit,
- terms.arg = terms.arg,
- raw = raw,
- all = all, offset = offset,
- dispersion = dispersion, ...) # deriv.arg = deriv.arg,
-
- newdata <- model.matrixvlm(object, type = "lm")
+ predictor <- predict.vlm(object, newdata,
+ type = temp.type,
+ se.fit = se.fit,
+ terms.arg = terms.arg,
+ raw = raw,
+ all = all, offset = offset,
+ dispersion = dispersion, ...) # deriv.arg = deriv.arg,
+ }
+ if (deriv.arg > 0)
+ if (se.fit) {
+ predictor$fitted.values <- predictor$fitted.values * 0
+ predictor$se.fit <- predictor$se.fit * NA
} else {
-
- temp.type <- if (type=="link") "response" else type
-
-
- predictor <- predict.vlm(object, newdata,
- type = temp.type,
- se.fit = se.fit,
- terms.arg = terms.arg,
- raw = raw,
- all = all, offset = offset,
- dispersion = dispersion, ...) # deriv.arg = deriv.arg,
+ predictor <- predictor * 0
}
- if (deriv.arg > 0)
- if (se.fit) {
- predictor$fitted.values <- predictor$fitted.values * 0
- predictor$se.fit <- predictor$se.fit * NA
- } else {
- predictor <- predictor * 0
- }
-
-
- if (length(s.xargument <- object@s.xargument)) {
+ if (length(s.xargument <- object@s.xargument)) {
- dnames2 <- dimnames(newdata)[[2]]
- index1 <- match(s.xargument, dnames2, nomatch = FALSE)
- index2 <- match(names(s.xargument), dnames2, nomatch = FALSE)
- index <- index1 | index2
- if (!length(index) || any(!index))
- stop("required variables not found in newdata")
+ dnames2 <- dimnames(newdata)[[2]]
+ index1 <- match(s.xargument, dnames2, nomatch = FALSE)
+ index2 <- match(names(s.xargument), dnames2, nomatch = FALSE)
+ index <- index1 | index2
+ if (!length(index) || any(!index))
+ stop("required variables not found in newdata")
- if (is.null(tmp6 <- attr(if(se.fit) predictor$fitted.values else
- predictor, "vterm.assign"))) {
+ if (is.null(tmp6 <- attr(if(se.fit) predictor$fitted.values else
+ predictor, "vterm.assign"))) {
- Blist <- subconstraints(object@misc$orig.assign,
- object@constraints)
- ncolBlist <- unlist(lapply(Blist, ncol))
- if (intercept)
- ncolBlist <- ncolBlist[-1]
+ Blist <- subconstraints(object@misc$orig.assign,
+ object@constraints)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if (intercept)
+ ncolBlist <- ncolBlist[-1]
- cs <- if (raw) cumsum(c(1, ncolBlist)) else
- cumsum(c(1, M + 0*ncolBlist))
- tmp6 <- vector("list", length(ncolBlist))
- for(ii in 1:length(tmp6))
- tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
- names(tmp6) <- names(ncolBlist)
- }
+ cs <- if (raw) cumsum(c(1, ncolBlist)) else
+ cumsum(c(1, M + 0*ncolBlist))
+ tmp6 <- vector("list", length(ncolBlist))
+ for(ii in 1:length(tmp6))
+ tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
+ names(tmp6) <- names(ncolBlist)
+ }
- n.s.xargument <- names(s.xargument) # e.g., c("s(x)", "s(x2)")
- for(ii in n.s.xargument) {
+ n.s.xargument <- names(s.xargument) # e.g., c("s(x)", "s(x2)")
+ for(ii in n.s.xargument) {
- fred <- s.xargument[ii]
- if (!any(dimnames(newdata)[[2]] == fred))
- fred <- ii
+ fred <- s.xargument[ii]
+ if (!any(dimnames(newdata)[[2]] == fred))
+ fred <- ii
xx <- newdata[,fred] # [,s.xargument[ii]] # [,nindex[ii]]
ox <- order(xx)
@@ -181,10 +182,10 @@ predict.vgam <- function(object, newdata = NULL,
eta.mat <- if (raw) rawMat else (rawMat %*% t(Blist[[ii]]))
- if (type=="terms") {
+ if (type == "terms") {
hhh <- tmp6[[ii]]
if (se.fit) {
- predictor$fitted.values[,hhh] =
+ predictor$fitted.values[,hhh] <-
predictor$fitted.values[,hhh] + eta.mat
TS <- predictor$sigma^2
@@ -201,159 +202,159 @@ predict.vgam <- function(object, newdata = NULL,
} else {
predictor[,hhh] <- predictor[,hhh] + eta.mat
}
- } else {
- if (se.fit) {
- predictor$fitted.values <- predictor$fitted.values + eta.mat
+ } else {
+ if (se.fit) {
+ predictor$fitted.values <- predictor$fitted.values + eta.mat
- TS <- 1 # out$residual.scale^2
- TS <- predictor$sigma^2
+ TS <- 1 # out$residual.scale^2
+ TS <- predictor$sigma^2
- TT <- ncol(object@var)
- predictor$se.fit <- sqrt(predictor$se.fit^2 + TS *
- object@var %*% rep(1, TT))
+ TT <- ncol(object@var)
+ predictor$se.fit <- sqrt(predictor$se.fit^2 + TS *
+ object@var %*% rep(1, TT))
- } else {
- predictor <- predictor + eta.mat
- }
- }
- }
- }
-
- if (type=="link") {
- if (no.newdata && length(na.act)) {
- return(napredict(na.act[[1]], predictor))
- } else {
- return(predictor)
- }
- } else
- if (type=="response") {
- fv <- object at family@linkinv(if(se.fit) predictor$fitted.values else
- predictor, object at extra)
- if (is.matrix(fv) && is.matrix(object at fitted.values))
- dimnames(fv) <- list(dimnames(fv)[[1]],
- dimnames(object at fitted.values)[[2]])
- if (is.matrix(fv) && ncol(fv)==1)
- fv <- c(fv)
- if (no.newdata && length(na.act)) {
- if (se.fit) {
- fv = napredict(na.act[[1]], fv)
- } else {
- fv = napredict(na.act[[1]], fv)
- }
- }
- if (se.fit) {
- return(list(fit = fv, se.fit = fv*NA))
- } else {
- return(fv)
- }
- } else {
- if (deriv.arg >= 1)
- if (se.fit) {
- attr(predictor$fitted.values, "constant") <- NULL
} else {
- attr(predictor, "constant") <- NULL
+ predictor <- predictor + eta.mat
}
+ }
+ }
+ }
- if (deriv.arg >= 1) {
- v = attr(if(se.fit) predictor$fitted.values else
- predictor, "vterm.assign")
- is.lin <- is.linear.term(names(v))
- coefmat <- coefvlm(object, matrix.out = TRUE)
- ord <- 0
- for(ii in names(v)) {
- ord <- ord + 1
- index <- v[[ii]]
- lindex <- length(index)
- if (is.lin[ii]) {
- if (tto[ord] > 1 || (length(ttf) && ttf[ii,ii])) {
- if (se.fit) {
- predictor$fitted.values[,index] =
- if (tto[ord]>1) NA else NA
- } else {
- predictor[,index] <- if (tto[ord]>1) NA else NA
- }
- } else {
- ans <- coefmat[ii, 1:lindex]
- if (se.fit) {
- predictor$fitted.values[,index] =
- if (deriv.arg == 1)
- matrix(ans, ncol = lindex, byrow = TRUE) else 0
- } else {
- predictor[,index] <- if (deriv.arg==1)
- matrix(ans, ncol = lindex, byrow = TRUE) else 0
- }
- }
- } else
- if (length(s.xargument) && any(n.s.xargument == ii)) {
- ans <- coefmat[ii, 1:lindex]
- if (se.fit) {
- predictor$fitted.values[,index] =
- predictor$fitted.values[,index] +
- (if(deriv.arg == 1)
- matrix(ans, nrow=nrow(predictor$fitted.values),
- ncol=lindex, byrow = TRUE) else 0)
- } else {
- predictor[, index] <- predictor[, index] +
- (if(deriv.arg == 1)
- matrix(ans, nrow=nrow(predictor),
- ncol=lindex, byrow = TRUE) else 0)
- }
+ if (type == "link") {
+ if (no.newdata && length(na.act)) {
+ return(napredict(na.act[[1]], predictor))
+ } else {
+ return(predictor)
+ }
+ } else
+ if (type == "response") {
+ fv <- object@family@linkinv(if(se.fit) predictor$fitted.values else
+ predictor, object@extra)
+ if (is.matrix(fv) && is.matrix(object@fitted.values))
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ if (is.matrix(fv) && ncol(fv) == 1)
+ fv <- c(fv)
+ if (no.newdata && length(na.act)) {
+ fv <- if (se.fit) {
+ napredict(na.act[[1]], fv)
+ } else {
+ napredict(na.act[[1]], fv)
+ }
+ }
+ if (se.fit) {
+ return(list(fit = fv, se.fit = fv*NA))
+ } else {
+ return(fv)
+ }
+ } else {
+ if (deriv.arg >= 1)
+ if (se.fit) {
+ attr(predictor$fitted.values, "constant") <- NULL
+ } else {
+ attr(predictor, "constant") <- NULL
+ }
+
+ if (deriv.arg >= 1) {
+ v <- attr(if(se.fit) predictor$fitted.values else
+ predictor, "vterm.assign")
+ is.lin <- is.linear.term(names(v))
+ coefmat <- coefvlm(object, matrix.out = TRUE)
+ ord <- 0
+ for(ii in names(v)) {
+ ord <- ord + 1
+ index <- v[[ii]]
+ lindex <- length(index)
+ if (is.lin[ii]) {
+ if (tto[ord] > 1 || (length(ttf) && ttf[ii,ii])) {
+ if (se.fit) {
+ predictor$fitted.values[,index] <-
+ if (tto[ord]>1) NA else NA
+ } else {
+ predictor[,index] <- if (tto[ord]>1) NA else NA
+ }
+ } else {
+ ans <- coefmat[ii, 1:lindex]
+ if (se.fit) {
+ predictor$fitted.values[,index] <-
+ if (deriv.arg == 1)
+ matrix(ans, ncol = lindex, byrow = TRUE) else 0
} else {
- cat("Derivatives of term ", ii, "are unknown\n")
- if (se.fit) {
- predictor$fitted.values[,index] <- NA
- } else {
- predictor[,index] <- NA
- }
+ predictor[,index] <- if (deriv.arg == 1)
+ matrix(ans, ncol = lindex, byrow = TRUE) else 0
}
}
+ } else
+ if (length(s.xargument) && any(n.s.xargument == ii)) {
+ ans <- coefmat[ii, 1:lindex]
+ if (se.fit) {
+ predictor$fitted.values[,index] <-
+ predictor$fitted.values[,index] +
+ (if(deriv.arg == 1)
+ matrix(ans, nrow = nrow(predictor$fitted.values),
+ ncol = lindex, byrow = TRUE) else 0)
+ } else {
+ predictor[, index] <- predictor[, index] +
+ (if(deriv.arg == 1)
+ matrix(ans, nrow = nrow(predictor),
+ ncol = lindex, byrow = TRUE) else 0)
+ }
+ } else {
+ cat("Derivatives of term ", ii, "are unknown\n")
+ if (se.fit) {
+ predictor$fitted.values[,index] <- NA
+ } else {
+ predictor[,index] <- NA
+ }
+ }
}
+ }
- if (no.newdata && length(na.act)) {
- if (se.fit) {
- predictor$fitted.values = napredict(na.act[[1]],
- predictor$fitted.values)
- predictor$se.fit = napredict(na.act[[1]], predictor$se.fit)
- } else {
- predictor = napredict(na.act[[1]], predictor)
- }
- }
-
- if (se.fit) {
- attr(predictor$fitted.values, "derivative") <- deriv.arg
- } else {
- attr(predictor, "derivative") <- deriv.arg
- }
+ if (no.newdata && length(na.act)) {
+ if (se.fit) {
+ predictor$fitted.values <- napredict(na.act[[1]],
+ predictor$fitted.values)
+ predictor$se.fit <- napredict(na.act[[1]], predictor$se.fit)
+ } else {
+ predictor <- napredict(na.act[[1]], predictor)
+ }
+ }
- return(predictor)
+ if (se.fit) {
+ attr(predictor$fitted.values, "derivative") <- deriv.arg
+ } else {
+ attr(predictor, "derivative") <- deriv.arg
}
+
+ return(predictor)
+ }
}
- setMethod("predict", "vgam",
- function(object, ...)
- predict.vgam(object, ...))
+ setMethod("predict", "vgam",
+ function(object, ...)
+ predict.vgam(object, ...))
varassign <- function(constraints, n.s.xargument) {
- if (!length(n.s.xargument))
- stop("length(n.s.xargument) must be > 0")
+ if (!length(n.s.xargument))
+ stop("length(n.s.xargument) must be > 0")
- ans <- vector("list", length(n.s.xargument))
+ ans <- vector("list", length(n.s.xargument))
- ncolBlist <- unlist(lapply(constraints, ncol))
+ ncolBlist <- unlist(lapply(constraints, ncol))
- names(ans) <- n.s.xargument
- ptr <- 1
- for(ii in n.s.xargument) {
- temp <- ncolBlist[[ii]]
- ans[[ii]] <- ptr:(ptr + temp - 1)
- ptr <- ptr + temp
- }
- ans
+ names(ans) <- n.s.xargument
+ ptr <- 1
+ for(ii in n.s.xargument) {
+ temp <- ncolBlist[[ii]]
+ ans[[ii]] <- ptr:(ptr + temp - 1)
+ ptr <- ptr + temp
+ }
+ ans
}
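The predict.vgam() rewrite above is a reindentation plus assignment-operator cleanup; the user-facing contract is unchanged: type is one of "link", "response" or "terms", raw = TRUE and deriv > 0 only apply to type = "terms", and se.fit = TRUE is refused together with newdata. A hedged usage sketch ('bdata' and its variables are hypothetical, not part of this commit):

    fit <- vgam(cbind(wins, losses) ~ s(x2), binomialff, data = bdata)
    head(predict(fit))                             # type = "link": matrix of linear predictors
    head(predict(fit, type = "response"))          # fitted probabilities
    head(predict(fit, type = "terms", deriv = 1))  # derivative of each term; 'deriv' matches 'deriv.arg'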
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index c7d592f..a6fc6f5 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,116 +14,116 @@ predictvglm <- function(object,
dispersion = NULL,
untransform = FALSE,
extra = object@extra, ...) {
- na.act = object@na.action
- object@na.action = list()
+ na.act <- object@na.action
+ object@na.action <- list()
- if (missing(extra)) {
- }
+ if (missing(extra)) {
+ }
+
+ if (deriv != 0)
+ stop("'deriv' must be 0 for predictvglm()")
+
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("link", "response", "terms"))[1]
+
+ if (untransform && (type!="link" || se.fit || deriv != 0))
+ stop("argument 'untransform=TRUE' only if 'type=\"link\", ",
+ "se.fit = FALSE, deriv=0'")
+
+
+
+
+ pred <- if (se.fit) {
+ switch(type,
+ response = {
+ warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+ "together; setting 'se.fit = FALSE'")
+ se.fit <- FALSE
+ predictor <- predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv,
+ dispersion = dispersion, ...)
+ fv <- object@family@linkinv(predictor, extra)
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ fv
+ },
+ link = {
+ predict.vlm(object, newdata = newdata,
+ type = "response", se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ },
+ terms = {
+ predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ }) # End of switch
+ } else {
+ if (is.null(newdata)) {
+ switch(type,
+ link = object@predictors,
+ response = object@fitted.values,
+ terms = {
+ predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ })
+ } else {
+ if (!(length(object@offset) == 1 && object@offset == 0))
+ warning("zero offset used")
+ switch(type,
+ response = {
- if (deriv != 0)
- stop("'deriv' must be 0 for predictvglm()")
- if (mode(type) != "character" && mode(type) != "name")
- type = as.character(substitute(type))
- type = match.arg(type, c("link", "response", "terms"))[1]
- if (untransform && (type!="link" || se.fit || deriv != 0))
- stop("argument 'untransform=TRUE' only if 'type=\"link\", ",
- "se.fit = FALSE, deriv=0'")
+ predictor <- predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv,
+ dispersion = dispersion, ...)
- pred = if (se.fit) {
- switch(type,
- response = {
- warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
- "together; setting 'se.fit = FALSE'")
- se.fit = FALSE
- predictor = predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...)
- fv = object at family@linkinv(predictor, extra)
- dimnames(fv) = list(dimnames(fv)[[1]],
- dimnames(object at fitted.values)[[2]])
+ M <- object@misc$M
+
+ fv <- object@family@linkinv(predictor, extra)
+ if (M > 1 && is.matrix(fv)) {
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ } else {
+ }
fv
},
link = {
- predict.vlm(object, newdata=newdata,
- type="response", se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- },
- terms={
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- }) # End of switch
- } else {
- if (is.null(newdata)) {
- switch(type,
- link = object at predictors,
- response = object at fitted.values,
- terms={
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- })
- } else {
- if (!(length(object at offset) == 1 && object at offset == 0))
- warning("zero offset used")
- switch(type,
- response={
-
-
- predictor = predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...)
+ predict.vlm(object, newdata = newdata,
+ type = "response", se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
- M = object at misc$M
- fv = object at family@linkinv(predictor, extra)
- if (M > 1 && is.matrix(fv)) {
- dimnames(fv) = list(dimnames(fv)[[1]],
- dimnames(object at fitted.values)[[2]])
- } else {
- }
- fv
- },
- link = {
-
-
-
- predict.vlm(object, newdata=newdata,
- type="response", se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
-
-
-
- },
- terms = {
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- }) # End of switch
+ },
+ terms = {
+ predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ }) # End of switch
}
- }
+ }
- if (!length(newdata) && length(na.act)) {
- if (se.fit) {
- pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
- pred$se.fit = napredict(na.act[[1]], pred$se.fit)
- } else {
- pred = napredict(na.act[[1]], pred)
- }
+ if (!length(newdata) && length(na.act)) {
+ if (se.fit) {
+ pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred <- napredict(na.act[[1]], pred)
}
-
- if (untransform) untransformVGAM(object, pred) else pred
+ }
+
+ if (untransform) untransformVGAM(object, pred) else pred
}
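For orientation, the reformatted predictvglm() above keeps its earlier behaviour: untransform = TRUE is only honoured with type = "link", se.fit = FALSE and deriv = 0, and it hands the predictors to untransformVGAM() so that each linear predictor is sent back through its own inverse link. A minimal sketch, assuming 'fit' is an existing vglm() fit:

    predict(fit)                        # linear predictors (eta), one column per link
    predict(fit, type = "response")     # fitted values via the family's linkinv
    predict(fit, untransform = TRUE)    # eta with every column back-transformed through its link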
@@ -143,59 +143,59 @@ predict.rrvglm <- function(object,
dispersion = NULL,
extra = object@extra, ...) {
- if (se.fit) {
- stop("11/8/03; predict.rrvglm(..., se.fit=TRUE) not complete yet")
- pred =
- switch(type,
- response = {
- warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
- "together; setting 'se.fit = FALSE'")
- se.fit = FALSE
- predictor = predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...)
- fv = object at family@linkinv(predictor, extra)
- dimnames(fv) = list(dimnames(fv)[[1]],
- dimnames(object at fitted.values)[[2]])
- fv
- },
- link = {
- type = "response"
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- },
- terms={
- predict.vlm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv, dispersion=dispersion, ...)
- }
- )
- } else {
- return(predictvglm(object, newdata=newdata,
- type=type, se.fit=se.fit,
- deriv=deriv,
- dispersion=dispersion, ...))
+ if (se.fit) {
+ stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet")
+ pred <-
+ switch(type,
+ response = {
+ warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+ "together; setting 'se.fit = FALSE'")
+ se.fit <- FALSE
+ predictor <- predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv,
+ dispersion = dispersion, ...)
+ fv <- object@family@linkinv(predictor, extra)
+ dimnames(fv) <- list(dimnames(fv)[[1]],
+ dimnames(object@fitted.values)[[2]])
+ fv
+ },
+ link = {
+ type <- "response"
+ predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ },
+ terms = {
+ predict.vlm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv, dispersion = dispersion, ...)
+ }
+ )
+ } else {
+ return(predictvglm(object, newdata = newdata,
+ type = type, se.fit = se.fit,
+ deriv = deriv,
+ dispersion = dispersion, ...))
}
- na.act = object@na.action
+ na.act <- object@na.action
- if (!length(newdata) && length(na.act)) {
- if (se.fit) {
- pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
- pred$se.fit = napredict(na.act[[1]], pred$se.fit)
- } else {
- pred = napredict(na.act[[1]], pred)
- }
+ if (!length(newdata) && length(na.act)) {
+ if (se.fit) {
+ pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
+ } else {
+ pred <- napredict(na.act[[1]], pred)
}
+ }
- pred
+ pred
}
setMethod("predict", "rrvglm", function(object, ...)
- predict.rrvglm(object, ...))
+ predict.rrvglm(object, ...))
@@ -203,7 +203,7 @@ untransformVGAM <- function(object, pred) {
M <- object@misc$M
Links <- object@misc$link
if (length(Links) != M && length(Links) != 1)
- stop("cannot obtain the link functions to untransform the object")
+ stop("cannot obtain the link functions to untransform the object")
upred <- pred
earg <- object@misc$earg
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 5e05800..0a541cc 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,289 +7,291 @@
-predict.vlm = function(object,
- newdata = NULL,
- type = c("response", "terms"),
- se.fit = FALSE, scale = NULL,
- terms.arg = NULL,
- raw=FALSE,
- dispersion = NULL, ...)
-{
- Xm2 = NULL
- xij.used = length(form2 <- object at misc$form2) || length(object at control$xij)
+predict.vlm <- function(object,
+ newdata = NULL,
+ type = c("response", "terms"),
+ se.fit = FALSE, scale = NULL,
+ terms.arg = NULL,
+ raw = FALSE,
+ dispersion = NULL, ...) {
+ Xm2 <- NULL
+ xij.used <- length(form2 <- object@misc$form2) ||
+ length(object@control$xij)
- if (mode(type) != "character" && mode(type) != "name")
- type = as.character(substitute(type))
- type = match.arg(type, c("response", "terms"))[1]
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("response", "terms"))[1]
- na.act = object@na.action
- object@na.action = list()
+ na.act <- object@na.action
+ object@na.action <- list()
- if (raw && type != "terms")
- stop("sorry, 'raw=TRUE' only works when 'type=\"terms\"'")
+ if (raw && type != "terms")
+ stop("sorry, 'raw=TRUE' only works when 'type=\"terms\"'")
- if (!length(newdata) && type == "response" && !se.fit &&
- length(object@fitted.values)) {
- if (length(na.act)) {
- return(napredict(na.act[[1]], object@fitted.values))
- } else {
- return(object@fitted.values)
- }
+ if (!length(newdata) && type == "response" && !se.fit &&
+ length(object@fitted.values)) {
+ if (length(na.act)) {
+ return(napredict(na.act[[1]], object@fitted.values))
+ } else {
+ return(object@fitted.values)
}
+ }
- ttob = terms(object) # 11/8/03; object@terms$terms
+ ttob <- terms(object) # 11/8/03; object@terms$terms
- if (!length(newdata)) {
- offset = object@offset
+ if (!length(newdata)) {
+ offset <- object@offset
- if (xij.used) {
- bothList = model.matrix(object, type="bothlmlm2")
- X = bothList$X
- Xm2 = bothList$Xm2
- } else {
- X = model.matrix(object, type="lm")
- }
+ if (xij.used) {
+ bothList <- model.matrix(object, type = "bothlmlm2")
+ X <- bothList$X
+ Xm2 <- bothList$Xm2
} else {
+ X <- model.matrix(object, type = "lm")
+ }
+ } else {
- if (is.smart(object) && length(object at smart.prediction)) {
- setup.smart("read", smart.prediction=object at smart.prediction)
- }
-
- X = model.matrix(delete.response(ttob), newdata,
- contrasts = if (length(object at contrasts))
- object at contrasts else NULL,
- xlev = object at xlevels)
- if (xij.used) {
- ttXm2 = terms(form2)
- Xm2 = model.matrix(delete.response(ttXm2), newdata,
- contrasts = if (length(object at contrasts))
- object at contrasts else NULL,
- xlev = object at xlevels)
- }
+ if (is.smart(object) && length(object@smart.prediction)) {
+ setup.smart("read", smart.prediction = object@smart.prediction)
+ }
- if (object at misc$intercept.only && nrow(X) != nrow(newdata)) {
- as.save = attr(X, "assign")
- X = X[rep(1, nrow(newdata)),,drop=FALSE] # =matrix(1,nrow(newdata),1)
- dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
- attr(X, "assign") = as.save # Restored
- }
+ X <- model.matrix(delete.response(ttob), newdata,
+ contrasts = if (length(object@contrasts))
+ object@contrasts else NULL,
+ xlev = object@xlevels)
+ if (xij.used) {
+ ttXm2 <- terms(form2)
+ Xm2 <- model.matrix(delete.response(ttXm2), newdata,
+ contrasts = if (length(object@contrasts))
+ object@contrasts else NULL,
+ xlev = object@xlevels)
+ }
- offset = if (!is.null(off.num <- attr(ttob, "offset"))) {
- eval(attr(ttob, "variables")[[off.num+1]], newdata)
- } else if (!is.null(object at offset))
- eval(object at call$offset, newdata)
+ if (object@misc$intercept.only &&
+ nrow(X) != nrow(newdata)) {
+ as.save <- attr(X, "assign")
+ X <- X[rep(1, nrow(newdata)), , drop = FALSE]
+ dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
+ attr(X, "assign") <- as.save # Restored
+ }
- if (is.smart(object) && length(object at smart.prediction)) {
- wrapup.smart()
- }
+ offset <- if (!is.null(off.num <- attr(ttob, "offset"))) {
+ eval(attr(ttob, "variables")[[off.num+1]], newdata)
+ } else if (!is.null(object@offset))
+ eval(object@call$offset, newdata)
- attr(X, "assign") = attrassigndefault(X, ttob)
- if (length(Xm2))
- attr(Xm2, "assign") = attrassigndefault(Xm2, ttXm2)
+ if (is.smart(object) && length(object@smart.prediction)) {
+ wrapup.smart()
}
+ attr(X, "assign") <- attrassigndefault(X, ttob)
+ if (length(Xm2))
+ attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
+ }
- hasintercept = attr(ttob, "intercept")
- dx1 = dimnames(X)[[1]]
- M = object at misc$M
- Blist = object at constraints
- ncolBlist = unlist(lapply(Blist, ncol))
- if (hasintercept)
- ncolBlist = ncolBlist[-1]
+ hasintercept <- attr(ttob, "intercept")
- xbar = x2bar = NULL
- if (type == "terms" && hasintercept) {
- if (length(object at control$xij)) {
- x2bar = colMeans(Xm2)
- Xm2 = sweep(Xm2, 2, x2bar)
- }
- xbar = colMeans(X)
- X = sweep(X, 2, xbar)
- nac = is.na(object at coefficients)
- if (any(nac)) {
- if (length(object at control$xij))
- stop("cannot handle 'xij' argument when ",
- "there are NAs in the coefficients")
- X = X[, !nac, drop=FALSE]
- xbar = xbar[!nac]
- }
+ dx1 <- dimnames(X)[[1]]
+ M <- object@misc$M
+ Blist <- object@constraints
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if (hasintercept)
+ ncolBlist <- ncolBlist[-1]
+
+ xbar <- x2bar <- NULL
+ if (type == "terms" && hasintercept) {
+ if (length(object@control$xij)) {
+ x2bar <- colMeans(Xm2)
+ Xm2 <- sweep(Xm2, 2, x2bar)
+ }
+ xbar <- colMeans(X)
+ X <- sweep(X, 2, xbar)
+ nac <- is.na(object@coefficients)
+ if (any(nac)) {
+ if (length(object@control$xij))
+ stop("cannot handle 'xij' argument when ",
+ "there are NAs in the coefficients")
+ X <- X[, !nac, drop = FALSE]
+ xbar <- xbar[!nac]
}
+ }
if (!is.null(newdata) && !is.data.frame(newdata))
- newdata = as.data.frame(newdata)
+ newdata <- as.data.frame(newdata)
- nn = if (!is.null(newdata)) nrow(newdata) else object@misc$n
+ nn <- if (!is.null(newdata)) nrow(newdata) else object@misc$n
if (raw) {
- Blist = canonical.Blist(Blist)
- object@constraints = Blist
+ Blist <- canonical.Blist(Blist)
+ object@constraints <- Blist
}
- X_vlm = lm2vlm.model.matrix(X, Blist=Blist, M=M,
- xij=object@control$xij, Xm2=Xm2)
+ X_vlm <- lm2vlm.model.matrix(X, Blist = Blist, M = M,
+ xij = object@control$xij, Xm2 = Xm2)
- attr(X_vlm, "constant") = xbar
- attr(X_vlm, "constant2") = x2bar
+ attr(X_vlm, "constant") <- xbar
+ attr(X_vlm, "constant2") <- x2bar
- coefs = coefvlm(object)
- vasgn = attr(X_vlm, "vassign")
+ coefs <- coefvlm(object)
+ vasgn <- attr(X_vlm, "vassign")
if (type == "terms") {
- nv = names(vasgn)
- if (hasintercept)
- nv = nv[-(1:ncol(object at constraints[["(Intercept)"]]))]
- terms.arg = if (is.null(terms.arg)) nv else terms.arg
-
- index = charmatch(terms.arg, nv)
- if (all(index == 0)) {
- warning("no match found; returning all terms")
- index = 1:length(nv)
- }
- vasgn = vasgn[nv[index]]
+ nv <- names(vasgn)
+ if (hasintercept)
+ nv <- nv[-(1:ncol(object@constraints[["(Intercept)"]]))]
+ terms.arg <- if (is.null(terms.arg)) nv else terms.arg
+
+ index <- charmatch(terms.arg, nv)
+ if (all(index == 0)) {
+ warning("no match found; returning all terms")
+ index <- 1:length(nv)
+ }
+ vasgn <- vasgn[nv[index]]
}
if (any(is.na(object@coefficients)))
stop("cannot handle NAs in 'object@coefficients'")
- dname2 = object@misc$predictors.names
+ dname2 <- object@misc$predictors.names
if (se.fit) {
- object = as(object, "vlm") # Coerce
- fit.summary = summaryvlm(object, dispersion=dispersion)
- sigma = if (is.numeric(fit.summary at sigma)) fit.summary at sigma else
- sqrt(deviance(object) / object at df.residual) # was @rss
- pred = Build.terms.vlm(x = X_vlm, coefs = coefs,
- cov = sigma^2 * fit.summary at cov.unscaled,
- assign = vasgn,
- collapse = type!="terms", M=M,
- dimname=list(dx1, dname2),
- coefmat = coefvlm(object, matrix.out = TRUE))
- pred$df = object at df.residual
- pred$sigma = sigma
+ object <- as(object, "vlm") # Coerce
+ fit.summary <- summaryvlm(object, dispersion = dispersion)
+ sigma <- if (is.numeric(fit.summary@sigma)) fit.summary@sigma else
+ sqrt(deviance(object) / object@df.residual) # was @rss
+ pred <- Build.terms.vlm(x = X_vlm, coefs = coefs,
+ cov = sigma^2 * fit.summary@cov.unscaled,
+ assign = vasgn,
+ collapse = type != "terms", M = M,
+ dimname = list(dx1, dname2),
+ coefmat = coefvlm(object, matrix.out = TRUE))
+ pred$df <- object@df.residual
+ pred$sigma <- sigma
} else {
- pred = Build.terms.vlm(x = X_vlm, coefs = coefs,
- cov = NULL,
- assign = vasgn,
- collapse = type!="terms", M=M,
- dimname = list(dx1, dname2),
- coefmat = coefvlm(object, matrix.out = TRUE))
+ pred <- Build.terms.vlm(x = X_vlm, coefs = coefs,
+ cov = NULL,
+ assign = vasgn,
+ collapse = type != "terms", M = M,
+ dimname = list(dx1, dname2),
+ coefmat = coefvlm(object, matrix.out = TRUE))
}
- constant = attr(pred, "constant")
+ constant <- attr(pred, "constant")
- if (type != "terms" && length(offset) && any(offset != 0)) {
- if (se.fit) {
- pred$fitted.values = pred$fitted.values + offset
- } else {
- pred = pred + offset
- }
+ if (type != "terms" && length(offset) && any(offset != 0)) {
+ if (se.fit) {
+ pred$fitted.values <- pred$fitted.values + offset
+ } else {
+ pred <- pred + offset
}
+ }
- if (type == "terms") {
- Blist = subconstraints(object at misc$orig.assign, object at constraints)
- ncolBlist = unlist(lapply(Blist, ncol))
- if (hasintercept)
- ncolBlist = ncolBlist[-1]
-
- cs = cumsum(c(1, ncolBlist)) # Like a pointer
- for(ii in 1:(length(cs)-1))
- if (cs[ii+1]-cs[ii] > 1)
- for(kk in (cs[ii]+1):(cs[ii+1]-1))
- if (se.fit) {
- pred$fitted.values[,cs[ii]]= pred$fitted.values[,cs[ii]] +
- pred$fitted.values[,kk]
- pred$se.fit[,cs[ii]] = pred$se.fit[,cs[ii]] +
- pred$se.fit[,kk]
- } else {
- pred[,cs[ii]] = pred[,cs[ii]] + pred[,kk]
- }
+ if (type == "terms") {
+ Blist <- subconstraints(object@misc$orig.assign, object@constraints)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ if (hasintercept)
+ ncolBlist <- ncolBlist[-1]
+
+ cs <- cumsum(c(1, ncolBlist)) # Like a pointer
+ for(ii in 1:(length(cs)-1))
+ if (cs[ii+1] - cs[ii] > 1)
+ for(kk in (cs[ii]+1):(cs[ii+1]-1))
+ if (se.fit) {
+ pred$fitted.values[, cs[ii]] <- pred$fitted.values[, cs[ii]] +
+ pred$fitted.values[, kk]
+ pred$se.fit[, cs[ii]] <- pred$se.fit[, cs[ii]] +
+ pred$se.fit[, kk]
+ } else {
+ pred[, cs[ii]] <- pred[, cs[ii]] + pred[, kk]
+ }
if (se.fit) {
- pred$fitted.values = pred$fitted.values[,cs[-length(cs)],drop=FALSE]
- pred$se.fit = pred$se.fit[, cs[-length(cs)], drop=FALSE]
+ pred$fitted.values <- pred$fitted.values[, cs[-length(cs)],
+ drop = FALSE]
+ pred$se.fit <- pred$se.fit[, cs[-length(cs)], drop = FALSE]
} else {
- pred = pred[, cs[-length(cs)], drop=FALSE]
+ pred <- pred[, cs[-length(cs)], drop = FALSE]
}
- pp = if (se.fit) ncol(pred$fitted.values) else ncol(pred)
+ pp <- if (se.fit) ncol(pred$fitted.values) else ncol(pred)
if (se.fit) {
- dimnames(pred$fitted.values) = dimnames(pred$se.fit) = NULL
- dim(pred$fitted.values) = dim(pred$se.fit) = c(M, nn, pp)
- pred$fitted.values = aperm(pred$fitted.values, c(2,1,3))
- pred$se.fit = aperm(pred$se.fit, c(2,1,3))
- dim(pred$fitted.values) = dim(pred$se.fit) = c(nn, M*pp)
+ dimnames(pred$fitted.values) <- dimnames(pred$se.fit) <- NULL
+ dim(pred$fitted.values) <- dim(pred$se.fit) <- c(M, nn, pp)
+ pred$fitted.values <- aperm(pred$fitted.values, c(2, 1, 3))
+ pred$se.fit <- aperm(pred$se.fit, c(2, 1, 3))
+ dim(pred$fitted.values) <- dim(pred$se.fit) <- c(nn, M*pp)
} else {
- dimnames(pred) = NULL # Saves a warning
- dim(pred) = c(M, nn, pp)
- pred = aperm(pred, c(2,1,3))
- dim(pred) = c(nn, M*pp)
+ dimnames(pred) <- NULL # Saves a warning
+ dim(pred) <- c(M, nn, pp)
+ pred <- aperm(pred, c(2, 1, 3))
+ dim(pred) <- c(nn, M*pp)
}
- if (raw) {
- kindex = NULL
- for(ii in 1:pp)
- kindex = c(kindex, (ii-1)*M + (1:ncolBlist[ii]))
- if (se.fit) {
- pred$fitted.values = pred$fitted.values[,kindex,drop=FALSE]
- pred$se.fit = pred$se.fit[,kindex,drop=FALSE]
- } else {
- pred = pred[,kindex,drop=FALSE]
- }
- }
-
- temp = if (raw) ncolBlist else rep(M, length(ncolBlist))
- dd = vlabel(names(ncolBlist), temp, M)
+ if (raw) {
+ kindex <- NULL
+ for(ii in 1:pp)
+ kindex <- c(kindex, (ii-1)*M + (1:ncolBlist[ii]))
if (se.fit) {
- dimnames(pred$fitted.values) =
- dimnames(pred$se.fit) = list(if(length(newdata))
- dimnames(newdata)[[1]] else dx1, dd)
+ pred$fitted.values <- pred$fitted.values[, kindex, drop = FALSE]
+ pred$se.fit <- pred$se.fit[, kindex, drop = FALSE]
} else {
- dimnames(pred) = list(if(length(newdata))
- dimnames(newdata)[[1]] else dx1, dd)
- }
-
- if (!length(newdata) && length(na.act)) {
- if (se.fit) {
- pred$fitted.values = napredict(na.act[[1]], pred$fitted.values)
- pred$se.fit = napredict(na.act[[1]], pred$se.fit)
- } else {
- pred = napredict(na.act[[1]], pred)
- }
+ pred <- pred[, kindex, drop = FALSE]
}
+ }
+
+ temp <- if (raw) ncolBlist else rep(M, length(ncolBlist))
+ dd <- vlabel(names(ncolBlist), temp, M)
+ if (se.fit) {
+ dimnames(pred$fitted.values) <-
+ dimnames(pred$se.fit) <- list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
+ } else {
+ dimnames(pred) <- list(if(length(newdata))
+ dimnames(newdata)[[1]] else dx1, dd)
+ }
- if (!raw)
- cs = cumsum(c(1, M + 0*ncolBlist))
- fred = vector("list", length(ncolBlist))
- for(ii in 1:length(fred))
- fred[[ii]] = cs[ii]:(cs[ii+1]-1)
- names(fred) = names(ncolBlist)
+ if (!length(newdata) && length(na.act)) {
if (se.fit) {
- attr(pred$fitted.values, "vterm.assign") = fred
- attr(pred$se.fit, "vterm.assign") = fred
+ pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
+ pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
} else {
- attr(pred, "vterm.assign") = fred
+ pred <- napredict(na.act[[1]], pred)
}
- } # End of if (type == "terms")
+ }
- if (!is.null(xbar)) {
- if (se.fit) {
- attr(pred$fitted.values, "constant") = constant
- } else {
- attr(pred, "constant") = constant
- }
+ if (!raw)
+ cs <- cumsum(c(1, M + 0*ncolBlist))
+ fred <- vector("list", length(ncolBlist))
+ for(ii in 1:length(fred))
+ fred[[ii]] <- cs[ii]:(cs[ii+1]-1)
+ names(fred) <- names(ncolBlist)
+ if (se.fit) {
+ attr(pred$fitted.values, "vterm.assign") <-
+ attr(pred$se.fit, "vterm.assign") <- fred
+ } else {
+ attr(pred, "vterm.assign") <- fred
+ }
+ } # End of if (type == "terms")
+
+ if (!is.null(xbar)) {
+ if (se.fit) {
+ attr(pred$fitted.values, "constant") <- constant
+ } else {
+ attr(pred, "constant") <- constant
}
+ }
- pred
+ pred
}
@@ -310,16 +312,16 @@ predict.vglm.se <- function(fit, ...) {
H_ss <- hatvalues(fit, type = "centralBlocks") # diag = FALSE
- M = npred(fit)
- nn = nobs(fit, type = "lm")
+ M <- npred(fit)
+ nn <- nobs(fit, type = "lm")
U <- vchol(weights(fit, type = "working"), M = M, n = nn)
- Uarray = array(0, c(M, M, nn))
+ Uarray <- array(0, c(M, M, nn))
ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- MM12 = M * (M + 1) / 2
- for (jay in 1:MM12)
- Uarray[ind1$row.index[jay],
- ind1$col.index[jay], ] <- U[jay,]
+ MM12 <- M * (M + 1) / 2
+ for (jay in 1:MM12)
+ Uarray[ind1$row.index[jay],
+ ind1$col.index[jay], ] <- U[jay, ]
Uinv.array <- apply(Uarray, 3, backsolve, x = diag(M))
dim(Uinv.array) <- c(M, M, nn)
@@ -351,39 +353,40 @@ predict.vglm.se <- function(fit, ...) {
-subconstraints = function(assign, constraints) {
+subconstraints <- function(assign, constraints) {
- ans = vector("list", length(assign))
- if (!length(assign) || !length(constraints))
- stop("assign and/or constraints is empty")
- for(ii in 1:length(assign))
- ans[[ii]] = constraints[[assign[[ii]][1]]]
- names(ans) = names(assign)
- ans
+ ans <- vector("list", length(assign))
+ if (!length(assign) || !length(constraints))
+ stop("assign and/or constraints is empty")
+ for(ii in 1:length(assign))
+ ans[[ii]] <- constraints[[assign[[ii]][1]]]
+ names(ans) <- names(assign)
+ ans
}
-is.linear.term = function(ch) {
- lch = length(ch)
- ans = rep(FALSE, len=lch)
- for(ii in 1:lch) {
- nc = nchar(ch[ii])
- x = substring(ch[ii], 1:nc, 1:nc)
- ans[ii] = all(x!="(" & x!="+" & x!="-" & x!="/" & x!="*" & x!="^")
- }
- names(ans) = ch
- ans
+is.linear.term <- function(ch) {
+ lchar <- length(ch)
+ ans <- rep(FALSE, len = lchar)
+ for(ii in 1:lchar) {
+ nc <- nchar(ch[ii])
+ x <- substring(ch[ii], 1:nc, 1:nc)
+ ans[ii] <- all(x != "(" & x != "+" & x != "-" &
+ x != "/" & x != "*" & x != "^")
+ }
+ names(ans) <- ch
+ ans
}
-canonical.Blist = function(Blist) {
- for(ii in 1:length(Blist)) {
- temp = Blist[[ii]] * 0
- temp[cbind(1:ncol(temp),1:ncol(temp))] = 1
- Blist[[ii]] = temp
- }
- Blist
+canonical.Blist <- function(Blist) {
+ for(ii in 1:length(Blist)) {
+ temp <- Blist[[ii]] * 0
+ temp[cbind(1:ncol(temp),1:ncol(temp))] <- 1
+ Blist[[ii]] <- temp
+ }
+ Blist
}
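The two helpers closing predict.vlm.q are small but load-bearing: is.linear.term() flags term labels containing none of the characters "(", "+", "-", "/", "*" or "^", and canonical.Blist() replaces each constraint matrix by one of the same shape whose leading square block is the identity. The values below follow directly from the definition above:

    is.linear.term(c("x2", "s(x2)", "I(x^2)", "x1:x3"))
    #     x2  s(x2) I(x^2)  x1:x3
    #   TRUE  FALSE  FALSE   TRUE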
diff --git a/R/print.vglm.q b/R/print.vglm.q
index 7c653f5..f42fb83 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -37,7 +37,7 @@ show.vglm <- function(object) {
if (length(deviance(object)))
cat("Residual deviance:", format(deviance(object)), "\n")
- llx = logLik.vlm(object = object)
+ llx <- logLik.vlm(object = object)
if (length(llx))
cat("Log-likelihood:", format(llx), "\n")
@@ -65,7 +65,7 @@ show.vglm <- function(object) {
show.vgam <- function(object) {
- digits = 2
+ digits <- 2
if (!is.null(cl <- object at call)) {
@@ -90,7 +90,7 @@ show.vgam <- function(object) {
if (length(deviance(object)))
cat("Residual deviance:", format(deviance(object)), "\n")
- llx = logLik.vlm(object = object)
+ llx <- logLik.vlm(object = object)
if (length(llx))
cat("Log-likelihood:", format(llx), "\n")
@@ -148,7 +148,7 @@ print.vglm <- function(x, ...) {
if (length(deviance(x)))
cat("Residual deviance:", format(deviance(x)), "\n")
- llx = logLik.vlm(object = x)
+ llx <- logLik.vlm(object = x)
if (length(llx))
cat("Log-likelihood:", format(llx), "\n")
@@ -191,7 +191,7 @@ print.vgam <- function(x, digits = 2, ...) {
if (length(deviance(x)))
cat("Residual deviance:", format(deviance(x)), "\n")
- llx = logLik.vlm(object = x)
+ llx <- logLik.vlm(object = x)
if (length(llx))
cat("Log-likelihood:", format(llx), "\n")
@@ -206,8 +206,7 @@ print.vgam <- function(x, digits = 2, ...) {
- if (FALSE)
-{
+ if (FALSE) {
setMethod("print", "vlm", function(x, ...) print.vlm(x, ...))
setMethod("print", "vglm", function(x, ...) print.vglm(x, ...))
diff --git a/R/print.vlm.q b/R/print.vlm.q
index 5dce615..c65bd44 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 6722a68..bfe7214 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -22,7 +22,8 @@ qrrvglm.control = function(Rank = 1,
imethod = 1,
Maxit.optim = 250,
MUXfactor = rep(7, length = Rank),
- Norrr = ~ 1,
+ noRRR = ~ 1,
+ Norrr = NA,
optim.maxit = 20,
Parscale = if (ITolerances) 0.001 else 1.0,
SD.Cinit = 0.02,
@@ -30,8 +31,18 @@ qrrvglm.control = function(Rank = 1,
trace = TRUE,
Use.Init.Poisson.QO = TRUE,
wzepsilon = .Machine$double.eps^0.75,
- ...)
-{
+ ...) {
+
+
+
+
+
+ if (length(Norrr) != 1 || !is.na(Norrr)) {
+ warning("argument 'Norrr' has been replaced by 'noRRR'. ",
+ "Assigning the latter but using 'Norrr' will become an error in ",
+ "the next VGAM version soon.")
+ noRRR <- Norrr
+ }
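The only user-visible change to qrrvglm.control() in this hunk is the rename of 'Norrr' to 'noRRR'; the old spelling is still accepted for now but routes through the warning above. A sketch of the two call forms (both end up with the same control list):

    ctrl <- qrrvglm.control(Rank = 1, noRRR = ~ 1)   # new argument name
    ctrl <- qrrvglm.control(Rank = 1, Norrr = ~ 1)   # deprecated spelling; warns, then maps to noRRR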
@@ -101,7 +112,7 @@ qrrvglm.control = function(Rank = 1,
allowable.length = 1, positive = TRUE))
stop("bad input for 'wzepsilon'")
- ans = list(
+ ans <- list(
Bestof = Bestof,
checkwz=checkwz,
Cinit = Cinit,
@@ -124,7 +135,7 @@ qrrvglm.control = function(Rank = 1,
Maxit.optim = Maxit.optim,
min.criterion = TRUE, # needed for calibrate
MUXfactor = rep(MUXfactor, length = Rank),
- Norrr = Norrr,
+ noRRR = noRRR,
optim.maxit = optim.maxit,
OptimizeWrtC = TRUE,
Parscale = Parscale,
diff --git a/R/qtplot.q b/R/qtplot.q
index 0f8b80e..0bcdc54 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 55c3f00..7fbf9d1 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,192 +1,204 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-residualsvlm <- function(object,
- type = c("response", "deviance", "pearson", "working"))
-{
- if (mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type, c("response", "deviance", "pearson", "working"))[1]
-
- na.act = object at na.action
- object at na.action = list()
-
- pooled.weight <- object at misc$pooled.weight
- if (is.null(pooled.weight))
- pooled.weight <- FALSE
-
- answer =
- switch(type,
- working = if (pooled.weight) NULL else object at residuals,
- pearson = {
- if (pooled.weight) return(NULL)
- n <- object at misc$n
- M <- object at misc$M
- wz <- weights(object, type="w") # $weights
- if (!length(wz))
- wz <- if (M==1) rep(1, n) else matrix(1, n, M)
-
- if (M==1) {
- if (any(wz < 0))
- warning(paste("some weights are negative.",
- "Their residual will be assigned NA"))
- ans <- sqrt(c(wz)) * c(object at residuals)
- names(ans) <- names(object at residuals)
- ans
- } else {
- wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
- ans <- mux22(wz.sqrt, object@residuals,
- M = M, upper = FALSE)
- dim(ans) <- c(M, n)
- ans <- t(ans)
- dimnames(ans) <- dimnames(object@residuals) # n x M
- ans
- }
- },
- deviance = {
- M <- object@misc$M
- if (M > 1)
- return(NULL)
- ans <- residualsvlm(object, type = "pearson")
+
+
+
+residualsvlm <-
+ function(object,
+ type = c("response", "deviance", "pearson", "working")) {
+
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type, c("response", "deviance", "pearson", "working"))[1]
+
+ na.act <- object@na.action
+ object@na.action <- list()
+
+ pooled.weight <- object@misc$pooled.weight
+ if (is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer <-
+ switch(type,
+ working = if (pooled.weight) NULL else object@residuals,
+ pearson = {
+ if (pooled.weight) return(NULL)
+ n <- object@misc$n
+ M <- object@misc$M
+ wz <- weights(object, type = "w") # $weights
+ if (!length(wz))
+ wz <- if (M == 1) rep(1, n) else matrix(1, n, M)
+
+ if (M == 1) {
+ if (any(wz < 0))
+ warning(paste("some weights are negative.",
+ "Their residual will be assigned NA"))
+ ans <- sqrt(c(wz)) * c(object@residuals)
+ names(ans) <- names(object@residuals)
+ ans
+ } else {
+ wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
+ ans <- mux22(wz.sqrt, object@residuals,
+ M = M, upper = FALSE)
+ dim(ans) <- c(M, n)
+ ans <- t(ans)
+ dimnames(ans) <- dimnames(object@residuals) # n x M
ans
- },
- response = object@residuals
- )
-
- if (length(answer) && length(na.act)) {
- napredict(na.act[[1]], answer)
- } else {
- answer
- }
+ }
+ },
+ deviance = {
+ M <- object@misc$M
+ if (M > 1)
+ return(NULL)
+ ans <- residualsvlm(object, type = "pearson")
+ ans
+ },
+ response = object@residuals
+ )
+
+ if (length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
}
-residualsvglm <- function(object,
- type = c("working", "pearson", "response", "deviance", "ldot"),
- matrix.arg=TRUE)
+residualsvglm <-
+ function(object,
+ type = c("working", "pearson", "response", "deviance", "ldot"),
+ matrix.arg = TRUE)
{
- if (mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type,
- c("working", "pearson", "response", "deviance", "ldot"))[1]
-
- na.act = object@na.action
- object@na.action = list()
-
- pooled.weight <- object@misc$pooled.weight
- if (is.null(pooled.weight))
- pooled.weight <- FALSE
-
- answer =
- switch(type,
- working = if (pooled.weight) NULL else object@residuals,
- pearson = {
- if (pooled.weight) return(NULL)
-
- n <- object@misc$n
- M <- object@misc$M
- wz <- weights(object, type="w") # $weights
-
- if (M==1) {
- if (any(wz < 0))
- warning(paste("some weights are negative.",
- "Their residual will be assigned NA"))
- ans <- sqrt(c(wz)) * c(object@residuals)
- names(ans) <- names(object@residuals)
- ans
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type,
+ c("working", "pearson", "response", "deviance", "ldot"))[1]
+
+ na.act <- object@na.action
+ object@na.action <- list()
+
+ pooled.weight <- object@misc$pooled.weight
+ if (is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer <-
+ switch(type,
+ working = if (pooled.weight) NULL else object@residuals,
+ pearson = {
+ if (pooled.weight) return(NULL)
+
+ n <- object@misc$n
+ M <- object@misc$M
+ wz <- weights(object, type = "w") # $weights
+
+ if (M == 1) {
+ if (any(wz < 0))
+ warning(paste("some weights are negative.",
+ "Their residual will be assigned NA"))
+ ans <- sqrt(c(wz)) * c(object@residuals)
+ names(ans) <- names(object@residuals)
+ ans
+ } else {
+ wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
+ ans <- mux22(wz.sqrt, object@residuals,
+ M = M, upper = FALSE)
+ dim(ans) <- c(M, n)
+ ans <- t(ans)
+ dimnames(ans) <- dimnames(object@residuals) # n x M
+ ans
+ }
+ },
+ deviance = {
+ n <- object@misc$n
+
+ y <- as.matrix(object@y)
+ mu <- object@fitted.values
+
+
+ w <- object@prior.weights
+ if (!length(w))
+ w <- rep(1, n)
+ eta <- object@predictors
+
+ dev.fn <- object@family@deviance # May not 'exist' for that model
+ if (length(body(dev.fn)) > 0) {
+ extra <- object@extra
+ ans <- dev.fn(mu = mu,y = y, w = w,
+ residuals = TRUE, eta = eta, extra)
+ if (length(ans)) {
+ lob <- labels(object@residuals)
+ if (is.list(lob)) {
+ if (is.matrix(ans))
+ dimnames(ans) <- lob else
+ names(ans) <- lob[[1]]
} else {
- wz.sqrt <- matrix.power(wz, M=M, power=0.5, fast=TRUE)
- ans <- mux22(wz.sqrt, object at residuals,
- M = M, upper = FALSE)
- dim(ans) <- c(M,n)
- ans <- t(ans)
- dimnames(ans) <- dimnames(object@residuals) # n x M
- ans
+ names(ans) <- lob
}
- },
- deviance = {
- n <- object@misc$n
-
- y <- as.matrix(object@y)
- mu <- object@fitted.values
-
-
- w <- object@prior.weights
- if (!length(w))
- w <- rep(1, n)
- eta <- object@predictors
-
- dev.fn <- object@family@deviance # May not 'exist' for that model
- if (( is.R() && length(body(dev.fn)) > 0) ||
- (!is.R() && length(args(dev.fn)) > 1)) {
- extra <- object@extra
- ans <- dev.fn(mu=mu,y=y,w=w,residuals=TRUE,eta=eta,extra)
- if (length(ans)) {
- lob <- labels(object@residuals)
- if (is.list(lob)) {
- if (is.matrix(ans)) dimnames(ans) <- lob else
- names(ans) <- lob[[1]]
- } else
- names(ans) <- lob
- }
- ans
- } else NULL
- },
- ldot = {
- n <- object@misc$n
- y <- as.matrix(object@y)
- mu <- object@fitted
- w <- object@prior.weights
- if (is.null(w))
- w <- rep(1, n)
- eta <- object@predictors
- if (!is.null(ll.fn <- object@family@loglikelihood)) {
- extra <- object@extra
- ans <- ll.fn(mu=mu,y=y,w=w,residuals=TRUE,eta=eta,extra)
- if (!is.null(ans)) {
- ans <- c(ans) # ldot residuals can only be a vector
- names(ans) <- labels(object@residuals)
- }
- ans
- } else NULL
- },
- response = {
- y <- object@y
-
- mu <- fitted(object)
-
- true.mu <- object@misc$true.mu
- if (is.null(true.mu))
- true.mu <- TRUE
-
- ans <- if (true.mu) y - mu else NULL
-
-
- if (!matrix.arg && length(ans)) {
- if (ncol(ans) == 1) {
- names.ans = dimnames(ans)[[1]]
- ans = c(ans)
- names(ans) = names.ans
- ans
- } else {
- warning("ncol(ans) is not 1")
- ans
- }
- } else ans
- })
-
- if (length(answer) && length(na.act)) {
- napredict(na.act[[1]], answer)
- } else {
- answer
- }
+ }
+ ans
+ } else {
+ NULL
+ }
+ },
+ ldot = {
+ n <- object@misc$n
+ y <- as.matrix(object@y)
+ mu <- object@fitted
+ w <- object@prior.weights
+ if (is.null(w))
+ w <- rep(1, n)
+ eta <- object@predictors
+ if (!is.null(ll.fn <- object@family@loglikelihood)) {
+ extra <- object@extra
+ ans <- ll.fn(mu = mu,y = y,w = w,
+ residuals = TRUE, eta = eta, extra)
+ if (!is.null(ans)) {
+ ans <- c(ans) # ldot residuals can only be a vector
+ names(ans) <- labels(object@residuals)
+ }
+ ans
+ } else NULL
+ },
+ response = {
+ y <- object@y
+
+ mu <- fitted(object)
+
+ true.mu <- object@misc$true.mu
+ if (is.null(true.mu))
+ true.mu <- TRUE
+
+ ans <- if (true.mu) y - mu else NULL
+
+
+ if (!matrix.arg && length(ans)) {
+ if (ncol(ans) == 1) {
+ names.ans <- dimnames(ans)[[1]]
+ ans <- c(ans)
+ names(ans) <- names.ans
+ ans
+ } else {
+ warning("ncol(ans) is not 1")
+ ans
+ }
+ } else {
+ ans
+ }
+ })
+
+ if (length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
}
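
residualsvglm() above is what the residuals()/resid() generic reaches for a fitted vglm object, so the 'type' choices handled here are available from user code. A hedged usage sketch, assuming the pneumo data frame and cumulative() family that ship with VGAM:

    library(VGAM)
    pneumo <- transform(pneumo, let = log(exposure.time))  # standard VGAM example data
    fit <- vglm(cbind(normal, mild, severe) ~ let,
                cumulative(parallel = TRUE), data = pneumo)
    head(residuals(fit, type = "working"))   # the default type above
    head(residuals(fit, type = "pearson"))
    head(residuals(fit, type = "response"))  # y minus the fitted values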
@@ -194,24 +206,23 @@ residualsvglm <- function(object,
residualsqrrvglm <- function(object,
- type = c("response"),
- matrix.arg = TRUE)
+ type = c("response"),
+ matrix.arg = TRUE)
{
if (mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
- type <- match.arg(type,
- c("response"))[1]
+ type <- match.arg(type, c("response"))[1]
- na.act = object@na.action
- object@na.action = list()
+ na.act <- object@na.action
+ object@na.action <- list()
pooled.weight <- object@misc$pooled.weight
if (is.null(pooled.weight))
pooled.weight <- FALSE
- answer =
+ answer <-
switch(type,
working = if (pooled.weight) NULL else object@residuals,
pearson = {
@@ -229,16 +240,16 @@ residualsqrrvglm <- function(object,
true.mu <- object@misc$true.mu
if (is.null(true.mu))
- true.mu <- TRUE
+ true.mu <- TRUE
ans <- if (true.mu) y - mu else NULL
if (!matrix.arg && length(ans)) {
if (ncol(ans) == 1) {
- names.ans = dimnames(ans)[[1]]
- ans = c(ans)
- names(ans) = names.ans
+ names.ans <- dimnames(ans)[[1]]
+ ans <- c(ans)
+ names(ans) <- names.ans
ans
} else {
warning("ncol(ans) is not 1")
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 34e9341..cdc5ac5 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index 1198712..0696d4d 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,11 +1,11 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-rrvglm.control = function(Rank = 1,
+rrvglm.control <- function(Rank = 1,
Algorithm = c("alternating", "derivative"),
Corner = TRUE,
Uncorrelated.lv = FALSE,
@@ -21,118 +21,128 @@ rrvglm.control = function(Rank = 1,
SD.Ainit = 0.02,
SD.Cinit = 0.02,
szero = NULL,
- Norrr = ~ 1,
+ noRRR = ~ 1,
+ Norrr = NA,
trace = FALSE,
Use.Init.Poisson.QO = FALSE,
checkwz = TRUE,
wzepsilon = .Machine$double.eps^0.75,
- ...)
-{
-
-
-
-
- if (mode(Algorithm) != "character" && mode(Algorithm) != "name")
- Algorithm <- as.character(substitute(Algorithm))
- Algorithm <- match.arg(Algorithm, c("alternating", "derivative"))[1]
-
- if (Svd.arg) Corner = FALSE
-
- if (!is.Numeric(Rank, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for 'Rank'")
- if (!is.Numeric(Alpha, positive = TRUE,
- allowable.length = 1) || Alpha > 1)
- stop("bad input for 'Alpha'")
- if (!is.Numeric(Bestof, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for 'Bestof'")
- if (!is.Numeric(SD.Ainit, positive = TRUE,
- allowable.length = 1))
- stop("bad input for 'SD.Ainit'")
- if (!is.Numeric(SD.Cinit, positive = TRUE,
- allowable.length = 1))
- stop("bad input for 'SD.Cinit'")
- if (!is.Numeric(Etamat.colmax, positive = TRUE,
- allowable.length = 1) ||
- Etamat.colmax < Rank)
- stop("bad input for 'Etamat.colmax'")
-
- if (length(szero) &&
- (any(round(szero) != szero) ||
- any(szero < 1)))
- stop("bad input for the argument 'szero'")
-
-
- Quadratic = FALSE
- if (!Quadratic && Algorithm == "derivative" && !Corner) {
- dd = "derivative algorithm only supports corner constraints"
- if (length(Wmat) || Uncorrelated.lv || Svd.arg)
- stop(dd)
- warning(dd)
- Corner = TRUE
- }
- if (Quadratic && Algorithm != "derivative")
- stop("Quadratic model can only be fitted using the derivative algorithm")
-
- if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
- stop("cannot have 'Corner = TRUE' and either 'Svd = TRUE' or ",
- "'Uncorrelated.lv = TRUE' or Wmat")
-
- if (Corner && length(intersect(szero, Index.corner)))
- stop("cannot have 'szero' and 'Index.corner' having ",
- "common values")
-
- if (length(Index.corner) != Rank)
- stop("length(Index.corner) != Rank")
-
- if (!is.logical(checkwz) ||
- length(checkwz) != 1)
- stop("bad input for 'checkwz'")
-
- if (!is.Numeric(wzepsilon, allowable.length = 1,
- positive = TRUE))
- stop("bad input for 'wzepsilon'")
-
- if (class(Norrr) != "formula" && !is.null(Norrr))
- stop("argument 'Norrr' should be a formula or a NULL")
-
- ans =
- c(vglm.control(trace = trace, ...),
- switch(Algorithm,
- "alternating" = valt.control(...),
- "derivative" = rrvglm.optim.control(...)),
- list(Rank = Rank,
- Ainit = Ainit,
- Algorithm = Algorithm,
- Alpha = Alpha,
- Bestof = Bestof,
- Cinit = Cinit,
- Index.corner = Index.corner,
- Norrr = Norrr,
- Corner = Corner,
- Uncorrelated.lv = Uncorrelated.lv,
- Wmat = Wmat,
- OptimizeWrtC = TRUE, # OptimizeWrtC,
- Quadratic = FALSE, # A constant now, here.
- SD.Ainit = SD.Ainit,
- SD.Cinit = SD.Cinit,
- Etamat.colmax = Etamat.colmax,
- szero = szero,
- Svd.arg=Svd.arg,
- Use.Init.Poisson.QO = Use.Init.Poisson.QO),
- checkwz = checkwz,
- wzepsilon = wzepsilon,
- if (Quadratic) qrrvglm.control(Rank = Rank, ...) else NULL)
-
- if (Quadratic && ans$ITolerances) {
- ans$Svd.arg = FALSE
- ans$Uncorrelated.lv = FALSE
- ans$Corner = FALSE
- }
-
- ans$half.stepsizing = FALSE # Turn it off
- ans
+ ...) {
+
+
+
+
+
+ if (length(Norrr) != 1 || !is.na(Norrr)) {
+ warning("argument 'Norrr' has been replaced by 'noRRR'. ",
+ "Assigning the latter but using 'Norrr' will become an error in ",
+ "the next VGAM version soon.")
+ noRRR <- Norrr
+ }
+
+
+ if (mode(Algorithm) != "character" && mode(Algorithm) != "name")
+ Algorithm <- as.character(substitute(Algorithm))
+ Algorithm <- match.arg(Algorithm, c("alternating", "derivative"))[1]
+
+ if (Svd.arg)
+ Corner <- FALSE
+
+ if (!is.Numeric(Rank, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for 'Rank'")
+ if (!is.Numeric(Alpha, positive = TRUE,
+ allowable.length = 1) || Alpha > 1)
+ stop("bad input for 'Alpha'")
+ if (!is.Numeric(Bestof, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for 'Bestof'")
+ if (!is.Numeric(SD.Ainit, positive = TRUE,
+ allowable.length = 1))
+ stop("bad input for 'SD.Ainit'")
+ if (!is.Numeric(SD.Cinit, positive = TRUE,
+ allowable.length = 1))
+ stop("bad input for 'SD.Cinit'")
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
+ Etamat.colmax < Rank)
+ stop("bad input for 'Etamat.colmax'")
+
+ if (length(szero) &&
+ (any(round(szero) != szero) ||
+ any(szero < 1)))
+ stop("bad input for the argument 'szero'")
+
+
+ Quadratic <- FALSE
+ if (!Quadratic && Algorithm == "derivative" && !Corner) {
+ dd <- "derivative algorithm only supports corner constraints"
+ if (length(Wmat) || Uncorrelated.lv || Svd.arg)
+ stop(dd)
+ warning(dd)
+ Corner <- TRUE
+ }
+ if (Quadratic && Algorithm != "derivative")
+ stop("Quadratic model can only be fitted using the derivative algorithm")
+
+ if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
+ stop("cannot have 'Corner = TRUE' and either 'Svd = TRUE' or ",
+ "'Uncorrelated.lv = TRUE' or Wmat")
+
+ if (Corner && length(intersect(szero, Index.corner)))
+ stop("cannot have 'szero' and 'Index.corner' having ",
+ "common values")
+
+ if (length(Index.corner) != Rank)
+ stop("length(Index.corner) != Rank")
+
+ if (!is.logical(checkwz) ||
+ length(checkwz) != 1)
+ stop("bad input for 'checkwz'")
+
+ if (!is.Numeric(wzepsilon, allowable.length = 1,
+ positive = TRUE))
+ stop("bad input for 'wzepsilon'")
+
+ if (class(noRRR) != "formula" && !is.null(noRRR))
+ stop("argument 'noRRR' should be a formula or a NULL")
+
+ ans <-
+ c(vglm.control(trace = trace, ...),
+ switch(Algorithm,
+ "alternating" = valt.control(...),
+ "derivative" = rrvglm.optim.control(...)),
+ list(Rank = Rank,
+ Ainit = Ainit,
+ Algorithm = Algorithm,
+ Alpha = Alpha,
+ Bestof = Bestof,
+ Cinit = Cinit,
+ Index.corner = Index.corner,
+ noRRR = noRRR,
+ Corner = Corner,
+ Uncorrelated.lv = Uncorrelated.lv,
+ Wmat = Wmat,
+ OptimizeWrtC = TRUE, # OptimizeWrtC,
+ Quadratic = FALSE, # A constant now, here.
+ SD.Ainit = SD.Ainit,
+ SD.Cinit = SD.Cinit,
+ Etamat.colmax = Etamat.colmax,
+ szero = szero,
+ Svd.arg = Svd.arg,
+ Use.Init.Poisson.QO = Use.Init.Poisson.QO),
+ checkwz = checkwz,
+ wzepsilon = wzepsilon,
+ if (Quadratic) qrrvglm.control(Rank = Rank, ...) else NULL)
+
+ if (Quadratic && ans$ITolerances) {
+ ans$Svd.arg <- FALSE
+ ans$Uncorrelated.lv <- FALSE
+ ans$Corner <- FALSE
+ }
+
+ ans$half.stepsizing <- FALSE # Turn it off
+ ans
}
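
As in qrrvglm.control() above, the returned control list now stores the formula under 'noRRR', and a supplied 'Norrr' is only translated (with a warning) for backwards compatibility. A quick hedged check of the returned list, assuming VGAM is attached:

    ctl <- rrvglm.control(Rank = 1, noRRR = ~ 1)
    "noRRR" %in% names(ctl)   # TRUE: stored under the new name
    "Norrr" %in% names(ctl)   # FALSE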
@@ -142,12 +152,13 @@ rrvglm.control = function(Rank = 1,
setClass("summary.rrvglm",
representation("rrvglm",
- coef3 = "matrix",
- cov.unscaled = "matrix",
- correlation = "matrix",
- df = "numeric",
- pearson.resid = "matrix",
- sigma = "numeric"))
+ coef3 = "matrix",
+ cov.unscaled = "matrix",
+ correlation = "matrix",
+ df = "numeric",
+ pearson.resid = "matrix",
+ sigma = "numeric"))
+
setMethod("summary", "rrvglm",
function(object, ...)
@@ -157,15 +168,14 @@ setMethod("summary", "rrvglm",
show.summary.rrvglm <- function(x, digits = NULL,
- quote= TRUE, prefix = "")
-{
+ quote = TRUE, prefix = "") {
- show.summary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+ show.summary.vglm(x, digits = digits, quote = quote, prefix = prefix)
- invisible(x)
- NULL
+ invisible(x)
+ NULL
}
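
The summary.rrvglm class keeps the same extra slots, only reindented. A hedged sketch of inspecting them, where 'fit' stands for the result of some earlier rrvglm() call (not shown here):

    sfit <- summary(fit)   # 'fit' assumed to be an rrvglm object
    slotNames(sfit)        # includes coef3, cov.unscaled, correlation, df, sigma
    sfit@coef3             # coefficient matrix with standard errors and z values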
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 2e8176b..73fdddf 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,19 +8,20 @@
-rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
- etastart = NULL, mustart = NULL, coefstart = NULL,
- offset = 0, family,
- control=rrvglm.control(...),
- criterion = "coefficients",
- qr.arg = FALSE,
- constraints = NULL,
- extra = NULL,
- Terms=Terms, function.name = "rrvglm", ...)
-{
- specialCM = NULL
- post = list()
- check.rank = TRUE # !control$Quadratic
+rrvglm.fit <-
+ function(x, y, w = rep(1, length(x[, 1])),
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family,
+ control=rrvglm.control(...),
+ criterion = "coefficients",
+ qr.arg = FALSE,
+ constraints = NULL,
+ extra = NULL,
+ Terms = Terms, function.name = "rrvglm", ...) {
+
+ specialCM <- NULL
+ post <- list()
+ check.rank <- TRUE # !control$Quadratic
nonparametric <- FALSE
epsilon <- control$epsilon
maxit <- control$maxit
@@ -30,11 +31,16 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
minimize.criterion <- control$min.criterion
+ fv <- one.more <- rrr.expression <- modelno <- NULL
+ RRR.expression <- paste("rrr", control$Algorithm,
+ "expression", sep = ".")
+
+
+
n <- dim(x)[1]
new.s.call <- expression({
- if (c.list$one.more)
- {
+ if (c.list$one.more) {
fv <- c.list$fit
new.coeffs <- c.list$coeff
@@ -51,19 +57,19 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
old.crit <- new.crit
new.crit <-
switch(criterion,
- coefficients=new.coeffs,
- tfun(mu=mu, y=y, w=w, res = FALSE, eta=eta, extra))
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w, res = FALSE, eta = eta, extra))
if (trace && orig.stepsize == 1) {
cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
" linear loop ", iter, ": ", criterion, "= ")
- UUUU = switch(criterion, coefficients=
+ UUUU <- switch(criterion, coefficients=
format(new.crit, dig=round(2-log10(epsilon))),
format(round(new.crit, 4)))
switch(criterion,
- coefficients={if(length(new.crit) > 2) cat("\n");
+ coefficients = {if(length(new.crit) > 2) cat("\n");
cat(UUUU, fill = TRUE, sep = ", ")},
cat(UUUU, fill = TRUE, sep = ", "))
}
@@ -76,7 +82,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
(if(minimize.criterion) new.crit > old.crit else
new.crit < old.crit)))
if (!is.logical(take.half.step))
- take.half.step = TRUE
+ take.half.step <- TRUE
if (take.half.step) {
stepsize <- 2 * min(orig.stepsize, 2*stepsize)
new.coeffs.save <- new.coeffs
@@ -110,8 +116,8 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
new.crit <-
switch(criterion,
- coefficients=new.coeffs,
- tfun(mu=mu,y=y,w=w,res = FALSE,eta=eta,extra))
+ coefficients = new.coeffs,
+ tfun(mu = mu,y = y,w = w,res = FALSE,eta = eta,extra))
if ((criterion == "coefficients") ||
( minimize.criterion && new.crit < old.crit) ||
@@ -129,12 +135,12 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (trace) {
cat(if(control$Quadratic) "QRR-VGLM" else "RR-VGLM",
" linear loop ", iter, ": ", criterion, "= ")
- UUUU = switch(criterion, coefficients=
- format(new.crit, dig=round(2-log10(epsilon))),
+ UUUU <- switch(criterion, coefficients =
+ format(new.crit, dig = round(2-log10(epsilon))),
format(round(new.crit, 4)))
switch(criterion,
- coefficients={if(length(new.crit) > 2) cat("\n");
+ coefficients = {if(length(new.crit) > 2) cat("\n");
cat(UUUU, fill = TRUE, sep = ", ")},
cat(UUUU, fill = TRUE, sep = ", "))
}
@@ -152,18 +158,16 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
deriv.mu <- eval(family@deriv)
wz <- eval(family@weight)
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
+ wz <- checkwz(wz, M = M, trace = trace,
wzepsilon = control$wzepsilon)
- wz = matrix(wz, nrow = n)
+ wz <- matrix(wz, nrow = n)
U <- vchol(wz, M = M, n = n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z = eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
+ z <- eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
- rrr.expression = paste("rrr", control$Algorithm,
- "expression", sep = ".")
- rrr.expression = get(rrr.expression)
+ rrr.expression <- get(RRR.expression)
eval(rrr.expression)
c.list$z <- z # contains \bI_{Rank} \bnu
@@ -227,13 +231,13 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
M <- if (is.matrix(eta)) ncol(eta) else 1
if (is.character(rrcontrol$Dzero)) {
- index = match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
+ index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
if (any(is.na(index)))
stop("Dzero argument didn't fully match y-names")
if (length(index) == M)
stop("all linear predictors are linear in the ",
"latent variable(s); so set 'Quadratic = FALSE'")
- rrcontrol$Dzero = control$Dzero = index
+ rrcontrol$Dzero <- control$Dzero <- index
}
@@ -244,12 +248,12 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(family@constraints)
- special.matrix = matrix(-34956.125, M, M) # An unlikely used matrix
- just.testing <- cm.vgam(special.matrix, x, rrcontrol$Norrr, constraints)
+ special.matrix <- matrix(-34956.125, M, M) # An unlikely used matrix
+ just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
- findex = trivial.constraints(just.testing, special.matrix)
- if (is.null(just.testing)) findex = NULL # 20100617
- tc1 = trivial.constraints(constraints)
+ findex <- trivial.constraints(just.testing, special.matrix)
+ if (is.null(just.testing)) findex <- NULL # 20100617
+ tc1 <- trivial.constraints(constraints)
if (!is.null(findex) && !control$Quadratic && sum(!tc1)) {
for(ii in names(tc1))
@@ -260,28 +264,28 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (!is.null(findex) && all(findex == 1))
stop("use vglm(), not rrvglm()!")
- colx1.index = names.colx1.index = NULL
- dx2 = dimnames(x)[[2]]
+ colx1.index <- names.colx1.index <- NULL
+ dx2 <- dimnames(x)[[2]]
if (sum(findex)) {
- asx = attr(x, "assign")
+ asx <- attr(x, "assign")
for(ii in names(findex))
if (findex[ii]) {
- names.colx1.index = c(names.colx1.index, dx2[asx[[ii]]])
- colx1.index = c(colx1.index, asx[[ii]])
+ names.colx1.index <- c(names.colx1.index, dx2[asx[[ii]]])
+ colx1.index <- c(colx1.index, asx[[ii]])
}
- names(colx1.index) = names.colx1.index
+ names(colx1.index) <- names.colx1.index
}
- rrcontrol$colx1.index = control$colx1.index =
+ rrcontrol$colx1.index <- control$colx1.index <-
colx1.index # Save it on the object
- colx2.index = 1:ncol(x)
- names(colx2.index) = dx2
+ colx2.index <- 1:ncol(x)
+ names(colx2.index) <- dx2
if (length(colx1.index))
- colx2.index = colx2.index[-colx1.index]
+ colx2.index <- colx2.index[-colx1.index]
- p1 = length(colx1.index); p2 = length(colx2.index)
- rrcontrol$colx2.index = control$colx2.index =
- colx2.index # Save it on the object
- Index.corner = control$Index.corner
+ p1 <- length(colx1.index); p2 <- length(colx2.index)
+ rrcontrol$colx2.index <- control$colx2.index <-
+ colx2.index # Save it on the object
+ Index.corner <- control$Index.corner
@@ -290,35 +294,39 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
matrix(rnorm(M * Rank, sd = rrcontrol$SD.Cinit), M, Rank)
Cmat <- if (length(rrcontrol$Cinit)) rrcontrol$Cinit else {
if (!rrcontrol$Use.Init.Poisson.QO) {
- matrix(rnorm(p2 * Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
+ matrix(rnorm(p2 * Rank, sd = rrcontrol$SD.Cinit), p2, Rank)
} else
- .Init.Poisson.QO(ymat=as.matrix(y),
- X1=if (length(colx1.index)) x[, colx1.index, drop = FALSE] else NULL,
- X2=x[, colx2.index, drop = FALSE],
- Rank=rrcontrol$Rank, trace=rrcontrol$trace,
+ .Init.Poisson.QO(ymat = as.matrix(y),
+ X1 = if (length(colx1.index))
+ x[, colx1.index, drop = FALSE] else NULL,
+ X2 = x[, colx2.index, drop = FALSE],
+ Rank = rrcontrol$Rank, trace = rrcontrol$trace,
max.ncol.etamat = rrcontrol$Etamat.colmax,
- Crow1positive=rrcontrol$Crow1positive,
- isdlv=rrcontrol$isdlv)
+ Crow1positive = rrcontrol$Crow1positive,
+ isdlv = rrcontrol$isdlv)
}
- if (modelno == 3)
- Amat[c(FALSE, TRUE),] <- 0 # Intercept only for log(k)
+
+
+
+
+
if (control$Corner)
- Amat[control$Index.corner,] = diag(Rank)
+ Amat[control$Index.corner,] <- diag(Rank)
if (length(control$szero))
- Amat[control$szero,] = 0
+ Amat[control$szero,] <- 0
- rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
- rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+ rrcontrol$Ainit <- control$Ainit <- Amat # Good for valt()
+ rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
- Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
+ Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
- nice31 = control$Quadratic && (!control$EqualTol || control$ITolerances) &&
- all(trivial.constraints(Blist) == 1)
+ nice31 <- control$Quadratic && (!control$EqualTol || control$ITolerances) &&
+ all(trivial.constraints(Blist) == 1)
- Blist = Blist.save = replace.constraints(Blist, Amat, colx2.index)
+ Blist <- Blist.save <- replace.constraints(Blist, Amat, colx2.index)
ncolBlist <- unlist(lapply(Blist, ncol))
@@ -326,22 +334,22 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
X_vlm_save <- if (control$Quadratic) {
- tmp500 = lm2qrrvlm.model.matrix(x=x, Blist=Blist,
- C=Cmat, control=control)
- xsmall.qrr = tmp500$new.lv.model.matrix
- B.list = tmp500$constraints # Doesn't change or contain \bI_{Rank} \bnu
- if (modelno == 3 && FALSE) {
- B.list[[1]] = (B.list[[1]])[,c(TRUE,FALSE),drop = FALSE] # Amat
- B.list[[2]] = (B.list[[2]])[,c(TRUE,FALSE),drop = FALSE] # D
+ tmp500 <- lm2qrrvlm.model.matrix(x = x, Blist = Blist,
+ C = Cmat, control = control)
+ xsmall.qrr <- tmp500$new.lv.model.matrix
+ B.list <- tmp500$constraints # Doesn't change or contain \bI_{Rank} \bnu
+ if (FALSE && modelno == 3) {
+ B.list[[1]] <- (B.list[[1]])[, c(TRUE, FALSE), drop = FALSE] # Amat
+ B.list[[2]] <- (B.list[[2]])[, c(TRUE, FALSE), drop = FALSE] # D
}
- lv.mat = tmp500$lv.mat
+ lv.mat <- tmp500$lv.mat
if (length(tmp500$offset)) {
- offset = tmp500$offset
+ offset <- tmp500$offset
}
- lm2vlm.model.matrix(xsmall.qrr, B.list, xij=control$xij)
+ lm2vlm.model.matrix(xsmall.qrr, B.list, xij = control$xij)
} else {
- lv.mat = x[,colx2.index,drop = FALSE] %*% Cmat
+ lv.mat <- x[,colx2.index,drop = FALSE] %*% Cmat
lm2vlm.model.matrix(x, Blist, xij=control$xij)
}
@@ -351,7 +359,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (length(coefstart)) {
eta <- if (ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
offset else X_vlm_save * coefstart + offset
- eta <- if (M > 1) matrix(eta, ncol=M, byrow = TRUE) else c(eta)
+ eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
mu <- family@linkinv(eta, extra)
@@ -364,7 +372,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
iter <- 1
new.crit <- switch(criterion,
coefficients = 1,
- tfun(mu=mu, y=y, w=w, res = FALSE, eta=eta, extra))
+ tfun(mu = mu, y = y, w = w, res = FALSE, eta = eta, extra))
old.crit <- if (minimize.criterion) 10*new.crit+10 else -10*new.crit-10
@@ -372,17 +380,20 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
wz <- eval(family@weight)
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
+ wz <- checkwz(wz, M = M, trace = trace,
wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent = !trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
- c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more = TRUE,
- coeff=as.double(rep(1,ncol(X_vlm_save))), U=as.double(U),
- copy_X_vlm=copy_X_vlm,
- X_vlm = if (copy_X_vlm) as.double(X_vlm_save) else double(3))
+ c.list <- list(z = as.double(z), fit = as.double(t(eta)),
+ one.more = TRUE,
+ coeff = as.double(rep(1,ncol(X_vlm_save))),
+ U = as.double(U),
+ copy_X_vlm = copy_X_vlm,
+ X_vlm = if (copy_X_vlm) as.double(X_vlm_save) else
+ double(3))
@@ -394,7 +405,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm, " observations")
{
- bf.call = expression(vlm.wfit(xmat=X_vlm_save, zedd,
+ bf.call <- expression(vlm.wfit(xmat=X_vlm_save, zedd,
Blist = if (control$Quadratic) B.list else Blist,
ncolx=ncol(x), U=U,
Eta.range = control$Eta.range,
@@ -402,35 +413,36 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
is.vlmX = TRUE, qr = qr.arg, xij = control$xij))
while(c.list$one.more) {
- if (control$Quadratic) {
- zedd = as.matrix(z)
- if (control$Corner)
- zedd[,Index.corner] = zedd[,Index.corner] - lv.mat
- } else {
- zedd = z
- }
+ if (control$Quadratic) {
+ zedd <- as.matrix(z)
+ if (control$Corner)
+ zedd[,Index.corner] <- zedd[,Index.corner] - lv.mat
+ } else {
+ zedd <- z
+ }
if (!nice31)
- tfit <- eval(bf.call) # tfit$fitted.values is n x M
+ tfit <- eval(bf.call) # tfit$fitted.values is n x M
if (!control$Quadratic) {
- Cmat = tfit$mat.coef[colx2.index,,drop = FALSE] %*%
+ Cmat <- tfit$mat.coef[colx2.index,,drop = FALSE] %*%
Amat %*% solve(t(Amat) %*% Amat)
- rrcontrol$Ainit = control$Ainit = Amat # Good for valt()
- rrcontrol$Cinit = control$Cinit = Cmat # Good for valt()
+ rrcontrol$Ainit <- control$Ainit <- Amat # Good for valt()
+ rrcontrol$Cinit <- control$Cinit <- Cmat # Good for valt()
}
if (!nice31) c.list$coeff <- tfit$coefficients
if (control$Quadratic) {
if (control$Corner)
- tfit$fitted.values[,Index.corner] =
+ tfit$fitted.values[,Index.corner] <-
tfit$fitted.values[,Index.corner] + lv.mat
}
if (!nice31)
- tfit$predictors = tfit$fitted.values # Doesn't contain the offset
- if (!nice31) c.list$fit = tfit$fitted.values
+ tfit$predictors <- tfit$fitted.values # Doesn't contain the offset
+ if (!nice31)
+ c.list$fit <- tfit$fitted.values
c.list <- eval(new.s.call)
NULL
}
@@ -452,7 +464,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
asgn <- attr(X_vlm_save, "assign")
if (nice31) {
- coefs <- rep(0, len=length(xnrow_X_vlm))
+ coefs <- rep(0, len = length(xnrow_X_vlm))
rank <- ncol_X_vlm
} else {
coefs <- tfit$coefficients
@@ -470,12 +482,12 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
} else {
R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
R[lower.tri(R)] <- 0
- attributes(R) <- list(dim=c(ncol_X_vlm, ncol_X_vlm),
- dimnames=list(cnames, cnames), rank=rank)
+ attributes(R) <- list(dim = c(ncol_X_vlm, ncol_X_vlm),
+ dimnames = list(cnames, cnames), rank = rank)
}
if (nice31) {
- effects <- rep(0, len=77)
+ effects <- rep(0, len = 77)
} else {
effects <- tfit$effects
neff <- rep("", nrow_X_vlm)
@@ -508,39 +520,41 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
}
if (is.matrix(mu)) {
- if (length(dimnames(y)[[2]])) {
- y.names <- dimnames(y)[[2]]
- }
- if (length(dimnames(mu)[[2]])) {
- y.names <- dimnames(mu)[[2]]
- }
- dimnames(mu) <- list(yn, y.names)
+ if (length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ if (length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
} else {
- names(mu) <- names(fv)
+ names(mu) <- names(fv)
}
- elts.tildeA = (M - Rank - length(control$szero)) * Rank
- no.dpar = 0
+
+
+ elts.tildeA <- (M - Rank - length(control$szero)) * Rank
+ no.dpar <- 0
df.residual <- nrow_X_vlm - rank -
(if(control$Quadratic) Rank*p2 else 0) -
no.dpar - elts.tildeA
- fit <- list(assign=asgn,
- coefficients=coefs,
+ fit <- list(assign = asgn,
+ coefficients = coefs,
constraints = if (control$Quadratic) B.list else Blist,
- df.residual=df.residual,
- df.total=n*M,
- effects=effects,
- fitted.values=mu,
- offset=offset,
- rank=rank,
- residuals=residuals,
- R=R,
- terms=Terms) # terms: This used to be done in vglm()
+ df.residual = df.residual,
+ df.total = n*M,
+ effects = effects,
+ fitted.values = mu,
+ offset = offset,
+ rank = rank,
+ residuals = residuals,
+ R = R,
+ terms = Terms) # terms: This used to be done in vglm()
if (qr.arg && !nice31) {
fit$qr <- tfit$qr
@@ -570,7 +584,7 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
ynames = dimnames(y)[[2]])
if (one.more)
- misc$rrr.expression = rrr.expression #
+ misc$rrr.expression <- rrr.expression #
crit.list <- list()
@@ -579,9 +593,11 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
for(ii in names(.min.criterion.VGAM)) {
if (ii != criterion &&
- any(slotNames(family) == ii) && length(body(slot(family, ii)))) {
+ any(slotNames(family) == ii) &&
+ length(body(slot(family, ii)))) {
fit[[ii]] <- crit.list[[ii]] <-
- (slot(family, ii))(mu=mu, y=y, w=w, res = FALSE, eta=eta, extra)
+ (slot(family, ii))(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra)
}
}
@@ -596,18 +612,18 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
structure(c(fit, list(predictors = if (nice31) matrix(eta, n, M) else
tfit$predictors,
- contrasts=attr(x, "contrasts"),
- control=control,
- crit.list=crit.list,
- extra=extra,
- family=family,
- iter=iter,
- misc=misc,
+ contrasts = attr(x, "contrasts"),
+ control = control,
+ crit.list = crit.list,
+ extra = extra,
+ family = family,
+ iter = iter,
+ misc = misc,
post = post,
rss = if (nice31) 000 else tfit$rss,
- x=x,
- y=y)),
- vclass=family@vfamily)
+ x = x,
+ y = y)),
+ vclass = family@vfamily)
}
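
In the fitting loop above, corner constraints keep the reduced-rank factorisation identifiable by pinning Rank rows of A to the identity (Amat[control$Index.corner, ] <- diag(Rank)). A toy base-R illustration of just that step, with invented sizes:

    set.seed(1)
    M <- 4; Rank <- 2
    Amat <- matrix(rnorm(M * Rank), M, Rank)   # unconstrained starting values
    Index.corner <- 1:Rank                     # default corner rows
    Amat[Index.corner, ] <- diag(Rank)         # rows 1..Rank become the identity
    Amat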
diff --git a/R/s.q b/R/s.q
index 3434900..7d3fcdd 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,12 +10,12 @@ s <- function(x, df = 4, spar = 0, ...) {
xs <- substitute(x)
ans <- as.character(xs)
if (length(ans) > 1)
- stop("x argument must be of length one")
+ stop("argument 'x' must be of length one")
call <- deparse(sys.call())
if (ncol(as.matrix(x)) > 1)
- stop("x must be a vector")
+ stop("argument 'x' must be a vector")
if (!is.null(levels(x))) {
x <- if (is.ordered(x)) {
as.vector(x)
diff --git a/R/s.vam.q b/R/s.vam.q
index f0da59a..a40613a 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,153 +9,163 @@
+
s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
bf.epsilon = 0.001, trace = FALSE, se.fit = TRUE,
X_vlm_save, Blist, ncolBlist, M, qbig, Umat,
all.knots = FALSE, nk = NULL,
- sf.only = FALSE)
-{
- nwhich <- names(which)
+ sf.only = FALSE) {
+ nwhich <- names(which)
- dX_vlm <- as.integer(dim(X_vlm_save))
- pbig <- dX_vlm[2]
+ dX_vlm <- as.integer(dim(X_vlm_save))
+ pbig <- dX_vlm[2]
- if (!length(smooth.frame$first)) {
- data <- smooth.frame[, nwhich, drop = FALSE]
- smooth.frame <- vgam.match(data, all.knots=all.knots, nk=nk)
- smooth.frame$first <- FALSE # No longer first for next time
+ if (!length(smooth.frame$first)) {
+ data <- smooth.frame[, nwhich, drop = FALSE]
+ smooth.frame <- vgam.match(data, all.knots = all.knots, nk = nk)
+ smooth.frame$first <- TRUE # Only executed at the first time
- dx <- as.integer(dim(x))
- smooth.frame$n_lm <- dx[1]
- smooth.frame$p_lm <- dx[2]
- attr(data, "class") <- NULL
+ dx <- as.integer(dim(x))
+ smooth.frame$n_lm <- dx[1]
+ smooth.frame$p_lm <- dx[2]
+ attr(data, "class") <- NULL
- sparv <- lapply(data, attr, "spar")
- dfvec <- lapply(data, attr, "df")
- s.xargument <- lapply(data, attr, "s.xargument")
-
- for(kk in 1:length(nwhich)) {
- ii <- nwhich[kk]
-
- temp <- sparv[[ii]]
- if (!is.numeric(temp) || any(temp < 0)) {
- stop("spar cannot be negative or non-numeric")
- }
- if (length(temp) > ncolBlist[ii]) {
- warning("only the first ", ncolBlist[ii], " values of ",
- "'spar' are used for variable '", s.xargument, "'")
- }
- sparv[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
-
- temp <- dfvec[[ii]]
- if (!is.numeric(temp) || any(temp < 1)) {
- stop("df is non-numeric or less than 1")
- }
- if (length(temp) > ncolBlist[ii]) {
- warning("only the first ", ncolBlist[ii], " value(s) of 'df' ",
- "are used for variable '", s.xargument, "'")
- }
- dfvec[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
- if (max(temp) > smooth.frame$nef[kk]-1) {
- stop("'df' value too high for variable '", s.xargument, "'")
- }
-
- if (any(sparv[[ii]] != 0) && any(dfvec[[ii]] != 4)) {
- stop("cannot specify both 'spar' and 'df'")
- }
- } # End of kk loop
-
- sparv <- unlist(sparv)
- dfvec <- unlist(dfvec)
- smooth.frame$sparv <- sparv # original
- smooth.frame$dfvec <- dfvec # original
+ osparv <- lapply(data, attr, "spar") # "o" for original
+ odfvec <- lapply(data, attr, "df")
+ s.xargument <- lapply(data, attr, "s.xargument")
- if (sum(smooth.frame$dfvec[smooth.frame$sparv == 0]) + pbig >
- smooth.frame$n_lm * sum(ncolBlist[nwhich])) {
- stop("too many parameters/dof for data on hand")
- }
+ for (kk in 1:length(nwhich)) {
+ ii <- nwhich[kk]
+
+ temp <- osparv[[ii]]
+ if (!is.numeric(temp) || any(temp < 0)) {
+ stop("spar cannot be negative or non-numeric")
+ }
+ if (length(temp) > ncolBlist[ii]) {
+ warning("only the first ", ncolBlist[ii], " values of ",
+ "'spar' are used for variable '", s.xargument, "'")
+ }
+ osparv[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
- xnrow_X_vlm <- labels(X_vlm_save)[[2]]
- asgn <- attr(X_vlm_save, "assign")
- aa <- NULL
- for(ii in nwhich) {
- aa <- c(aa, xnrow_X_vlm[asgn[[ii]]])
- }
- smooth.frame$ndfsparv <- aa # Stored here
- smooth.frame$xnrow_X_vlm <- xnrow_X_vlm # Stored here
- smooth.frame$s.xargument <- s.xargument # Stored here
+ temp <- odfvec[[ii]]
+ if (!is.numeric(temp) || any(temp < 1)) {
+ stop("df is non-numeric or less than 1")
+ }
+ if (length(temp) > ncolBlist[ii]) {
+ warning("only the first ", ncolBlist[ii], " value(s) of 'df' ",
+ "are used for variable '", s.xargument, "'")
+ }
+ odfvec[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
+ if (max(temp) > smooth.frame$neffec[kk]-1) {
+ stop("'df' value too high for variable '", s.xargument, "'")
+ }
+
+ if (any(osparv[[ii]] != 0) &&
+ any(odfvec[[ii]] != 4)) {
+ stop("cannot specify both 'spar' and 'df'")
+ }
+ } # End of kk loop
+
+
+ osparv <- unlist(osparv)
+ odfvec <- unlist(odfvec)
+ smooth.frame$osparv <- osparv # Original
+ smooth.frame$odfvec <- odfvec # Original
- smooth.frame$smap=as.vector(cumsum(
- c(1, ncolBlist[nwhich]))[1:length(nwhich)])
+ if (sum(smooth.frame$dfvec[smooth.frame$osparv == 0]) + pbig >
+ smooth.frame$n_lm * sum(ncolBlist[nwhich])) {
+ stop("too many parameters/dof for data on hand")
+ }
- smooth.frame$try.sparv <- sparv
- smooth.frame$prev.dof <- dfvec
+ xnrow_X_vlm <- labels(X_vlm_save)[[2]]
+ asgn <- attr(X_vlm_save, "assign")
+ aa <- NULL
+ for (ii in nwhich) {
+ aa <- c(aa, xnrow_X_vlm[asgn[[ii]]])
+ }
+ smooth.frame$ndfsparv <- aa # Stored here
+ smooth.frame$xnrow_X_vlm <- xnrow_X_vlm # Stored here
+ smooth.frame$s.xargument <- s.xargument # Stored here
+ smooth.frame$smap <- as.vector(cumsum(
+ c(1, ncolBlist[nwhich]))[1:length(nwhich)])
- smooth.frame$bindex <- as.integer(cumsum(c(1,
- smooth.frame$nknots*ncolBlist[nwhich])))
- smooth.frame$kindex = as.integer(
- cumsum(c(1, 4 + smooth.frame$nknots)))
- } # End of first
+ smooth.frame$try.sparv <- osparv
+ smooth.frame$lamvector <- double(length(odfvec))
- if (sf.only) {
- return(smooth.frame)
- }
+ smooth.frame$bindex <-
+ as.integer(cumsum(c(1, smooth.frame$nknots * ncolBlist[nwhich])))
+
+
+ smooth.frame$lindex <-
+ as.integer(cumsum(c(1, smooth.frame$neffec * ncolBlist[nwhich])))
+
+
+
+ smooth.frame$kindex <-
+ as.integer(cumsum(c(1, 4 + smooth.frame$nknots)))
+ } else {
+ smooth.frame$first <- FALSE
+ }
+
- ldk <- 3 * max(ncolBlist[nwhich]) + 1 # 11/7/02
+ if (sf.only) {
+ return(smooth.frame)
+ }
- which <- unlist(which)
- p_lm <- smooth.frame$p_lm
- n_lm <- smooth.frame$n_lm
- dim2wz <- if (is.matrix(wz)) ncol(wz) else 1
+ ldk <- 3 * max(ncolBlist[nwhich]) + 1 # 20020711
- dim1U <- if (is.matrix(Umat)) nrow(Umat) else 1
- nBlist <- names(Blist)
- for(ii in length(nBlist):1) {
- if (!any(nBlist[ii] == nwhich)) {
- Blist[[ii]] <- NULL
- }
+ which <- unlist(which)
+ p_lm <- smooth.frame$p_lm
+ n_lm <- smooth.frame$n_lm
+ dim2wz <- if (is.matrix(wz)) ncol(wz) else 1
+
+ dim1U <- if (is.matrix(Umat)) nrow(Umat) else 1
+
+ nBlist <- names(Blist)
+ for (ii in length(nBlist):1) {
+ if (!any(nBlist[ii] == nwhich)) {
+ Blist[[ii]] <- NULL
}
- trivc <- trivial.constraints(Blist)
+ }
+ trivc <- trivial.constraints(Blist)
+
+ ncbvec <- ncolBlist[nwhich]
+ ncolbmax <- max(ncbvec)
+
- ncbvec <- ncolBlist[nwhich]
- ncolbmax <- max(ncbvec)
+ contr.sp <- list(low = -1.5, ## low = 0. was default till R 1.3.x
+ high = 1.5,
+ tol = 1e-4, ## tol = 0.001 was default till R 1.3.x
+ eps = 2e-8, ## eps = 0.00244 was default till R 1.3.x
+ maxit = 500)
- contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
- high = 1.5,
- tol = 1e-4,## tol = 0.001 was default till R 1.3.x
- eps = 2e-8,## eps = 0.00244 was default till R 1.3.x
- maxit = 500 )
- if (FALSE)
- contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
- high = 1.5,
- tol = 0.001, # was default till R 1.3.x
- eps = 0.00244, # was default till R 1.3.x
- maxit = 500 )
- fit <- dotC(name="Yee_vbfa", # ---------------------------------
+ fit <-
+ dotC(name = "Yee_vbfa", # ---------------------------------
npetc = as.integer(c(n_lm, p_lm, length(which), se.fit, 0,
- bf.maxit, qrank = 0, M, nbig = n_lm * M, pbig,
- qbig, dim2wz, dim1U, ier = 0, ldk=ldk, # ldk may be unused
- contr.sp$maxit, iinfo = 0
- )),
- doubvec = as.double(c(bf.epsilon, resSS = 0, unlist(contr.sp[1:4]))),
+ bf.maxit, qrank = 0, M, nbig = n_lm * M, pbig,
+ qbig, dim2wz, dim1U, ier = 0,
+ ldk = ldk, # ldk may be unused
+ contr.sp$maxit, iinfo = 0
+ )),
+ doubvec = as.double(c(bf.epsilon, resSS = 0, unlist(contr.sp[1:4]))),
as.double(x),
y = as.double(zedd), wz = as.double(wz),
- dfvec = as.double(smooth.frame$dfvec),
- lamvec = as.double(smooth.frame$try.sparv),
+ dfvec = as.double(smooth.frame$odfvec + 1), # 20130427; + 1 added
+ lamvec = as.double(smooth.frame$lamvector),
sparv = as.double(smooth.frame$try.sparv),
- as.integer(smooth.frame$o), as.integer(smooth.frame$nef),
+ as.integer(smooth.frame$matcho), as.integer(smooth.frame$neffec),
as.integer(which),
smomat = as.double(smomat), etamat = double(M * n_lm),
beta = double(pbig),
@@ -167,101 +177,142 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
as.integer(ncbvec), as.integer(smooth.frame$smap),
trivc = as.integer(trivc),
- levmat = if (se.fit) as.double(smomat) else double(1), # 20100227
+
+
+
+
+
+ levmat = double(sum(smooth.frame$neffec * ncbvec)), # 20130427;
+
bcoefficients = double(sum(smooth.frame$nknots * ncbvec)),
knots = as.double(unlist(smooth.frame$knots)),
bindex = as.integer(smooth.frame$bindex),
+ lindex = as.integer(smooth.frame$lindex),
nknots = as.integer(smooth.frame$nknots),
kindex = as.integer(smooth.frame$kindex)) # End of dotC
- dim(fit$qr) = dim(X_vlm_save)
- dimnames(fit$qr) = dimnames(X_vlm_save)
- dim(fit$y) = dim(zedd)
- dimnames(fit$y) = dimnames(zedd)
- dim(fit$smomat) = dim(smomat)
- dimnames(fit$smomat) = dimnames(smomat) # Needed for vgam.nlchisq
- if (se.fit) {
- dim(fit$varmat) = dim(smomat)
- dimnames(fit$varmat) = dimnames(smomat)
- dim(fit$levmat) = dim(smomat)
- dimnames(fit$levmat) = dimnames(smomat)
+ if (exists("flush.console")) flush.console()
+
- }
+ if (smooth.frame$first) {
+ }
+ dim(fit$qr) <- dim(X_vlm_save)
+ dimnames(fit$qr) <- dimnames(X_vlm_save)
+ dim(fit$y) <- dim(zedd)
+ dimnames(fit$y) <- dimnames(zedd)
+ dim(fit$smomat) <- dim(smomat)
+ dimnames(fit$smomat) <- dimnames(smomat) # Needed for vgam.nlchisq
+ if (se.fit) {
+ dim(fit$varmat) <- dim(smomat)
+ dimnames(fit$varmat) <- dimnames(smomat)
+ }
+
- if (fit$npetc[14] != 0 || fit$npetc[17] != 0) {
- stop("something went wrong in the C function 'vbfa'")
- }
- fit$etamat = if (M > 1) matrix(fit$etamat, n_lm, M, byrow = TRUE) else
- c(fit$etamat) # May no longer be a matrix
- nits <- fit$npetc[5]
- qrank <- fit$npetc[7]
+ if (fit$npetc[14] != 0 ||
+ fit$npetc[17] != 0) {
+ stop("something went wrong in the C function 'vbfa'")
+ }
+ fit$etamat <- if (M > 1) matrix(fit$etamat, n_lm, M, byrow = TRUE) else
+ c(fit$etamat) # May no longer be a matrix
+ nits <- fit$npetc[5]
+ qrank <- fit$npetc[7]
+
+
+
+
+
+
+
+
+
+
+
+
+ if (smooth.frame$first) {
smooth.frame$try.sparv <- fit$sparv
+ }
- change <- abs(smooth.frame$prev.dof - fit$dfvec)/(1+fit$dfvec) > 0.00 &
- smooth.frame$sparv == 0
- smooth.frame$try.sparv[change] <- 0 # For next time
- smooth.frame$prev.dof <- fit$dfvec
+ if ((nits == bf.maxit) & bf.maxit > 1) {
+ warning("'s.vam' convergence not obtained in ", bf.maxit,
+ " iterations")
+ }
- if ((nits == bf.maxit) & bf.maxit > 1) {
- warning("'s.vam' convergence not obtained in ", bf.maxit,
- " iterations")
- }
+ R <- fit$qr[1:pbig, 1:pbig]
+ R[lower.tri(R)] <- 0
- R <- fit$qr[1:pbig, 1:pbig]
- R[lower.tri(R)] <- 0
+ Bspline <- vector("list", length(nwhich))
+ names(Bspline) <- nwhich
+ for (ii in 1:length(nwhich)) {
+ b_coefs <- fit$bcoeff[(smooth.frame$bindex[ii]):
+ (smooth.frame$bindex[ii+1]-1)]
+ b_coefs <- matrix(b_coefs, ncol = ncolBlist[nwhich[ii]])
+ Bspline[[ii]] <-
+ new("vsmooth.spline.fit",
+ "Bcoefficients" = b_coefs,
+ "xmax" = smooth.frame$xmax[ii],
+ "xmin" = smooth.frame$xmin[ii],
+ "knots" = as.vector(smooth.frame$knots[[ii]]))
+ }
- Bspline <- vector("list", length(nwhich))
- names(Bspline) <- nwhich
- for(ii in 1:length(nwhich)) {
- ans = fit$bcoeff[(smooth.frame$bindex[ii]):
- (smooth.frame$bindex[ii+1]-1)]
- ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
- Bspline[[ii]] =
- new("vsmooth.spline.fit",
- "Bcoefficients" = ans,
- "xmax" = smooth.frame$xmax[ii],
- "xmin" = smooth.frame$xmin[ii],
- "knots" = as.vector(smooth.frame$knots[[ii]]))
- }
- rl <- list(
- Bspline = Bspline,
- coefficients = fit$beta,
- df.residual = n_lm * M - qrank - sum(fit$dfvec - 1),
- fitted.values = fit$etamat,
- nl.df = fit$dfvec - 1,
- qr = list(qr=fit$qr, rank=qrank, qraux=fit$qraux, pivot=fit$qpivot),
- R = R,
- rank = qrank,
- residuals = fit$y - fit$etamat,
- rss = fit$doubvec[2],
- smomat = fit$smomat,
- sparv = fit$sparv,
- s.xargument = unlist(smooth.frame$s.xargument))
-
-
- names(rl$coefficients) <- smooth.frame$xnrow_X_vlm
- names(rl$sparv) <- smooth.frame$ndfspar
- names(rl$nl.df) <- smooth.frame$ndfspar
-
- if (se.fit) {
- rl <- c(rl, list(varmat = fit$varmat))
- }
- c(list(smooth.frame = smooth.frame), rl)
+
+ Leverages <- vector("list", length(nwhich))
+ names(Leverages) <- nwhich
+ for (ii in 1:length(nwhich)) {
+ levvec <- fit$levmat[(smooth.frame$lindex[ii]):
+ (smooth.frame$lindex[ii+1]-1)]
+ levmat <- matrix(levvec,
+ nrow = smooth.frame$neffec[ii],
+ ncol = ncolBlist[nwhich[ii]])
+ Leverages[[ii]] <- levmat
+ }
+
+
+
+ nl.df <- fit$dfvec - 1 # Used to be -1; Decrement/increment ?
+
+
+ retlist <- list(
+ Bspline = Bspline,
+ coefficients = fit$beta,
+ df.residual = n_lm * M - qrank - sum(nl.df), # Decrement/increment ?
+ fitted.values = fit$etamat,
+ Leverages = Leverages,
+ nl.df = nl.df,
+ qr = list(qr = fit$qr, rank = qrank,
+ qraux = fit$qraux, pivot = fit$qpivot),
+ R = R,
+ rank = qrank,
+ residuals = fit$y - fit$etamat,
+ rss = fit$doubvec[2],
+ smomat = fit$smomat,
+ sparv = fit$sparv,
+ s.xargument = unlist(smooth.frame$s.xargument))
+
+
+ names(retlist$coefficients) <- smooth.frame$xnrow_X_vlm
+ names(retlist$sparv) <-
+ names(retlist$nl.df) <- smooth.frame$ndfspar
+
+ if (se.fit) {
+ retlist <- c(retlist, list(varmat = fit$varmat))
+ }
+
+ c(list(smooth.frame = smooth.frame), retlist)
}
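
The new Leverages component is built by cutting the flat vector fit$levmat returned by the C backfitter into one matrix per smooth term, using the cumulative index lindex set up earlier in the function. A toy base-R illustration of that slicing, with invented sizes:

    neffec <- c(5, 7)                        # distinct x values per smooth term
    ncbvec <- c(2, 1)                        # constraint-matrix columns per term
    lindex <- as.integer(cumsum(c(1, neffec * ncbvec)))   # 1 11 18
    levvec <- runif(sum(neffec * ncbvec))    # stand-in for fit$levmat
    lev1 <- matrix(levvec[lindex[1]:(lindex[2] - 1)], neffec[1], ncbvec[1])
    lev2 <- matrix(levvec[lindex[2]:(lindex[3] - 1)], neffec[2], ncbvec[2])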
diff --git a/R/smart.R b/R/smart.R
index d9f02ca..c33e385 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/step.vglm.q b/R/step.vglm.q
index dd78967..2d95ef2 100644
--- a/R/step.vglm.q
+++ b/R/step.vglm.q
@@ -1,12 +1,11 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-step.vglm <- function(fit, ...)
-{
- cat("Sorry, this function has not been written yet. Returning a NULL.\n")
- NULL
+step.vglm <- function(fit, ...) {
+ cat("Sorry, this function has not been written yet. Returning a NULL.\n")
+ NULL
}
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index fa329fd..9a761e4 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -19,7 +19,7 @@ summaryvgam <- function(object, dispersion = NULL,
newobject <- object
class(newobject) <- "vglm"
stuff <- summaryvglm(newobject, dispersion = dispersion)
- rdf <- stuff@df[2] <- object@df.residual # NA
+ rdf <- stuff@df[2] <- object@df.residual # NA
M <- object@misc$M
nrow_X_vlm <- object@misc$nrow_X_vlm
@@ -47,8 +47,8 @@ summaryvgam <- function(object, dispersion = NULL,
aod <- cbind(aod, NA, NA, NA)
nl.chisq <- object@nl.chisq / object@dispersion
- special = abs(nldf) < 0.1 # This was the quick fix in s.vam()
- nldf[special] = 1 # Give it a plausible value for pchisq & pf
+ special <- abs(nldf) < 0.1 # This was the quick fix in s.vam()
+ nldf[special] <- 1 # Give it a plausible value for pchisq & pf
snames <- names(nldf)
aod[snames, 2] <- round(nldf, 1)
@@ -58,12 +58,12 @@ summaryvgam <- function(object, dispersion = NULL,
pchisq(nl.chisq, nldf, lower.tail = FALSE)
if (any(special)) {
- aod[snames[special], 2:4] = NA
+ aod[snames[special], 2:4] <- NA
}
rnames <- c("Df", "Npar Df", "Npar Chisq", "P(Chi)")
if (useF)
- rnames[3:4] <- c("Npar F", "Pr(F)")
+ rnames[3:4] <- c("Npar F", "Pr(F)")
dimnames(aod) <- list(names(df), rnames)
heading <- if (useF)
"\nDF for Terms and Approximate F-values for Nonparametric Effects\n"
@@ -72,30 +72,31 @@ summaryvgam <- function(object, dispersion = NULL,
} else {
heading <- "DF for Terms\n\n"
}
- aod <- as.vanova(data.frame(aod, check.names=FALSE), heading)
+ aod <- as.vanova(data.frame(aod, check.names = FALSE), heading)
- if (is.R()) class(aod) = "data.frame"
+ class(aod) <- "data.frame"
+ } else {
+ aod <- data.frame()
}
- else aod <- if (is.R()) data.frame() else NULL
answer <-
new("summary.vgam",
object,
- call=stuff@call,
- cov.unscaled=stuff@cov.unscaled,
- correlation=stuff@correlation,
- df=stuff@df,
- sigma=stuff@sigma)
+ call = stuff@call,
+ cov.unscaled = stuff@cov.unscaled,
+ correlation = stuff@correlation,
+ df = stuff@df,
+ sigma = stuff@sigma)
- slot(answer, "coefficients") = stuff@coefficients # Replace
+ slot(answer, "coefficients") <- stuff@coefficients # Replace
if (is.numeric(stuff@dispersion))
- slot(answer, "dispersion") = stuff@dispersion
+ slot(answer, "dispersion") <- stuff@dispersion
- presid = residuals(object, type = "pearson")
+ presid <- residuals(object, type = "pearson")
if (length(presid))
- answer@pearson.resid= as.matrix(presid)
+ answer@pearson.resid <- as.matrix(presid)
- slot(answer, "anova") = aod
+ slot(answer, "anova") <- aod
answer
}
@@ -140,27 +141,27 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
prose <- ""
if (length(x@dispersion)) {
- if (is.logical(x@misc$estimated.dispersion) &&
- x@misc$estimated.dispersion)
- prose <- "(Estimated) " else {
-
- if (is.numeric(x@misc$default.dispersion) &&
- x@dispersion==x@misc$default.dispersion)
- prose <- "(Default) "
+ if (is.logical(x@misc$estimated.dispersion) &&
+ x@misc$estimated.dispersion) {
+ prose <- "(Estimated) "
+ } else {
+ if (is.numeric(x@misc$default.dispersion) &&
+ x@dispersion == x@misc$default.dispersion)
+ prose <- "(Default) "
- if (is.numeric(x@misc$default.dispersion) &&
- x@dispersion!=x@misc$default.dispersion)
- prose <- "(Pre-specified) "
- }
- cat(paste("\n", prose, "Dispersion Parameter for ",
- x@family@vfamily[1],
- " family: ",
- format(round(x@dispersion, digits)), "\n", sep = ""))
+ if (is.numeric(x@misc$default.dispersion) &&
+ x@dispersion != x@misc$default.dispersion)
+ prose <- "(Pre-specified) "
+ }
+ cat(paste("\n", prose, "Dispersion Parameter for ",
+ x@family@vfamily[1],
+ " family: ",
+ format(round(x@dispersion, digits)), "\n", sep = ""))
}
- if (length(deviance(x)))
- cat("\nResidual deviance: ", format(round(deviance(x), digits)),
- "on", format(round(rdf, 3)), "degrees of freedom\n")
+ if (length(deviance(x)))
+ cat("\nResidual deviance: ", format(round(deviance(x), digits)),
+ "on", format(round(rdf, 3)), "degrees of freedom\n")
if (length(logLik.vlm(x)))
cat("\nLog-likelihood:", format(round(logLik.vlm(x), digits)),
@@ -177,7 +178,7 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
cat("\nNumber of iterations: ", x at iter, "\n")
if (length(x at anova)) {
- show.vanova(x at anova, digits = digits) # ".vanova" for Splus6
+ show.vanova(x at anova, digits = digits) # ".vanova" for Splus6
}
invisible(NULL)
@@ -186,15 +187,15 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
- setMethod("summary", "vgam",
- function(object, ...)
- summaryvgam(object, ...))
+setMethod("summary", "vgam",
+ function(object, ...)
+ summaryvgam(object, ...))
- setMethod("show", "summary.vgam",
- function(object)
- show.summary.vgam(object))
+setMethod("show", "summary.vgam",
+ function(object)
+ show.summary.vgam(object))
@@ -204,14 +205,14 @@ show.vanova <- function(x, digits = .Options$digits, ...) {
rrr <- row.names(x)
heading <- attr(x, "heading")
if (!is.null(heading))
- cat(heading, sep = "\n")
+ cat(heading, sep = "\n")
attr(x, "heading") <- NULL
for(i in 1:length(x)) {
- xx <- x[[i]]
- xna <- is.na(xx)
- xx <- format(zapsmall(xx, digits))
- xx[xna] <- ""
- x[[i]] <- xx
+ xx <- x[[i]]
+ xna <- is.na(xx)
+ xx <- format(zapsmall(xx, digits))
+ xx[xna] <- ""
+ x[[i]] <- xx
}
print.data.frame(as.data.frame(x, row.names = rrr))
invisible(x)
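
summaryvgam() above assembles the "DF for Terms and Approximate F-values for Nonparametric Effects" table that show.summary.vgam() then prints. A hedged end-to-end sketch, assuming the hunua data frame and binomialff() family that ship with VGAM (the smooth term is illustrative):

    library(VGAM)
    fit <- vgam(agaacu ~ s(altitude, df = 3), binomialff, data = hunua)
    summary(fit)   # prints the nonparametric test table built above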
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 547e1a2..2981e04 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -50,7 +50,7 @@ summaryvglm <- function(object, correlation = FALSE,
df = stuff@df,
sigma = stuff@sigma)
- presid = resid(object, type = "pearson")
+ presid <- resid(object, type = "pearson")
if (length(presid))
answer@pearson.resid <- as.matrix(presid)
@@ -92,16 +92,16 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
length(Presid) &&
all(!is.na(Presid)) &&
is.finite(rdf)) {
- cat("\nPearson Residuals:\n")
- if (rdf/M > 5) {
- rq <- apply(as.matrix(Presid), 2, quantile) # 5 x M
- dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
- x@misc$predictors.names)
- print(t(rq), digits = digits)
- } else
- if (rdf > 0) {
- print(Presid, digits = digits)
- }
+ cat("\nPearson Residuals:\n")
+ if (rdf/M > 5) {
+ rq <- apply(as.matrix(Presid), 2, quantile) # 5 x M
+ dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
+ x@misc$predictors.names)
+ print(t(rq), digits = digits)
+ } else
+ if (rdf > 0) {
+ print(Presid, digits = digits)
+ }
}
cat("\nCoefficients:\n")
@@ -122,38 +122,39 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
prose <- ""
if (length(x@dispersion)) {
- if (is.logical(x@misc$estimated.dispersion) &&
- x@misc$estimated.dispersion)
- prose <- "(Estimated) " else {
-
- if (is.numeric(x@misc$default.dispersion) &&
- x@dispersion==x@misc$default.dispersion)
- prose <- "(Default) "
-
- if (is.numeric(x@misc$default.dispersion) &&
- x@dispersion!=x@misc$default.dispersion)
- prose <- "(Pre-specified) "
- }
- cat(paste("\n", prose, "Dispersion Parameter for ",
- x@family@vfamily[1],
- " family: ", yformat(x@dispersion, digits), "\n",
- sep = ""))
+ if (is.logical(x@misc$estimated.dispersion) &&
+ x@misc$estimated.dispersion) {
+ prose <- "(Estimated) "
+ } else {
+
+ if (is.numeric(x@misc$default.dispersion) &&
+ x@dispersion == x@misc$default.dispersion)
+ prose <- "(Default) "
+
+ if (is.numeric(x@misc$default.dispersion) &&
+ x@dispersion != x@misc$default.dispersion)
+ prose <- "(Pre-specified) "
+ }
+ cat(paste("\n", prose, "Dispersion Parameter for ",
+ x@family@vfamily[1],
+ " family: ", yformat(x@dispersion, digits), "\n",
+ sep = ""))
}
if (length(deviance(x))) {
cat("\nResidual deviance:", yformat(deviance(x), digits))
if (is.finite(rdf))
- cat(" on", round(rdf, digits), "degrees of freedom\n") else
- cat("\n")
+ cat(" on", round(rdf, digits), "degrees of freedom\n") else
+ cat("\n")
}
if (length(vll <- logLik.vlm(x))) {
cat("\nLog-likelihood:", yformat(vll, digits))
if (is.finite(rdf))
- cat(" on", round(rdf, digits), "degrees of freedom\n") else
- cat("\n")
+ cat(" on", round(rdf, digits), "degrees of freedom\n") else
+ cat("\n")
}
@@ -184,17 +185,17 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
- setMethod("summary", "vglm",
- function(object, ...)
- summaryvglm(object, ...))
+setMethod("summary", "vglm",
+ function(object, ...)
+ summaryvglm(object, ...))
- setMethod("show", "summary.vglm",
- function(object)
- show.summary.vglm(object))
+setMethod("show", "summary.vglm",
+ function(object)
+ show.summary.vglm(object))
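
For reference, summary() on a "vglm" fit dispatches to summaryvglm() as registered above; a hedged sketch (hypothetical data):

    fit <- vglm(cbind(yes, no) ~ x, family = binomialff, data = mydata)
    summary(fit)                       # Pearson residuals, coefficients, dispersion, deviance, log-likelihood
    summary(fit, correlation = TRUE)   # additionally computes the correlation of coefficients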
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 52310d0..9055726 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,7 +10,10 @@
-summaryvlm <- function(object, correlation = FALSE, dispersion = NULL) {
+summaryvlm <-
+ function(object, correlation = FALSE, dispersion = NULL,
+ Colnames = c("Estimate", "Std. Error", "z value")) {
+
if (is.logical(object@misc$BFGS) && object@misc$BFGS)
@@ -25,7 +28,7 @@ summaryvlm <- function(object, correlation = FALSE, dispersion = NULL) {
coef <- object@coefficients
cnames <- names(coef)
- presid = residualsvlm(object, type = "pearson") # NULL if pooled.weight
+ presid <- residualsvlm(object, type = "pearson") # NULL if pooled.weight
if (any(is.na(coef))) {
warning(paste("Some NAs in the coefficients---no summary",
@@ -64,19 +67,19 @@ summaryvlm <- function(object, correlation = FALSE, dispersion = NULL) {
if (ncol_X_vlm < max(dim(R)))
stop("R is rank deficient")
- rinv = diag(ncol_X_vlm)
- rinv = backsolve(R, rinv)
- rowlen = drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
- covun = rinv %*% t(rinv)
+ rinv <- diag(ncol_X_vlm)
+ rinv <- backsolve(R, rinv)
+ rowlen <- drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
+ covun <- rinv %*% t(rinv)
dimnames(covun) <- list(cnames, cnames)
}
- coef <- matrix(rep(coef, 3), ncol=3)
- dimnames(coef) <- list(cnames, c("Estimate", "Std. Error", "z value"))
+ coef <- matrix(rep(coef, 3), ncol = 3)
+ dimnames(coef) <- list(cnames, Colnames)
if (length(sigma) == 1 && is.Numeric(ncol_X_vlm)) {
coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
coef[, 3] <- coef[, 1] / coef[, 2]
} else {
- coef[,1] = coef[,2] = coef[,3] = NA
+ coef[,1] <- coef[,2] <- coef[,3] <- NA
}
if (correlation) {
correl <- covun * outer(1 / rowlen, 1 / rowlen)
@@ -96,11 +99,12 @@ summaryvlm <- function(object, correlation = FALSE, dispersion = NULL) {
df = c(ncol_X_vlm, rdf),
sigma = sigma)
- if (is.Numeric(ncol_X_vlm)) answer@cov.unscaled = covun
- answer@dispersion = dispersion # Overwrite this
+ if (is.Numeric(ncol_X_vlm))
+ answer@cov.unscaled <- covun
+ answer@dispersion <- dispersion # Overwrite this
if (length(presid))
- answer@pearson.resid = as.matrix(presid)
+ answer@pearson.resid <- as.matrix(presid)
answer
@@ -113,86 +117,87 @@ show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
prefix = "") {
- M <- x@misc$M
- coef3 <- x@coef3 # ficients
- correl <- x@correlation
+ M <- x@misc$M
+ coef3 <- x@coef3 # ficients
+ correl <- x@correlation
- if (is.null(digits)) {
- digits <- options()$digits
- } else {
- old.digits <- options(digits = digits)
- on.exit(options(old.digits))
- }
+ if (is.null(digits)) {
+ digits <- options()$digits
+ } else {
+ old.digits <- options(digits = digits)
+ on.exit(options(old.digits))
+ }
- cat("\nCall:\n")
- dput(x@call)
-
- presid <- x@pearson.resid
- rdf <- x@df[2]
- if (length(presid) && all(!is.na(presid))) {
- cat("\nPearson residuals:\n")
- if (rdf/M > 5) {
- rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
- dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
- x@misc$predictors.names)
- print(t(rq), digits=digits)
- } else
- if (rdf > 0) {
- print(presid, digits=digits)
- }
+ cat("\nCall:\n")
+ dput(x@call)
+
+ presid <- x@pearson.resid
+ rdf <- x@df[2]
+ if (length(presid) && all(!is.na(presid))) {
+ cat("\nPearson residuals:\n")
+ if (rdf/M > 5) {
+ rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
+ dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
+ x@misc$predictors.names)
+ print(t(rq), digits = digits)
+ } else
+ if (rdf > 0) {
+ print(presid, digits = digits)
}
+ }
- if (!all(is.na(coef3))) {
- cat("\nCoefficients:\n")
- print(coef3, digits = digits)
- }
+ if (!all(is.na(coef3))) {
+ cat("\nCoefficients:\n")
+ print(coef3, digits = digits)
+ }
- cat("\nNumber of responses: ", M, "\n")
+ cat("\nNumber of responses: ", M, "\n")
- if (length(x@misc$predictors.names))
- if (M == 1) {
- cat("\nName of response:",
- paste(x@misc$predictors.names, collapse = ", "), "\n")
- } else {
- UUU = paste(x@misc$predictors.names, collapse = ", ")
- UUU = x@misc$predictors.names
- cat("\nNames of responses:\n")
- cat(UUU, fill = TRUE, sep = ", ")
- }
+ if (length(x@misc$predictors.names))
+ if (M == 1) {
+ cat("\nName of response:",
+ paste(x@misc$predictors.names, collapse = ", "), "\n")
+ } else {
+ UUU <- paste(x@misc$predictors.names, collapse = ", ")
+ UUU <- x@misc$predictors.names
+ cat("\nNames of responses:\n")
+ cat(UUU, fill = TRUE, sep = ", ")
+ }
- if (!is.null(x@rss))
- cat("\nResidual Sum of Squares:", format(round(x@rss, digits)),
- "on", round(rdf, digits), "degrees of freedom\n")
+ if (!is.null(x@rss))
+ cat("\nResidual Sum of Squares:", format(round(x@rss, digits)),
+ "on", round(rdf, digits), "degrees of freedom\n")
- if (length(correl)) {
- ncol_X_vlm <- dim(correl)[2]
- if (ncol_X_vlm > 1) {
- cat("\nCorrelation of Coefficients:\n")
- ll <- lower.tri(correl)
- correl[ll] <- format(round(correl[ll], digits))
- correl[!ll] <- ""
- print(correl[-1, -ncol_X_vlm, drop = FALSE], quote = FALSE, digits=digits)
- }
+ if (length(correl)) {
+ ncol_X_vlm <- dim(correl)[2]
+ if (ncol_X_vlm > 1) {
+ cat("\nCorrelation of Coefficients:\n")
+ ll <- lower.tri(correl)
+ correl[ll] <- format(round(correl[ll], digits))
+ correl[!ll] <- ""
+ print(correl[-1, -ncol_X_vlm, drop = FALSE],
+ quote = FALSE, digits = digits)
}
+ }
- invisible(NULL)
+ invisible(NULL)
}
- setMethod("summary", "vlm",
- function(object, ...)
- summaryvlm(object, ...))
+setMethod("summary", "vlm",
+ function(object, ...)
+ summaryvlm(object, ...))
- setMethod("show", "summary.vlm",
- function(object)
- show.summary.vlm(object))
+setMethod("show", "summary.vlm",
+ function(object)
+ show.summary.vlm(object))
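
The new Colnames argument of summaryvlm() above only relabels the three-column coefficient matrix; for example (fit is a hypothetical "vlm" object):

    sfit <- summaryvlm(fit, Colnames = c("Estimate", "Std. Error", "z value"))  # the defaults
    sfit@coef3   # coefficient matrix carrying the column labels supplied above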
diff --git a/R/uqo.R b/R/uqo.R
index 298b8c7..05e3ea5 100644
--- a/R/uqo.R
+++ b/R/uqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -80,7 +80,7 @@ uqo.control = function(Rank = 1,
if (Use.Init.Poisson.QO && CA1)
stop("cannot have both 'Use.Init.Poisson.QO = TRUE' and 'CA1 = TRUE'")
- ans = list(
+ ans <- list(
Bestof = Bestof,
CA1 = CA1,
ConstrainedQO = FALSE, # A constant, not a control parameter
@@ -116,7 +116,7 @@ uqo.control = function(Rank = 1,
uqo <- function(formula,
- family, data=list(),
+ family, data = list(),
weights = NULL, subset = NULL, na.action = na.fail,
etastart = NULL, mustart = NULL, coefstart = NULL,
control = uqo.control(...),
@@ -126,8 +126,7 @@ uqo <- function(formula,
contrasts = NULL,
constraints = NULL,
extra = NULL,
- qr.arg = FALSE, ...)
-{
+ qr.arg = FALSE, ...) {
dataname <- as.character(substitute(data)) # "list" if no data=
function.name <- "uqo"
@@ -158,7 +157,7 @@ uqo <- function(formula,
y <- model.response(mf, "numeric") # model.extract(mf, "response")
x <- model.matrix(mt, mf, contrasts)
- attr(x, "assign") = attrassigndefault(x, mt)
+ attr(x, "assign") <- attrassigndefault(x, mt)
offset <- model.offset(mf)
if (is.null(offset))
offset <- 0 # yyy ???
@@ -187,7 +186,7 @@ uqo <- function(formula,
length(as.list(family@deviance)) <= 1)
stop("The fast algorithm requires the family ",
"function to have a deviance slot")
- deviance.Bestof = rep(as.numeric(NA), len = control$Bestof)
+ deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
for(tries in 1:control$Bestof) {
if (control$trace && (control$Bestof>1))
cat(paste("\n========================= Fitting model", tries,
@@ -199,11 +198,12 @@ uqo <- function(formula,
qr.arg = qr.arg, Terms = mt, function.name = function.name,
ca1 = control$CA1 && tries == 1, ...)
deviance.Bestof[tries] = it$crit.list$deviance
- if (tries == 1||min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
- fit = it
+ if (tries == 1 ||
+ min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+ fit <- it
}
- fit$misc$deviance.Bestof = deviance.Bestof
- fit$misc$criterion = "deviance" # Needed for calibrate; 21/1/05
+ fit$misc$deviance.Bestof <- deviance.Bestof
+ fit$misc$criterion <- "deviance" # Needed for calibrate; 21/1/05
fit$misc$dataname <- dataname
@@ -226,12 +226,12 @@ uqo <- function(formula,
answer@control$min.criterion = TRUE # Needed for calibrate; 21/1/05
if (length(fit$weights))
- slot(answer, "weights") = as.matrix(fit$weights)
+ slot(answer, "weights") <- as.matrix(fit$weights)
if (x.arg)
- slot(answer, "x") = x
+ slot(answer, "x") <- x
if (y.arg)
- slot(answer, "y") = as.matrix(fit$y)
- slot(answer, "extra") = if (length(fit$extra)) {
+ slot(answer, "y") <- as.matrix(fit$y)
+ slot(answer, "extra") <- if (length(fit$extra)) {
if (is.list(fit$extra)) fit$extra else {
warning("'extra' is not a list, therefore ",
"placing 'extra' into a list")
@@ -239,55 +239,56 @@ uqo <- function(formula,
}
} else list() # R-1.5.0
if (length(fit$prior.weights))
- slot(answer, "prior.weights") = as.matrix(fit$prior.weights)
+ slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
answer
}
-calluqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
+calluqof <- function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
Control,
- n, M, maxMr5, othint, othdbl, bnumat, Hstep=NA, alldump) {
- control = Control
- Rank = control$Rank
- itol = othint[14]
- inited = if (is.R()) {
+ n, M, maxMr5, othint, othdbl, bnumat,
+ Hstep = NA, alldump) {
+ control <- Control
+ Rank <- control$Rank
+ itol <- othint[14]
+ inited <- if (is.R()) {
as.numeric(existsinVGAMenv("etamat", prefix = ".VGAM.UQO."))
} else 0
- othint[5] = inited # Replacement
- usethiseta = if (inited == 1)
+ othint[5] <- inited # Replacement
+ usethiseta <- if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
- usethisbeta = double(othint[13])
- pstar = othint[3]
- nstar = if (nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
- NOS = ifelse(modelno == 3 || modelno==5, M/2, M)
+ usethisbeta <- double(othint[13])
+ pstar <- othint[3]
+ nstar <- if (nice31) ifelse(modelno==3 || modelno==5,n*2,n) else n*M
+ NOS <- ifelse(modelno == 3 || modelno==5, M/2, M)
- sitescores = matrix(sitescores, ncol=Rank)
- sitescores = scale(sitescores, center = TRUE, scale = FALSE)
+ sitescores <- matrix(sitescores, ncol=Rank)
+ sitescores <- scale(sitescores, center = TRUE, scale = FALSE)
if (itol) {
- numat = matrix(sitescores, ncol=Rank)
+ numat <- matrix(sitescores, ncol=Rank)
if (Rank > 1) {
- evnu = eigen(var(numat))
- numat = numat %*% evnu$vector
+ evnu <- eigen(var(numat))
+ numat <- numat %*% evnu$vector
}
- sdnumat = apply(numat, 2, sd)
+ sdnumat <- apply(numat, 2, sd)
for(lookat in 1:Rank)
if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
muxer = control$isdlv[lookat] *
control$MUXfactor[lookat] / sdnumat[lookat]
- numat[,lookat] = numat[,lookat] * muxer
+ numat[,lookat] <- numat[,lookat] * muxer
if (control$trace) {
}
}
} else {
- numat = matrix(sitescores, ncol=Rank)
- evnu = eigen(var(numat))
- temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
+ numat <- matrix(sitescores, ncol=Rank)
+ evnu <- eigen(var(numat))
+ temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
- numat = numat %*% temp7
+ numat <- numat %*% temp7
}
ans1 <-
@@ -508,18 +509,25 @@ uqo.fit <- function(x, y, w = rep(1, len = nrow(x)),
"negbinomial" = 0,
"gamma2"=5,
0) # stop("can't fit this model using fast algorithm")
- if (!modelno) stop("the family function does not work with uqo()")
- if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
+ if (!modelno)
+ stop("the family function does not work with uqo()")
+ if (modelno == 1)
+ modelno <- get("modelno", envir = VGAM:::VGAMenv)
rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.UQO.")
- cqofastok = if (is.R()) (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv) &&
- get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
- (exists("CQO.FastAlgorithm", inherits = TRUE) && CQO.FastAlgorithm)
+
+ cqofastok <-
+ exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv) &&
+ get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+
+
if (!cqofastok)
- stop("can't fit this model using fast algorithm")
+ stop("can't fit this model using fast algorithm")
- nice31 = (!control$EqualTol || control$ITolerances) && control$Quadratic &&
- all(trivial.constraints(Blist))
+ nice31 <- (!control$EqualTol ||
+ control$ITolerances) &&
+ control$Quadratic &&
+ all(trivial.constraints(Blist))
X_vlm_1save <- if (nice31) {
@@ -837,7 +845,7 @@ jitteruqo = function(mat) {
setMethod("Opt", "uqo", function(object, ...) Opt.qrrvglm(object, ...))
setMethod("Max", "uqo", function(object, ...) Max.qrrvglm(object, ...))
-setMethod("lv", "uqo", function(object, ...) lv.qrrvglm(object, ...))
+setMethod("lv", "uqo", function(object, ...) latvar.qrrvglm(object, ...))
diff --git a/R/vgam.R b/R/vgam.R
index f1bc33f..debd2b0 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,7 +10,7 @@
vgam <- function(formula,
family,
- data=list(),
+ data = list(),
weights = NULL,
subset = NULL,
na.action=na.fail,
@@ -23,255 +23,249 @@ vgam <- function(formula,
constraints = NULL,
extra=list(),
qr.arg = FALSE, smart = TRUE,
- ...)
-{
- dataname <- as.character(substitute(data)) # "list" if no data=
- function.name <- "vgam"
-
- ocall <- match.call()
-
- if (smart)
- setup.smart("write")
-
- if (missing(data))
- data <- environment(formula)
-
- mtsave <- terms(formula, "s", data = data)
-
- mf <- match.call(expand.dots = FALSE)
- m <- match(c("formula", "data", "subset", "weights", "na.action",
- "etastart", "mustart", "offset"), names(mf), 0)
- mf <- mf[c(1, m)]
- mf$drop.unused.levels <- TRUE
- mf[[1]] <- as.name("model.frame")
- mf <- eval(mf, parent.frame())
- switch(method, model.frame = return(mf), vgam.fit = 1,
- stop("invalid 'method': ", method))
- mt <- attr(mf, "terms")
-
- xlev = .getXlevels(mt, mf)
- y <- model.response(mf, "any") # model.extract(mf, "response")
- x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
- matrix(, NROW(y), 0)
- attr(x, "assign") = attrassigndefault(x, mt)
-
- offset <- model.offset(mf)
- if (is.null(offset))
- offset <- 0 # yyy ???
-
-
-
-
- mf2 = mf
- if (!missing(subset)) {
- mf2$subset <- NULL
- mf2 <- eval(mf2, parent.frame()) # mf2 is the full data frame.
- spars2 = lapply(mf2, attr, "spar")
- dfs2 = lapply(mf2, attr, "df")
- sx2 = lapply(mf2, attr, "s.xargument")
- for (ii in 1:length(mf)) {
- if (length(sx2[[ii]])) {
- attr(mf[[ii]], "spar") = spars2[[ii]]
- attr(mf[[ii]], "dfs2") = dfs2[[ii]]
- attr(mf[[ii]], "s.xargument") = sx2[[ii]]
- }
+ ...) {
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "vgam"
+
+ ocall <- match.call()
+
+ if (smart)
+ setup.smart("write")
+
+ if (missing(data))
+ data <- environment(formula)
+
+ mtsave <- terms(formula, "s", data = data)
+
+ mf <- match.call(expand.dots = FALSE)
+ m <- match(c("formula", "data", "subset", "weights", "na.action",
+ "etastart", "mustart", "offset"), names(mf), 0)
+ mf <- mf[c(1, m)]
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ switch(method, model.frame = return(mf), vgam.fit = 1,
+ stop("invalid 'method': ", method))
+ mt <- attr(mf, "terms")
+
+ xlev <- .getXlevels(mt, mf)
+ y <- model.response(mf, "any") # model.extract(mf, "response")
+ x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
+ matrix(, NROW(y), 0)
+ attr(x, "assign") <- attrassigndefault(x, mt)
+
+ offset <- model.offset(mf)
+ if (is.null(offset))
+ offset <- 0 # yyy ???
+
+
+
+
+ mf2 <- mf
+ if (!missing(subset)) {
+ mf2$subset <- NULL
+ mf2 <- eval(mf2, parent.frame()) # mf2 is the full data frame.
+ spars2 <- lapply(mf2, attr, "spar")
+ dfs2 <- lapply(mf2, attr, "df")
+ sx2 <- lapply(mf2, attr, "s.xargument")
+ for (ii in 1:length(mf)) {
+ if (length(sx2[[ii]])) {
+ attr(mf[[ii]], "spar") <- spars2[[ii]]
+ attr(mf[[ii]], "dfs2") <- dfs2[[ii]]
+ attr(mf[[ii]], "s.xargument") <- sx2[[ii]]
}
- rm(mf2)
- }
+ }
+ rm(mf2)
+ }
- w <- model.weights(mf)
- if (!length(w))
- w <- rep(1, nrow(mf))
- else if (ncol(as.matrix(w))==1 && any(w < 0))
- stop("negative weights not allowed")
+ w <- model.weights(mf)
+ if (!length(w)) {
+ w <- rep(1, nrow(mf))
+ } else if (ncol(as.matrix(w)) == 1 && any(w < 0))
+ stop("negative weights not allowed")
- if (is.character(family))
- family <- get(family)
- if (is.function(family))
- family <- family()
- if (!inherits(family, "vglmff")) {
- stop("'family = ", family, "' is not a VGAM family function")
- }
+ if (is.character(family))
+ family <- get(family)
+ if (is.function(family))
+ family <- family()
+ if (!inherits(family, "vglmff")) {
+ stop("'family = ", family, "' is not a VGAM family function")
+ }
- eval(vcontrol.expression)
+ eval(vcontrol.expression)
- n <- dim(x)[1]
+ n <- dim(x)[1]
- if (FALSE && is.R()) {
- family@linkinv <- eval(family@linkinv)
- family@link <- eval(family@link)
+ if (FALSE && is.R()) {
+ family@linkinv <- eval(family@linkinv)
+ family@link <- eval(family@link)
- for (ii in names(.min.criterion.VGAM))
- if (length(family[[ii]])) family[[ii]] <- eval(family[[ii]])
- }
+ for (ii in names(.min.criterion.VGAM))
+ if (length(family[[ii]])) family[[ii]] <- eval(family[[ii]])
+ }
- if (length(slot(family, "first")))
- eval(slot(family, "first"))
+ if (length(slot(family, "first")))
+ eval(slot(family, "first"))
- if (method != "vgam.fit")
- stop("method must be \"model.frame\" or \"vgam.fit\"")
+ if (method != "vgam.fit")
+ stop("method must be \"model.frame\" or \"vgam.fit\"")
# --------------------------------------------------------------
- aa <- attributes(mtsave)
- smoothers <- aa$specials
+ aa <- attributes(mtsave)
+ smoothers <- aa$specials
- nonparametric <- length(smoothers$s) > 0
- if (nonparametric) {
+ nonparametric <- length(smoothers$s) > 0
+ if (nonparametric) {
- ff <- apply(aa$factors[smoothers[["s"]],,drop = FALSE], 2, any)
- smoothers[["s"]] <- if (any(ff))
- seq(along=ff)[aa$order==1 & ff] else NULL
+ ff <- apply(aa$factors[smoothers[["s"]],,drop = FALSE], 2, any)
+ smoothers[["s"]] <- if (any(ff))
+ seq(along = ff)[aa$order == 1 & ff] else NULL
- smooth.labels <- aa$term.labels[unlist(smoothers)]
- } else
- function.name = "vglm" # This is effectively so
+ smooth.labels <- aa$term.labels[unlist(smoothers)]
+ } else
+ function.name <- "vglm" # This is effectively so
- fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
- etastart = etastart, mustart = mustart, coefstart = coefstart,
- offset = offset, family = family, control = control,
- criterion = control$criterion,
- constraints = constraints, extra = extra, qr.arg = qr.arg,
- Terms = mtsave,
- nonparametric = nonparametric, smooth.labels = smooth.labels,
- function.name = function.name, ...)
+ fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
+ etastart = etastart, mustart = mustart, coefstart = coefstart,
+ offset = offset, family = family, control = control,
+ criterion = control$criterion,
+ constraints = constraints, extra = extra, qr.arg = qr.arg,
+ Terms = mtsave,
+ nonparametric = nonparametric, smooth.labels = smooth.labels,
+ function.name = function.name, ...)
- if (is.Numeric(fit$nl.df) && any(fit$nl.df < 0)) {
- fit$nl.df[fit$nl.df < 0] = 0
- }
+ if (is.Numeric(fit$nl.df) && any(fit$nl.df < 0)) {
+ fit$nl.df[fit$nl.df < 0] <- 0
+ }
# --------------------------------------------------------------
- if (!is.null(fit[["smooth.frame"]])) {
- fit <- fit[-1] # Strip off smooth.frame
- } else {
+ if (!is.null(fit[["smooth.frame"]])) {
+ fit <- fit[-1] # Strip off smooth.frame
+ } else {
+ }
+
+ fit$smomat <- NULL # Not needed
+
+ fit$call <- ocall
+ if (model)
+ fit$model <- mf
+ if (!x.arg)
+ fit$x <- NULL
+ if (!y.arg)
+ fit$y <- NULL
+
+ if (nonparametric)
+ fit$misc$smooth.labels <- smooth.labels
+
+
+ fit$misc$dataname <- dataname
+
+
+ if (smart)
+ fit$smart.prediction <- get.smart.prediction()
+
+
+ answer <-
+ new("vgam",
+ "assign" = attr(x, "assign"),
+ "call" = fit$call,
+ "coefficients" = fit$coefficients,
+ "constraints" = fit$constraints,
+ "criterion" = fit$crit.list,
+ "df.residual" = fit$df.residual,
+ "dispersion" = 1,
+ "family" = fit$family,
+ "misc" = fit$misc,
+ "model" = if (model) mf else data.frame(),
+ "R" = fit$R,
+ "rank" = fit$rank,
+ "residuals" = as.matrix(fit$residuals),
+ "rss" = fit$rss,
+ "smart.prediction" = as.list(fit$smart.prediction),
+ "terms" = list(terms=fit$terms))
+
+ if (!smart)
+ answer@smart.prediction <- list(smart.arg = FALSE)
+
+ if (qr.arg) {
+ class(fit$qr) <- "list"
+ slot(answer, "qr") <- fit$qr
+ }
+ if (length(attr(x, "contrasts")))
+ slot(answer, "contrasts") <- attr(x, "contrasts")
+ if (length(fit$fitted.values))
+ slot(answer, "fitted.values") <- as.matrix(fit$fitted.values)
+ slot(answer, "na.action") <- if (length(aaa <- attr(mf, "na.action")))
+ list(aaa) else list()
+ if (length(offset))
+ slot(answer, "offset") <- as.matrix(offset)
+ if (length(fit$weights))
+ slot(answer, "weights") <- as.matrix(fit$weights)
+ if (x.arg)
+ slot(answer, "x") <- x # The 'small' design matrix
+ if (length(xlev))
+ slot(answer, "xlevels") <- xlev
+ if (y.arg)
+ slot(answer, "y") <- as.matrix(fit$y)
+ answer@misc$formula <- formula
+
+
+ slot(answer, "control") <- fit$control
+
+ if (length(fit$extra)) {
+ slot(answer, "extra") <- fit$extra
+ }
+ slot(answer, "iter") <- fit$iter
+ slot(answer, "post") <- fit$post
+
+ fit$predictors <- as.matrix(fit$predictors) # Must be a matrix
+ dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
+ fit$misc$predictors.names)
+ slot(answer, "predictors") <- fit$predictors
+ if (length(fit$prior.weights))
+ slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
+
+
+ if (nonparametric) {
+ slot(answer, "Bspline") <- fit$Bspline
+ slot(answer, "nl.chisq") <- fit$nl.chisq
+ if (is.Numeric(fit$nl.df))
+ slot(answer, "nl.df") <- fit$nl.df
+ slot(answer, "spar") <- fit$spar
+ slot(answer, "s.xargument") <- fit$s.xargument
+ if (length(fit$varmat)) {
+ slot(answer, "var") <- fit$varmat
}
- fit$smomat <- NULL # Not needed
-
- fit$call <- ocall
- if (model)
- fit$model <- mf
- if (!x.arg)
- fit$x <- NULL
- if (!y.arg)
- fit$y <- NULL
-
- if (nonparametric)
- fit$misc$smooth.labels <- smooth.labels
- fit$misc$dataname <- dataname
- if (smart)
- fit$smart.prediction <- get.smart.prediction()
+ }
+ if (length(fit$effects))
+ slot(answer, "effects") <- fit$effects
- answer <-
- new("vgam",
- "assign" = attr(x, "assign"),
- "call" = fit$call,
- "coefficients" = fit$coefficients,
- "constraints" = fit$constraints,
- "criterion" = fit$crit.list,
- "df.residual" = fit$df.residual,
- "dispersion" = 1,
- "family" = fit$family,
- "misc" = fit$misc,
- "model" = if (model) mf else data.frame(),
- "R" = fit$R,
- "rank" = fit$rank,
- "residuals" = as.matrix(fit$residuals),
- "rss" = fit$rss,
- "smart.prediction" = as.list(fit$smart.prediction),
- "terms" = list(terms=fit$terms))
-
- if (!smart) answer@smart.prediction <- list(smart.arg = FALSE)
-
- if (qr.arg) {
- class(fit$qr) = "list"
- slot(answer, "qr") = fit$qr
- }
- if (length(attr(x, "contrasts")))
- slot(answer, "contrasts") = attr(x, "contrasts")
- if (length(fit$fitted.values))
- slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
- slot(answer, "na.action") = if (length(aaa <- attr(mf, "na.action")))
- list(aaa) else list()
- if (length(offset))
- slot(answer, "offset") = as.matrix(offset)
- if (length(fit$weights))
- slot(answer, "weights") = as.matrix(fit$weights)
- if (x.arg)
- slot(answer, "x") = x # The 'small' design matrix
- if (length(xlev))
- slot(answer, "xlevels") = xlev
- if (y.arg)
- slot(answer, "y") = as.matrix(fit$y)
- answer@misc$formula = formula
-
-
- slot(answer, "control") = fit$control
-
- if (length(fit$extra)) {
- slot(answer, "extra") = fit$extra
- }
- slot(answer, "iter") = fit$iter
- slot(answer, "post") = fit$post
-
- fit$predictors = as.matrix(fit$predictors) # Must be a matrix
- dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
- fit$misc$predictors.names)
- slot(answer, "predictors") = fit$predictors
- if (length(fit$prior.weights))
- slot(answer, "prior.weights") = as.matrix(fit$prior.weights)
-
-
- if (nonparametric) {
- slot(answer, "Bspline") = fit$Bspline
- slot(answer, "nl.chisq") = fit$nl.chisq
- if (is.Numeric(fit$nl.df))
- slot(answer, "nl.df") = fit$nl.df
- slot(answer, "spar") = fit$spar
- slot(answer, "s.xargument") = fit$s.xargument
- if (length(fit$varmat)) {
- slot(answer, "var") = fit$varmat
- }
-
-
-
-
-
- }
- if (length(fit$effects))
- slot(answer, "effects") = fit$effects
-
-
- answer
+ answer
}
attr(vgam, "smart") <- TRUE
-care.exp <- function(x, thresh = -log(.Machine$double.eps)) {
- x[x > thresh] <- thresh
- x[x < (-thresh)] <- -thresh
- exp(x)
-}
-
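
For context, the re-indented vgam() above is called as before; a minimal sketch (hypothetical data; any VGAM family function could replace poissonff):

    fit <- vgam(y ~ s(x) + x2, family = poissonff, data = mydata)
    mf  <- vgam(y ~ s(x) + x2, family = poissonff, data = mydata,
                method = "model.frame")   # returns only the model frame, per the switch() above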
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 1ca3ff4..20286ec 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -80,7 +80,7 @@ vgam.control <- function(all.knots = FALSE,
vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
assign, M, n, constraints) {
- attr(qr, "class") = "qr"
+ attr(qr, "class") <- "qr"
class(qr) <- "qr"
if (!is.matrix(smomat)) smomat <- as.matrix(smomat)
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 714398a..c24dfd1 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,222 +10,231 @@ vgam.fit <- function(x, y, w, mf,
constraints = NULL, extra, qr.arg,
Terms,
nonparametric, smooth.labels,
- function.name = "vgam", ...)
-{
- specialCM = NULL
- post = list()
- check.Rank <- TRUE # Set this to false for family functions vppr() etc.
- epsilon <- control$epsilon
- maxit <- control$maxit
- save.weight <- control$save.weight
- trace <- control$trace
-
- bf.maxit <- control$bf.maxit
- bf.epsilon <- control$bf.epsilon
- se.fit <- control$se.fit
- minimize.criterion <- control$min.criterion
+ function.name = "vgam", ...) {
+
+
+ specialCM <- NULL
+ post <- list()
+ check.Rank <- TRUE # Set this to false for family functions vppr() etc.
+ epsilon <- control$epsilon
+ maxit <- control$maxit
+ save.weight <- control$save.weight
+ trace <- control$trace
+
+ bf.maxit <- control$bf.maxit
+ bf.epsilon <- control$bf.epsilon
+ se.fit <- control$se.fit
+ minimize.criterion <- control$min.criterion
+
+
+ fv <- NULL
- n <- dim(x)[1]
+ n <- dim(x)[1]
- # --------------------------------------------------------------
- new.s.call <- expression({
- if (c.list$one.more) {
- fv <- c.list$fit
- new.coeffs <- c.list$coeff
- if (length(family@middle))
- eval(family@middle)
- eta <- fv + offset
- mu <- family@linkinv(eta, extra)
+ new.s.call <- expression({
+ if (c.list$one.more) {
+ fv <- c.list$fit
+ new.coeffs <- c.list$coeff
- if (length(family@middle2))
- eval(family@middle2)
+ if (length(family@middle))
+ eval(family@middle)
- old.crit <- new.crit
+ eta <- fv + offset
+ mu <- family@linkinv(eta, extra)
- new.crit <- switch(criterion,
- coefficients=new.coeffs,
- tfun(mu=mu, y=y, w=w, res = FALSE, eta=eta, extra))
- if (trace) {
- cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
+ if (length(family@middle2))
+ eval(family@middle2)
- UUUU = switch(criterion, coefficients=
- format(new.crit, dig=round(2-log10(epsilon))),
- format(round(new.crit, 4)))
+ old.crit <- new.crit
- switch(criterion,
- coefficients={if(length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
+ new.crit <- switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+ if (trace) {
+ cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
+
+ UUUU <- switch(criterion, coefficients =
+ format(new.crit, dig = round(2-log10(epsilon))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients = {if(length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
one.more <- eval(control$convergence)
- flush.console()
-
- if (!is.finite(one.more) ||
- !is.logical(one.more)) one.more = FALSE
- if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(family@deriv)
- wz <- eval(family@weight)
- if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
-
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
-
- c.list$z <- z
- c.list$wz <- wz
- c.list$U <- U
- }
-
- c.list$one.more <- one.more
- c.list$coeff = runif(length(new.coeffs)) # 12/3/03; twist needed!
- old.coeffs <- new.coeffs
- }
- c.list
- })
+ flush.console()
+
+ if (!is.finite(one.more) ||
+ !is.logical(one.more)) one.more <- FALSE
+ if (one.more) {
+ iter <- iter + 1
+ deriv.mu <- eval(family@deriv)
+ wz <- eval(family@weight)
+ if (control$checkwz)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
+
+ c.list$z <- z
+ c.list$wz <- wz
+ c.list$U <- U
+ }
+
+ c.list$one.more <- one.more
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ old.coeffs <- new.coeffs
+ }
+ c.list
+ })
- old.coeffs <- coefstart
+ old.coeffs <- coefstart
- intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- y.names <- predictors.names <- NULL # May be overwritten in @initialize
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ y.names <- predictors.names <- NULL # May be overwritten in @initialize
- n.save <- n
- if (length(slot(family, "initialize")))
- eval(slot(family, "initialize")) # Initialize mu & M (& optionally w)
+ n.save <- n
+ if (length(slot(family, "initialize")))
+ eval(slot(family, "initialize")) # Initialize mu & M (& optionally w)
- if (length(etastart)) {
- eta <- etastart
- mu <- if (length(mustart)) mustart else
- if (length(body(slot(family, "linkinv"))))
- slot(family, "linkinv")(eta, extra) else
- warning("argument 'etastart' assigned a value ",
- "but there is no 'linkinv' slot to use it")
- }
+ if (length(etastart)) {
+ eta <- etastart
+ mu <- if (length(mustart)) mustart else
+ if (length(body(slot(family, "linkinv"))))
+ slot(family, "linkinv")(eta, extra) else
+ warning("argument 'etastart' assigned a value ",
+ "but there is no 'linkinv' slot to use it")
+ }
- if (length(mustart)) {
- mu <- mustart
- if (length(body(slot(family, "linkfun")))) {
- eta <- slot(family, "linkfun")(mu, extra)
- } else {
- warning("argument 'mustart' assigned a value ",
- "but there is no 'link' slot to use it")
- }
+ if (length(mustart)) {
+ mu <- mustart
+ if (length(body(slot(family, "linkfun")))) {
+ eta <- slot(family, "linkfun")(mu, extra)
+ } else {
+ warning("argument 'mustart' assigned a value ",
+ "but there is no 'link' slot to use it")
}
+ }
- M <- if (is.matrix(eta)) ncol(eta) else 1
+ M <- if (is.matrix(eta)) ncol(eta) else 1
- if (length(family@constraints))
- eval(family@constraints)
- Blist <- process.constraints(constraints, x, M, specialCM=specialCM)
+ if (length(family@constraints))
+ eval(family@constraints)
+ Blist <- process.constraints(constraints, x, M, specialCM = specialCM)
- ncolBlist <- unlist(lapply(Blist, ncol))
- dimB <- sum(ncolBlist)
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ dimB <- sum(ncolBlist)
if (nonparametric) {
- smooth.frame <- mf
- assignx <- attr(x, "assign")
- which <- assignx[smooth.labels]
+ smooth.frame <- mf
+ assignx <- attr(x, "assign")
+ which <- assignx[smooth.labels]
- bf <- "s.vam"
- bf.call <- parse(text=paste(
- "s.vam(x, z, wz, tfit$smomat, which, tfit$smooth.frame,",
- "bf.maxit, bf.epsilon, trace, se=se.fit, X_vlm_save, ",
- "Blist, ncolBlist, M=M, qbig=qbig, Umat=U, ",
- "all.knots=control$all.knots, nk=control$nk)",
- sep = ""))[[1]]
+ bf <- "s.vam"
+ bf.call <- parse(text = paste(
+ "s.vam(x, z, wz, tfit$smomat, which, tfit$smooth.frame,",
+ "bf.maxit, bf.epsilon, trace, se = se.fit, X_vlm_save, ",
+ "Blist, ncolBlist, M = M, qbig = qbig, Umat = U, ",
+ "all.knots = control$all.knots, nk = control$nk)",
+ sep = ""))[[1]]
- qbig <- sum(ncolBlist[smooth.labels]) # Number of component funs
- smomat <- matrix(0, n, qbig)
- dy <- if (is.matrix(y)) dimnames(y)[[1]] else names(y)
- d2 <- if (is.null(predictors.names))
- paste("(Additive predictor ",1:M,")", sep = "") else
- predictors.names
- dimnames(smomat) <- list(dy, vlabel(smooth.labels,
- ncolBlist[smooth.labels], M))
+ qbig <- sum(ncolBlist[smooth.labels]) # Number of component funs
+ smomat <- matrix(0, n, qbig)
+ dy <- if (is.matrix(y)) dimnames(y)[[1]] else names(y)
+ d2 <- if (is.null(predictors.names))
+ paste("(Additive predictor ",1:M,")", sep = "") else
+ predictors.names
+ dimnames(smomat) <- list(dy, vlabel(smooth.labels,
+ ncolBlist[smooth.labels], M))
- tfit <- list(smomat = smomat, smooth.frame = smooth.frame)
+ tfit <- list(smomat = smomat, smooth.frame = smooth.frame)
} else {
- bf.call <- expression(vlm.wfit(xmat=X_vlm_save, z, Blist = NULL, U=U,
- matrix.out = FALSE, is.vlmX = TRUE,
- qr = qr.arg, xij = NULL))
- bf <- "vlm.wfit"
+ bf.call <- expression(vlm.wfit(xmat = X_vlm_save, z,
+ Blist = NULL, U = U,
+ matrix.out = FALSE, is.vlmX = TRUE,
+ qr = qr.arg, xij = NULL))
+ bf <- "vlm.wfit"
}
- X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
if (length(coefstart)) {
- eta <- if (ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
- offset else X_vlm_save * coefstart + offset
- eta <- if (M > 1) matrix(eta, ncol=M, byrow = TRUE) else c(eta)
- mu <- family at linkinv(eta, extra)
+ eta <- if (ncol(X_vlm_save) > 1) X_vlm_save %*% coefstart +
+ offset else X_vlm_save * coefstart + offset
+ eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
+ mu <- family at linkinv(eta, extra)
}
if (criterion != "coefficients") {
- tfun <- slot(family, criterion) # Needed 4 R so have to follow suit
+ tfun <- slot(family, criterion) # Needed 4 R so have to follow suit
}
iter <- 1
new.crit <- switch(criterion,
- coefficients=1,
- tfun(mu=mu, y=y, w=w, res = FALSE, eta=eta, extra))
+ coefficients = 1,
+ tfun(mu = mu, y = y, w = w, res = FALSE,
+ eta = eta, extra))
old.crit <- if (minimize.criterion) 10*new.crit+10 else -10*new.crit-10
deriv.mu <- eval(family@deriv)
wz <- eval(family@weight)
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
- c.list <- list(wz=as.double(wz), z=as.double(z),
- fit=as.double(t(eta)),
- one.more = TRUE, U=as.double(U),
- coeff=as.double(rep(1,ncol(X_vlm_save))))
+ c.list <- list(wz = as.double(wz), z = as.double(z),
+ fit = as.double(t(eta)),
+ one.more = TRUE, U = as.double(U),
+ coeff = as.double(rep(1, ncol(X_vlm_save))))
dX_vlm <- as.integer(dim(X_vlm_save))
nrow_X_vlm <- dX_vlm[[1]]
ncol_X_vlm <- dX_vlm[[2]]
if (nrow_X_vlm < ncol_X_vlm)
- stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm, " observations")
+ stop(ncol_X_vlm, " parameters but only ", nrow_X_vlm,
+ " observations")
while (c.list$one.more) {
- tfit <- eval(bf.call) # fit$smooth.frame is new
+ tfit <- eval(bf.call) # fit$smooth.frame is new
- c.list$coeff <- tfit$coefficients
+ c.list$coeff <- tfit$coefficients
- tfit$predictors <- tfit$fitted.values + offset
+ tfit$predictors <- tfit$fitted.values + offset
- c.list$fit <- tfit$fitted.values
- c.list <- eval(new.s.call)
- NULL
+ c.list$fit <- tfit$fitted.values
+ c.list <- eval(new.s.call)
+ NULL
}
if (maxit > 1 && iter >= maxit)
- warning("convergence not obtained in ", maxit, " iterations")
+ warning("convergence not obtained in ", maxit, " iterations")
dnrow_X_vlm <- labels(X_vlm_save)
@@ -233,7 +242,7 @@ vgam.fit <- function(x, y, w, mf,
ynrow_X_vlm <- dnrow_X_vlm[[1]]
if (length(family@fini))
- eval(family@fini)
+ eval(family@fini)
coefs <- tfit$coefficients
asgn <- attr(X_vlm_save, "assign") # 29/11/01 was x
@@ -242,15 +251,15 @@ vgam.fit <- function(x, y, w, mf,
cnames <- xnrow_X_vlm
if (!is.null(tfit$rank)) {
- rank <- tfit$rank
- if (rank < ncol(x))
- stop("rank < ncol(x) is bad")
+ rank <- tfit$rank
+ if (rank < ncol(x))
+ stop("rank < ncol(x) is bad")
} else rank <- ncol(x)
R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
R[lower.tri(R)] <- 0
- attributes(R) <- list(dim=c(ncol_X_vlm, ncol_X_vlm),
- dimnames=list(cnames, cnames), rank=rank)
+ attributes(R) <- list(dim = c(ncol_X_vlm, ncol_X_vlm),
+ dimnames = list(cnames, cnames), rank = rank)
dn <- labels(x)
@@ -260,53 +269,53 @@ vgam.fit <- function(x, y, w, mf,
if (is.matrix(mu)) {
- if (length(dimnames(mu)[[2]])) {
- y.names <- dimnames(mu)[[2]]
- } else
- if (length(dimnames(y)[[2]])) {
- y.names <- dimnames(y)[[2]]
- }
- dimnames(mu) <- list(yn, y.names)
+ if (length(dimnames(mu)[[2]])) {
+ y.names <- dimnames(mu)[[2]]
+ } else
+ if (length(dimnames(y)[[2]])) {
+ y.names <- dimnames(y)[[2]]
+ }
+ dimnames(mu) <- list(yn, y.names)
} else {
- names(mu) <- names(fv)
+ names(mu) <- names(fv)
}
tfit$fitted.values <- NULL # Have to kill it off 3/12/01
fit <- structure(c(tfit, list(
- assign=asgn,
- constraints=Blist,
- control=control,
- fitted.values=mu,
- formula=as.vector(attr(Terms, "formula")),
- iter=iter,
- offset=offset,
- rank=rank,
- R=R,
- terms=Terms)))
+ assign = asgn,
+ constraints = Blist,
+ control = control,
+ fitted.values = mu,
+ formula = as.vector(attr(Terms, "formula")),
+ iter = iter,
+ offset = offset,
+ rank = rank,
+ R = R,
+ terms = Terms)))
df.residual <- nrow_X_vlm - rank
if (!se.fit) {
- fit$varmat <- NULL
+ fit$varmat <- NULL
}
if (M == 1) {
- wz <- as.vector(wz) # Convert wz into a vector
+ wz <- as.vector(wz) # Convert wz into a vector
} # else
fit$weights <- if (save.weight) wz else NULL
if (M == 1) {
- fit$predictors <- as.vector(fit$predictors)
- fit$residuals <- as.vector(fit$residuals)
- names(fit$residuals) <- names(fit$predictors) <- yn
+ fit$predictors <- as.vector(fit$predictors)
+ fit$residuals <- as.vector(fit$residuals)
+ names(fit$residuals) <- names(fit$predictors) <- yn
} else
- dimnames(fit$residuals) <- dimnames(fit$predictors) <-
- list(yn, predictors.names)
+ dimnames(fit$residuals) <-
+ dimnames(fit$predictors) <- list(yn, predictors.names)
- NewBlist <- process.constraints(constraints, x, M, specialCM=specialCM,
- by.col = FALSE)
+ NewBlist <- process.constraints(constraints, x, M,
+ specialCM = specialCM, by.col = FALSE)
misc <- list(
colnames.x = xn,
@@ -327,31 +336,32 @@ vgam.fit <- function(x, y, w, mf,
if (criterion != "coefficients")
- fit[[criterion]] <- new.crit
+ fit[[criterion]] <- new.crit
if (se.fit && length(fit$s.xargument)) {
- misc$varassign <-
- varassign(Blist, names(fit$s.xargument))
+ misc$varassign <- varassign(Blist, names(fit$s.xargument))
}
if (nonparametric) {
- misc$smooth.labels <- smooth.labels
+ misc$smooth.labels <- smooth.labels
}
crit.list <- list()
if (criterion != "coefficients")
- crit.list[[criterion]] <- fit[[criterion]] <- new.crit
+ crit.list[[criterion]] <- fit[[criterion]] <- new.crit
for (ii in names(.min.criterion.VGAM)) {
- if (ii != criterion &&
- any(slotNames(family) == ii) &&
- length(body(slot(family, ii)))) {
- fit[[ii]] <- crit.list[[ii]] <- (slot(family, ii))(mu=mu,
- y=y, w=w, res = FALSE, eta=eta, extra)
+ if (ii != criterion &&
+ any(slotNames(family) == ii) &&
+ length(body(slot(family, ii)))) {
+ fit[[ii]] <-
+ crit.list[[ii]] <-
+ (slot(family, ii))(mu = mu, y = y, w = w, res = FALSE,
+ eta = eta, extra)
}
}
@@ -359,87 +369,88 @@ vgam.fit <- function(x, y, w, mf,
if (M == 1) {
- fit$predictors <- as.vector(fit$predictors)
- fit$residuals <- as.vector(fit$residuals)
- names(fit$residuals) <- names(fit$predictors) <- yn
- } else
- dimnames(fit$residuals) <- dimnames(fit$predictors) <-
- list(yn, predictors.names)
+ fit$predictors <- as.vector(fit$predictors)
+ fit$residuals <- as.vector(fit$residuals)
+ names(fit$residuals) <- names(fit$predictors) <- yn
+ } else {
+ dimnames(fit$residuals) <- dimnames(fit$predictors) <-
+ list(yn, predictors.names)
+ }
if (w[1] != 1 || any(w != w[1]))
- fit$prior.weights <- w
+ fit$prior.weights <- w
if (length(family@last))
- eval(family@last)
+ eval(family@last)
if (!is.null(fit$smomat)) {
- fit$nl.chisq <- vgam.nlchisq(fit$qr, fit$resid, wz=wz,
- smomat=fit$smomat, deriv=deriv.mu, U=U,
- smooth.labels, attr(x, "assign"),
- M=M, n=n, constraints=Blist)
+ fit$nl.chisq <- vgam.nlchisq(fit$qr, fit$resid, wz = wz,
+ smomat = fit$smomat,
+ deriv = deriv.mu, U = U,
+ smooth.labels, attr(x, "assign"),
+ M = M, n = n, constraints = Blist)
}
if (!qr.arg) {
- fit$qr <- NULL
+ fit$qr <- NULL
}
- fit$misc = NULL # 8/6/02; It's necessary to kill it as it exists in vgam
+ fit$misc <- NULL # 8/6/02; It's necessary to kill it as it exists in vgam
structure(c(fit, list(
- contrasts=attr(x, "contrasts"),
- control=control,
- crit.list=crit.list,
- extra=extra,
- family=family,
- iter=iter,
- misc=misc,
- post=post,
- x=x,
- y=y)),
- vclass=family@vfamily)
+ contrasts = attr(x, "contrasts"),
+ control = control,
+ crit.list = crit.list,
+ extra = extra,
+ family = family,
+ iter = iter,
+ misc = misc,
+ post = post,
+ x = x,
+ y = y)),
+ vclass = family@vfamily)
}
-new.assign <- function(X, Blist)
-{
+new.assign <- function(X, Blist) {
- M <- nrow(Blist[[1]])
- dn <- labels(X)
- xn <- dn[[2]]
+ M <- nrow(Blist[[1]])
+ dn <- labels(X)
+ xn <- dn[[2]]
- asgn <- attr(X, "assign")
- nasgn <- names(asgn)
- lasgn <- unlist(lapply(asgn, length))
-
- ncolBlist <- unlist(lapply(Blist, ncol))
- names(ncolBlist) <- NULL # This is necessary for below to work
-
- temp2 <- vlabel(nasgn, ncolBlist, M)
- L <- length(temp2)
- newasgn <- vector("list", L)
-
- kk <- 0
- low <- 1
- for (ii in 1:length(asgn)) {
- len <- low:(low + ncolBlist[ii] * lasgn[ii] -1)
- temp <- matrix(len, ncolBlist[ii], lasgn[ii])
- for (mm in 1:ncolBlist[ii])
- newasgn[[kk+mm]] <- temp[mm,]
- low <- low + ncolBlist[ii] * lasgn[ii]
- kk <- kk + ncolBlist[ii]
- }
+ asgn <- attr(X, "assign")
+ nasgn <- names(asgn)
+ lasgn <- unlist(lapply(asgn, length))
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ names(ncolBlist) <- NULL # This is necessary for below to work
+
+ temp2 <- vlabel(nasgn, ncolBlist, M)
+ L <- length(temp2)
+ newasgn <- vector("list", L)
+
+ kk <- 0
+ low <- 1
+ for (ii in 1:length(asgn)) {
+ len <- low:(low + ncolBlist[ii] * lasgn[ii] -1)
+ temp <- matrix(len, ncolBlist[ii], lasgn[ii])
+ for (mm in 1:ncolBlist[ii])
+ newasgn[[kk+mm]] <- temp[mm,]
+ low <- low + ncolBlist[ii] * lasgn[ii]
+ kk <- kk + ncolBlist[ii]
+ }
- names(newasgn) <- temp2
- newasgn
+ names(newasgn) <- temp2
+ newasgn
}
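
The working-response update inside new.s.call above is the standard IRLS step written with vchol()/vforsub()/vbacksub(); for a single linear predictor (M = 1) it reduces to the familiar scalar form (a sketch, assuming positive working weights wz):

    U <- sqrt(wz)                        # vchol() of a 1 x 1 weight "matrix"
    z <- eta + deriv.mu / wz - offset    # vbacksub(U, vforsub(U, deriv.mu)) equals deriv.mu / wz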
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 87433cf..20bcd6a 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,85 +1,90 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
-vgam.match <- function(x, all.knots=FALSE, nk=NULL) {
+
+vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
if (is.list(x)) {
- nvar <- length(x)
- if (length(nk))
- nk = rep(nk, length=nvar)
- temp <- vgam.match(x[[1]], all.knots=all.knots, nk=nk[1])
-
- ooo <- matrix(temp$o, length(temp$o), nvar)
- nef <- rep(temp$nef, nvar)
- xmin <- rep(temp$xmin, nvar)
- xmax <- rep(temp$xmax, nvar)
- nknots <- rep(temp$nknots, nvar)
- knots <- vector("list", nvar)
- knots[[1]] <- temp$knots
-
- if (nvar > 1)
- for (ii in 2:nvar) {
- temp = vgam.match(x[[ii]], all.knots = all.knots, nk = nk[ii])
- ooo[, ii] <- temp$o
- nef[ii] <- temp$nef
- nknots[ii] <- temp$nknots
- knots[[ii]] <- temp$knots
- xmin[ii] <- temp$xmin
- xmax[ii] <- temp$xmax
- }
- names(nknots) <- names(knots) <-
- names(nef) <- names(xmin) <- names(xmax) <- names(x)
- dimnames(ooo) <- list(NULL, names(x))
-
- return(list(o=ooo, nef=nef, nknots=nknots, knots=knots,
- xmin=xmin, xmax=xmax))
+ nvar <- length(x)
+ if (length(nk))
+ nk <- rep(nk, length = nvar)
+ temp <- vgam.match(x[[1]], all.knots = all.knots, nk = nk[1])
+
+ ooo <- matrix(temp$matcho, length(temp$matcho), nvar)
+ neffec <- rep(temp$neffec, nvar)
+ xmin <- rep(temp$xmin, nvar)
+ xmax <- rep(temp$xmax, nvar)
+ nknots <- rep(temp$nknots, nvar)
+ knots <- vector("list", nvar)
+ knots[[1]] <- temp$knots
+
+ if (nvar > 1)
+ for (ii in 2:nvar) {
+ temp <- vgam.match(x[[ii]], all.knots = all.knots, nk = nk[ii])
+ ooo[, ii] <- temp$matcho
+ neffec[ii] <- temp$neffec
+ nknots[ii] <- temp$nknots
+ knots[[ii]] <- temp$knots
+ xmin[ii] <- temp$xmin
+ xmax[ii] <- temp$xmax
+ }
+ names(nknots) <- names(knots) <-
+ names(neffec) <- names(xmin) <- names(xmax) <- names(x)
+ dimnames(ooo) <- list(NULL, names(x))
+
+ return(list(matcho = ooo, neffec = neffec, nknots = nknots, knots = knots,
+ xmin = xmin, xmax = xmax))
}
if (!is.null(attributes(x)$NAs) || any(is.na(x)))
- stop("cannot smooth on variables with NAs")
+ stop("cannot smooth on variables with NAs")
sx <- unique(sort(as.vector(x))) # "as.vector()" strips off attributes
ooo <- match(x, sx) # as.integer(match(x, sx)) # sx[o]==x
- nef <- length(sx) # as.integer(length(sx))
+ neffec <- length(sx) # as.integer(length(sx))
- if (nef < 7)
- stop("smoothing variables must have at least 7 unique values")
+ if (neffec < 7)
+ stop("smoothing variables must have at least 7 unique values")
xmin <- sx[1] # Don't use rounded value
- xmax <- sx[nef]
+ xmax <- sx[neffec]
xbar <- (sx - xmin) / (xmax - xmin)
- noround = TRUE # Improvement 3/8/02
+ noround <- TRUE # Improvement 3/8/02
if (all.knots) {
- if (noround) {
- knot = valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3)))
- } else {
- knot <- c(rep(xbar[1], 3), xbar, rep(xbar[nef], 3))
- }
- if (length(nk)) warning("overriding nk by all.knots = TRUE")
- nk <- length(knot) - 4 # No longer: nef + 2
+ knot <- if (noround) {
+ valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[neffec], 3)))
+ } else {
+ c(rep(xbar[1], 3), xbar, rep(xbar[neffec], 3))
+ }
+ if (length(nk))
+ warning("overriding nk by all.knots = TRUE")
+ nk <- length(knot) - 4 # No longer: neffec + 2
} else {
- chosen = length(nk)
- if (chosen && (nk > nef+2 || nk <= 5))
- stop("bad value for 'nk'")
- if (!chosen) nk = 0
- knot.list <- dotC(name="vknootl2", as.double(xbar),
- as.integer(nef), knot=double(nef+6),
- k=as.integer(nk+4), chosen=as.integer(chosen))
- if (noround) {
- knot = valid.vknotl2(knot.list$knot[1:(knot.list$k)])
- knot.list$k = length(knot)
- } else {
- knot <- knot.list$knot[1:(knot$k)]
- }
- nk <- knot.list$k - 4
+ chosen <- length(nk)
+ if (chosen && (nk > neffec+2 || nk <= 5))
+ stop("bad value for 'nk'")
+ if (!chosen)
+ nk <- 0
+ knot.list <- dotC(name = "vknootl2", as.double(xbar),
+ as.integer(neffec), knot = double(neffec+6),
+ k = as.integer(nk+4), chosen = as.integer(chosen))
+ if (noround) {
+ knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
+ knot.list$k <- length(knot)
+ } else {
+ knot <- knot.list$knot[1:(knot$k)]
+ }
+ nk <- knot.list$k - 4
}
- if (nk <= 5) stop("not enough distinct knots found")
+ if (nk <= 5)
+ stop("not enough distinct knots found")
- return(list(o=ooo, nef=nef, nknots=nk, knots=knot, xmin=xmin, xmax=xmax))
+ return(list(matcho = ooo, neffec = neffec, nknots = nk, knots = knot,
+ xmin = xmin, xmax = xmax))
}
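
The renamed return components above (matcho, neffec) can be checked directly; a standalone sketch that calls the (unexported) helper on made-up data:

    xx <- sort(runif(30))
    vm <- VGAM:::vgam.match(xx, all.knots = FALSE, nk = NULL)
    vm$neffec    # number of distinct x values (must be at least 7)
    vm$nknots    # number of B-spline knots selected via vknootl2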
diff --git a/R/vglm.R b/R/vglm.R
index 1b19f71..745e3e3 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -42,11 +42,11 @@ vglm <- function(formula,
stop("invalid 'method': ", method))
mt <- attr(mf, "terms")
- xlev = .getXlevels(mt, mf)
+ xlev <- .getXlevels(mt, mf)
y <- model.response(mf, "any") # model.extract(mf, "response")
x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
matrix(, NROW(y), 0)
- attr(x, "assign") = attrassigndefault(x, mt)
+ attr(x, "assign") <- attrassigndefault(x, mt)
@@ -57,7 +57,7 @@ vglm <- function(formula,
if (!is.null(subset))
stop("argument 'subset' cannot be used when ",
"argument 'form2' is used")
- retlist = shadowvglm(formula =
+ retlist <- shadowvglm(formula =
form2,
family = family, data = data,
na.action = na.action,
@@ -80,7 +80,7 @@ vglm <- function(formula,
stop("number of rows of 'y' and 'Ym2' are unequal")
}
} else {
- Xm2 = Ym2 = NULL
+ Xm2 <- Ym2 <- NULL
}
@@ -152,58 +152,58 @@ vglm <- function(formula,
if (!smart) answer@smart.prediction <- list(smart.arg = FALSE)
if (qr.arg) {
- class(fit$qr) = "list"
- slot(answer, "qr") = fit$qr
+ class(fit$qr) <- "list"
+ slot(answer, "qr") <- fit$qr
}
if (length(attr(x, "contrasts")))
- slot(answer, "contrasts") = attr(x, "contrasts")
+ slot(answer, "contrasts") <- attr(x, "contrasts")
if (length(fit$fitted.values))
- slot(answer, "fitted.values") = as.matrix(fit$fitted.values)
- slot(answer, "na.action") = if (length(aaa <- attr(mf, "na.action")))
+ slot(answer, "fitted.values") <- as.matrix(fit$fitted.values)
+ slot(answer, "na.action") <- if (length(aaa <- attr(mf, "na.action")))
list(aaa) else list()
if (length(offset))
- slot(answer, "offset") = as.matrix(offset)
+ slot(answer, "offset") <- as.matrix(offset)
if (length(fit$weights))
- slot(answer, "weights") = as.matrix(fit$weights)
+ slot(answer, "weights") <- as.matrix(fit$weights)
if (x.arg)
- slot(answer, "x") = fit$x # The 'small' (lm) design matrix
+ slot(answer, "x") <- fit$x # The 'small' (lm) design matrix
if (x.arg && length(Xm2))
- slot(answer, "Xm2") = Xm2 # The second (lm) design matrix
+ slot(answer, "Xm2") <- Xm2 # The second (lm) design matrix
if (y.arg && length(Ym2))
- slot(answer, "Ym2") = as.matrix(Ym2) # The second response
+ slot(answer, "Ym2") <- as.matrix(Ym2) # The second response
if (!is.null(form2))
- slot(answer, "callXm2") = retlist$call
- answer@misc$formula = formula
- answer@misc$form2 = form2
+ slot(answer, "callXm2") <- retlist$call
+ answer@misc$formula <- formula
+ answer@misc$form2 <- form2
if (length(xlev))
- slot(answer, "xlevels") = xlev
+ slot(answer, "xlevels") <- xlev
if (y.arg)
- slot(answer, "y") = as.matrix(fit$y)
+ slot(answer, "y") <- as.matrix(fit$y)
- slot(answer, "control") = fit$control
- slot(answer, "extra") = if (length(fit$extra)) {
+ slot(answer, "control") <- fit$control
+ slot(answer, "extra") <- if (length(fit$extra)) {
if (is.list(fit$extra)) fit$extra else {
warning("'extra' is not a list, therefore placing ",
"'extra' into a list")
list(fit$extra)
}
} else list() # R-1.5.0
- slot(answer, "iter") = fit$iter
- slot(answer, "post") = fit$post
+ slot(answer, "iter") <- fit$iter
+ slot(answer, "post") <- fit$post
- fit$predictors = as.matrix(fit$predictors) # Must be a matrix
+ fit$predictors <- as.matrix(fit$predictors) # Must be a matrix
if (length(fit$misc$predictors.names) == ncol(fit$predictors))
- dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
+ dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
fit$misc$predictors.names)
- slot(answer, "predictors") = fit$predictors
+ slot(answer, "predictors") <- fit$predictors
if (length(fit$prior.weights))
- slot(answer, "prior.weights") = as.matrix(fit$prior.weights)
+ slot(answer, "prior.weights") <- as.matrix(fit$prior.weights)
answer
@@ -250,11 +250,11 @@ shadowvglm <-
x <- y <- NULL
- xlev = .getXlevels(mt, mf)
+ xlev <- .getXlevels(mt, mf)
y <- model.response(mf, "any") # model.extract(mf, "response")
x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
matrix(, NROW(y), 0)
- attr(x, "assign") = attrassigndefault(x, mt)
+ attr(x, "assign") <- attrassigndefault(x, mt)
list(Xm2=x, Ym2=y, call=ocall)
}
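
A sketch of the form2 handling above (mydata and the second-formula covariate z1 are hypothetical):

    fit <- vglm(y ~ x1 + x2, family = poissonff, data = mydata,
                form2 = ~ z1, x.arg = TRUE)
    fit@Xm2   # second (lm-style) design matrix, built by shadowvglm()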
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 83c1936..6f15ba2 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,42 +14,47 @@
-vlm.control <- function(save.weight = TRUE, tol = 1e-7, method="qr",
- checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75,
+vlm.control <- function(save.weight = TRUE,
+ tol = 1e-7,
+ method = "qr",
+ checkwz = TRUE,
+ wzepsilon = .Machine$double.eps^0.75,
...) {
- if (tol <= 0) {
- warning("tol not positive; using 1e-7 instead")
- tol <- 1e-7
- }
- if (!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for argument 'checkwz'")
- if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'wzepsilon'")
-
- list(save.weight=save.weight, tol=tol, method=method,
- checkwz = checkwz,
- wzepsilon = wzepsilon)
+ if (tol <= 0) {
+ warning("tol not positive; using 1e-7 instead")
+ tol <- 1e-7
+ }
+ if (!is.logical(checkwz) || length(checkwz) != 1)
+ stop("bad input for argument 'checkwz'")
+ if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'wzepsilon'")
+
+ list(save.weight = save.weight,
+ tol = tol,
+ method = method,
+ checkwz = checkwz,
+ wzepsilon = wzepsilon)
}
vglm.control <- function(checkwz = TRUE,
+ Check.rank = TRUE,
criterion = names(.min.criterion.VGAM),
epsilon = 1e-7,
half.stepsizing = TRUE,
maxit = 30,
- nowarning = FALSE,
+ noWarning = FALSE,
stepsize = 1,
save.weight = FALSE,
trace = FALSE,
wzepsilon = .Machine$double.eps^0.75,
xij = NULL,
- ...)
-{
+ ...) {
if (mode(criterion) != "character" && mode(criterion) != "name")
- criterion <- as.character(substitute(criterion))
+ criterion <- as.character(substitute(criterion))
criterion <- pmatch(criterion[1], names(.min.criterion.VGAM), nomatch = 1)
criterion <- names(.min.criterion.VGAM)[criterion]
@@ -63,36 +68,36 @@ vglm.control <- function(checkwz = TRUE,
convergence <- expression({
- switch(criterion,
- coefficients = if (iter == 1) iter < maxit else
- (iter < maxit &&
- max(abs(new.crit - old.crit) / (abs(old.crit) + epsilon))
- > epsilon),
- abs(old.crit-new.crit) / (abs(old.crit)+epsilon) > epsilon &&
- iter < maxit)
+ switch(criterion,
+ coefficients = if (iter == 1) iter < maxit else
+ (iter < maxit &&
+ max(abs(new.crit - old.crit) / (abs(old.crit) + epsilon)) > epsilon),
+ iter < maxit &&
+ abs(old.crit - new.crit) / (abs(old.crit) + epsilon) > epsilon)
})
if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE)) {
- warning("bad input for argument 'epsilon'; using 0.00001 instead")
- epsilon <- 0.00001
+ warning("bad input for argument 'epsilon'; using 0.00001 instead")
+ epsilon <- 0.00001
}
if (!is.Numeric(maxit, allowable.length = 1,
positive = TRUE, integer.valued = TRUE)) {
- warning("bad input for argument 'maxit'; using 30 instead")
- maxit <- 30
+ warning("bad input for argument 'maxit'; using 30 instead")
+ maxit <- 30
}
if (!is.Numeric(stepsize, allowable.length = 1, positive = TRUE)) {
- warning("bad input for argument 'stepsize'; using 1 instead")
- stepsize <- 1
+ warning("bad input for argument 'stepsize'; using 1 instead")
+ stepsize <- 1
}
list(checkwz = checkwz,
+ Check.rank = Check.rank,
convergence = convergence,
criterion = criterion,
epsilon = epsilon,
half.stepsizing = as.logical(half.stepsizing)[1],
maxit = maxit,
- nowarning = as.logical(nowarning)[1],
+ noWarning = as.logical(noWarning)[1],
min.criterion = .min.criterion.VGAM,
save.weight = as.logical(save.weight)[1],
stepsize = stepsize,
@@ -106,27 +111,27 @@ vglm.control <- function(checkwz = TRUE,
vcontrol.expression <- expression({
- control <- control # First one, e.g., vgam.control(...)
- mylist <- family@vfamily
- for(i in length(mylist):1) {
- for(ii in 1:2) {
- temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
- mylist[i], ".control", sep="")
- tempexists = if (is.R()) exists(temp, envir = VGAM:::VGAMenv) else
- exists(temp, inherit = TRUE)
- if (tempexists) {
- temp <- get(temp)
- temp <- temp(...)
- for(k in names(temp))
- control[[k]] <- temp[[k]]
- }
- }
- }
+ control <- control # First one, e.g., vgam.control(...)
+ mylist <- family@vfamily
+ for(i in length(mylist):1) {
+ for(ii in 1:2) {
+ temp <- paste(if(ii == 1) "" else paste(function.name, ".", sep=""),
+ mylist[i], ".control", sep="")
+ tempexists <- if (is.R()) exists(temp, envir = VGAM:::VGAMenv) else
+ exists(temp, inherit = TRUE)
+ if (tempexists) {
+ temp <- get(temp)
+ temp <- temp(...)
+ for(k in names(temp))
+ control[[k]] <- temp[[k]]
+ }
+ }
+}
- orig.criterion = control$criterion
+ orig.criterion <- control$criterion
if (control$criterion != "coefficients") {
- try.crit = c(names(.min.criterion.VGAM), "coefficients")
+ try.crit <- c(names(.min.criterion.VGAM), "coefficients")
for(i in try.crit) {
if (any(slotNames(family) == i) &&
(( is.R() && length(body(slot(family, i)))) ||
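As a quick illustration of the two control arguments touched in this file ('Check.rank' is new; 'noWarning' replaces the old 'nowarning'), here is a minimal sketch of a call that sets them explicitly. The pneumo data and the cumulative() family are only convenient stand-ins, not part of this change:

  library(VGAM)
  fit <- vglm(cbind(normal, mild, severe) ~ let, data = pneumo,
              family = cumulative(parallel = TRUE),
              control = vglm.control(maxit = 5, noWarning = TRUE,
                                     Check.rank = TRUE, trace = FALSE))
  coef(fit, matrix = TRUE)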
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index bc9a96a..fa61764 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -17,11 +17,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
qr.arg = FALSE,
constraints = NULL,
extra = NULL,
- Terms = Terms, function.name = "vglm", ...)
-{
- specialCM = NULL
- post = list()
- check.rank <- TRUE # Set this to false for family functions vppr() etc.
+ Terms = Terms, function.name = "vglm", ...) {
+
+ specialCM <- NULL
+ post <- list()
+ check.rank <- TRUE # Set this to false for family functions vppr() etc.
+ check.rank <- control$Check.rank
nonparametric <- FALSE
epsilon <- control$epsilon
maxit <- control$maxit
@@ -31,6 +32,10 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
minimize.criterion <- control$min.criterion
+ fv <- NULL
+
+
+
n <- dim(x)[1]
@@ -40,13 +45,13 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
new.coeffs <- c.list$coeff
if (length(slot(family, "middle")))
- eval(slot(family, "middle"))
+ eval(slot(family, "middle"))
eta <- fv + offset
mu <- slot(family, "linkinv")(eta, extra)
if (length(slot(family, "middle2")))
- eval(slot(family, "middle2"))
+ eval(slot(family, "middle2"))
old.crit <- new.crit
new.crit <-
@@ -57,17 +62,16 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
if (trace && orig.stepsize == 1) {
- cat("VGLM linear loop ", iter, ": ", criterion, "= ")
- UUUU =
- switch(criterion,
- coefficients = format(new.crit,
- dig = round(2 - log10(epsilon))),
- format(round(new.crit, 4)))
-
- switch(criterion,
- coefficients = {if(length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
+ cat("VGLM linear loop ", iter, ": ", criterion, "= ")
+ UUUU <- switch(criterion,
+ coefficients = format(new.crit,
+ dig = round(2 - log10(epsilon))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients = {if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
}
@@ -77,10 +81,10 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
length(old.coeffs)) &&
((orig.stepsize != 1) ||
(criterion != "coefficients" &&
- (if(minimize.criterion) new.crit > old.crit else
+ (if (minimize.criterion) new.crit > old.crit else
new.crit < old.crit)))
if (!is.logical(take.half.step))
- take.half.step = TRUE
+ take.half.step <- TRUE
if (take.half.step) {
stepsize <- 2 * min(orig.stepsize, 2*stepsize)
new.coeffs.save <- new.coeffs
@@ -93,22 +97,22 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
}
stepsize <- stepsize / 2
if (too.small <- stepsize < 0.001)
- break
+ break
new.coeffs <- (1-stepsize)*old.coeffs +
stepsize*new.coeffs.save
if (length(slot(family, "middle")))
- eval(slot(family, "middle"))
+ eval(slot(family, "middle"))
fv <- X_vlm_save %*% new.coeffs
if (M > 1)
- fv <- matrix(fv, n, M, byrow = TRUE)
+ fv <- matrix(fv, n, M, byrow = TRUE)
eta <- fv + offset
mu <- slot(family, "linkinv")(eta, extra)
if (length(slot(family, "middle2")))
- eval(slot(family, "middle2"))
+ eval(slot(family, "middle2"))
new.crit <-
@@ -124,44 +128,46 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
} # of repeat
if (trace)
- cat("\n")
+ cat("\n")
if (too.small) {
- warning("iterations terminated because ",
- "half-step sizes are very small")
- one.more <- FALSE
+ warning("iterations terminated because ",
+ "half-step sizes are very small")
+ one.more <- FALSE
} else {
- if (trace) {
- cat("VGLM linear loop ",
- iter, ": ", criterion, "= ")
-
- UUUU = switch(criterion,
- coefficients = format(new.crit,
- dig = round(2-log10(epsilon))),
- format(round(new.crit, 4)))
-
- switch(criterion,
- coefficients = {
- if(length(new.crit) > 2) cat("\n");
- cat(UUUU, fill = TRUE, sep = ", ")},
- cat(UUUU, fill = TRUE, sep = ", "))
- }
-
- one.more <- eval(control$convergence)
+ if (trace) {
+ cat("VGLM linear loop ",
+ iter, ": ", criterion, "= ")
+
+ UUUU <- switch(criterion,
+ coefficients =
+ format(new.crit,
+ dig = round(2-log10(epsilon))),
+ format(round(new.crit, 4)))
+
+ switch(criterion,
+ coefficients = {
+ if (length(new.crit) > 2) cat("\n");
+ cat(UUUU, fill = TRUE, sep = ", ")},
+ cat(UUUU, fill = TRUE, sep = ", "))
+ }
+
+ one.more <- eval(control$convergence)
}
} else {
- one.more <- eval(control$convergence)
+ one.more <- eval(control$convergence)
}
}
flush.console()
- if (!is.logical(one.more)) one.more = FALSE
+ if (!is.logical(one.more))
+ one.more <- FALSE
if (one.more) {
iter <- iter + 1
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent = !trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
@@ -169,11 +175,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
c.list$z <- z
c.list$U <- U
- if (copy_X_vlm) c.list$X_vlm <- X_vlm_save
+ if (copy_X_vlm)
+ c.list$X_vlm <- X_vlm_save
}
c.list$one.more <- one.more
- c.list$coeff = runif(length(new.coeffs)) # 12/3/03; twist needed!
+ c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
old.coeffs <- new.coeffs
}
c.list
@@ -223,11 +230,11 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
if (length(slot(family, "constraints")))
- eval(slot(family, "constraints"))
+ eval(slot(family, "constraints"))
Blist <- process.constraints(constraints, x, M,
- specialCM = specialCM)
+ specialCM = specialCM)
ncolBlist <- unlist(lapply(Blist, ncol))
@@ -237,9 +244,9 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
- X_vlm_save = if (length(X_vlm_arg)) X_vlm_arg else
+ X_vlm_save <- if (length(X_vlm_arg)) X_vlm_arg else
lm2vlm.model.matrix(x, Blist, xij = control$xij,
- Xm2 = Xm2)
+ Xm2 = Xm2)
@@ -247,7 +254,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
if (length(coefstart)) {
eta <- if (ncol(X_vlm_save)>1) X_vlm_save %*% coefstart +
- offset else X_vlm_save * coefstart + offset
+ offset else X_vlm_save * coefstart + offset
eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
mu <- slot(family, "linkinv")(eta, extra)
}
@@ -269,8 +276,8 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wz <- checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent = !trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
@@ -314,7 +321,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
NULL
}
- if (maxit > 1 && iter >= maxit && !control$nowarning)
+ if (maxit > 1 && iter >= maxit && !control$noWarning)
warning("convergence not obtained in ", maxit, " iterations")
@@ -325,10 +332,10 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
ynrow_X_vlm <- dnrow_X_vlm[[1]]
if (length(slot(family, "fini")))
- eval(slot(family, "fini"))
+ eval(slot(family, "fini"))
if (M > 1)
- tfit$predictors <- matrix(tfit$predictors, n, M)
+ tfit$predictors <- matrix(tfit$predictors, n, M)
coefs <- tfit$coefficients
asgn <- attr(X_vlm_save, "assign")
@@ -339,7 +346,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
cnames <- xnrow_X_vlm
if (check.rank && rank < ncol_X_vlm)
- stop("vglm only handles full-rank models (currently)")
+ stop("vglm only handles full-rank models (currently)")
R <- tfit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
R[lower.tri(R)] <- 0
@@ -359,12 +366,12 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
residuals <- z - tfit$predictors
if (M == 1) {
- tfit$predictors <- as.vector(tfit$predictors)
- residuals <- as.vector(residuals)
- names(residuals) <- names(tfit$predictors) <- yn
+ tfit$predictors <- as.vector(tfit$predictors)
+ residuals <- as.vector(residuals)
+ names(residuals) <- names(tfit$predictors) <- yn
} else {
- dimnames(residuals) <- dimnames(tfit$predictors) <-
- list(yn, predictors.names)
+ dimnames(residuals) <- dimnames(tfit$predictors) <-
+ list(yn, predictors.names)
}
if (is.matrix(mu)) {
@@ -410,7 +417,7 @@ vglm.fit <- function(x, y, w = rep(1, length(x[, 1])),
colnames.X_vlm = xnrow_X_vlm,
criterion = criterion,
function.name = function.name,
- intercept.only=intercept.only,
+ intercept.only = intercept.only,
predictors.names = predictors.names,
M = M,
n = n,
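The half-stepping loop touched above backs off a proposed update by repeatedly halving the step and taking a convex combination of the old and the proposed coefficient vectors. A standalone sketch of that update, with made-up numbers purely for illustration:

  old.coeffs      <- c(0.10, 0.50)  # coefficients from the previous iteration
  new.coeffs.save <- c(0.40, 0.20)  # proposed update that worsened the criterion
  stepsize <- 1
  stepsize <- stepsize / 2          # first halving, as in the repeat loop above
  new.coeffs <- (1 - stepsize) * old.coeffs + stepsize * new.coeffs.save
  new.coeffs                        # 0.25 0.35; halving continues until the
                                    # criterion improves or stepsize < 0.001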
diff --git a/R/vlm.R b/R/vlm.R
index f05b4bf..29a8e34 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,106 +1,108 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
vlm <- function(formula,
- data=list(),
- weights=NULL, subset = NULL, na.action=na.fail,
- prior.weights=NULL,
- control=vlm.control(...),
- method="qr",
- model=FALSE, x.arg=FALSE, y.arg=TRUE, qr.arg=TRUE,
- contrasts=NULL,
- constraints=NULL,
- extra=NULL, offset=NULL,
- smart=TRUE, ...)
+ data = list(),
+ weights = NULL, subset = NULL, na.action = na.fail,
+ prior.weights = NULL,
+ control = vlm.control(...),
+ method = "qr",
+ model = FALSE, x.arg = FALSE, y.arg = TRUE, qr.arg = TRUE,
+ contrasts = NULL,
+ constraints = NULL,
+ extra = NULL, offset = NULL,
+ smart = TRUE, ...)
{
- dataname <- as.character(substitute(data)) # "list" if no data=
- function.name <- "vlm"
+ dataname <- as.character(substitute(data)) # "list" if no data=
+ function.name <- "vlm"
- ocall <- match.call()
+ ocall <- match.call()
- if (smart)
- setup.smart("write")
+ if (smart)
+ setup.smart("write")
- if (missing(data))
- data <- environment(formula)
+ if (missing(data))
+ data <- environment(formula)
- mf <- match.call(expand.dots = FALSE)
- m <- match(c("formula", "data", "subset", "weights", "na.action",
- "offset"), names(mf), 0)
- mf <- mf[c(1, m)]
- mf$drop.unused.levels <- TRUE
- mf[[1]] <- as.name("model.frame")
- mf <- eval(mf, parent.frame())
- switch(method, model.frame = return(mf), qr = 1,
- stop("invalid 'method': ", method))
- mt <- attr(mf, "terms")
+ mf <- match.call(expand.dots = FALSE)
+ m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"),
+ names(mf), 0)
+ mf <- mf[c(1, m)]
+ mf$drop.unused.levels <- TRUE
+ mf[[1]] <- as.name("model.frame")
+ mf <- eval(mf, parent.frame())
+ switch(method,
+ model.frame = return(mf),
+ qr = 1,
+ stop("invalid 'method': ", method))
+ mt <- attr(mf, "terms")
- if (method != "qr")
- stop("only method = 'qr' is implemented")
+ if (method != "qr")
+ stop("only method = 'qr' is implemented")
- xlev = .getXlevels(mt, mf)
- y <- model.response(mf, "any") # model.extract(mf, "response")
- x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
- matrix(, NROW(y), 0)
- attr(x, "assign") = attrassigndefault(x, mt)
+ xlev <- .getXlevels(mt, mf)
+ y <- model.response(mf, "any") # model.extract(mf, "response")
+ x <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else
+ matrix(, NROW(y), 0)
+ attr(x, "assign") <- attrassigndefault(x, mt)
- offset <- model.offset(mf)
- if (is.null(offset))
- offset <- 0 # yyy ???
- if (length(offset) && any(offset!=0))
- stop("offsets are redundant for (vector) linear models")
- wz <- model.weights(mf)
+ offset <- model.offset(mf)
+ if (is.null(offset))
+ offset <- 0 # yyy ???
+ if (length(offset) && any(offset != 0))
+ stop("offsets are redundant for (vector) linear models")
+ wz <- model.weights(mf)
- y = as.matrix(y)
- M <- ncol(as.matrix(y))
- n <- nrow(x)
- dy <- dimnames(y)
- dy1 <- if (length(dy[[1]])) dy[[1]] else dimnames(mf)[[1]]
- dy2 <- if (length(dy[[2]])) dy[[2]] else paste("Y", 1:M, sep="")
- dimnames(y) <- list(dy1, dy2)
- predictors.names = dy2
+ y <- as.matrix(y)
+ M <- ncol(as.matrix(y))
+ n <- nrow(x)
+ dy <- dimnames(y)
+ dy1 <- if (length(dy[[1]])) dy[[1]] else dimnames(mf)[[1]]
+ dy2 <- if (length(dy[[2]])) dy[[2]] else paste("Y", 1:M, sep = "")
+ dimnames(y) <- list(dy1, dy2)
+ predictors.names <- dy2
- if (!length(prior.weights)) {
- prior.weights = rep(1, len=n)
- names(prior.weights) = dy1
- }
- if (any(prior.weights <= 0))
- stop("only positive weights allowed")
- if (!length(wz)) {
- wz <- matrix(prior.weights, n, M)
- identity.wts <- TRUE
- } else {
- identity.wts <- FALSE
- temp = ncol(as.matrix(wz))
- if (temp < M || temp > M*(M+1)/2)
- stop("input 'w' must have between ", M, " and ", M*(M+1)/2,
- " columns")
- wz <- prior.weights * wz
- }
+ if (!length(prior.weights)) {
+ prior.weights <- rep(1, len = n)
+ names(prior.weights) <- dy1
+ }
+ if (any(prior.weights <= 0))
+ stop("only positive weights allowed")
+ if (!length(wz)) {
+ wz <- matrix(prior.weights, n, M)
+ identity.wts <- TRUE
+ } else {
+ identity.wts <- FALSE
+ temp <- ncol(as.matrix(wz))
+ if (temp < M || temp > M*(M+1)/2)
+ stop("input 'w' must have between ", M, " and ", M*(M+1)/2,
+ " columns")
+ wz <- prior.weights * wz
+ }
- control = control
- Blist <- process.constraints(constraints, x, M)
- intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+ control <- control
+ Blist <- process.constraints(constraints, x, M)
+ intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- fit = vlm.wfit(xmat=x, zmat = y, Blist = Blist, wz = wz, U = NULL,
- matrix.out = FALSE, is.vlmX = FALSE,
- rss = TRUE, qr = qr.arg,
- x.ret = TRUE, offset = offset)
+ fit <- vlm.wfit(xmat = x, zmat = y, Blist = Blist, wz = wz, U = NULL,
+ matrix.out = FALSE, is.vlmX = FALSE,
+ rss = TRUE, qr = qr.arg,
+ x.ret = TRUE, offset = offset)
- ncol_X_vlm <- fit$rank
- fit$R <- fit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
- fit$R[lower.tri(fit$R)] <- 0
+ ncol_X_vlm <- fit$rank
+ fit$R <- fit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop = FALSE]
+ fit$R[lower.tri(fit$R)] <- 0
@@ -136,8 +138,8 @@ vlm <- function(formula,
if (smart) {
- fit$smart.prediction <- get.smart.prediction()
- wrapup.smart()
+ fit$smart.prediction <- get.smart.prediction()
+ wrapup.smart()
}
answer <-
@@ -147,7 +149,7 @@ vlm <- function(formula,
"coefficients" = fit$coefficients,
"constraints" = fit$constraints,
"control" = control,
- "criterion" = list(deviance=fit$rss),
+ "criterion" = list(deviance = fit$rss),
"dispersion" = 1,
"df.residual" = fit$df.residual,
"df.total" = n*M,
@@ -160,33 +162,34 @@ vlm <- function(formula,
"residuals" = as.matrix(fit$residuals),
"rss" = fit$rss,
"smart.prediction" = as.list(fit$smart.prediction),
- "terms" = list(terms=mt))
-
- if (!smart) answer@smart.prediction <- list(smart.arg=FALSE)
-
- slot(answer, "prior.weights") = as.matrix(prior.weights)
-
- if (length(attr(x, "contrasts")))
- slot(answer, "contrasts") = attr(x, "contrasts")
- slot(answer, "na.action") = if (length(aaa <- attr(mf, "na.action")))
- list(aaa) else list()
-
- if (length(offset))
- slot(answer, "offset") = as.matrix(offset)
- if (qr.arg) {
- class(fit$qr) = "list"
- slot(answer, "qr") = fit$qr
- }
- if (x.arg)
- slot(answer, "x") = x # The 'small' design matrix
- if (control$save.weight)
- slot(answer, "weights") = wz
- if (length(xlev))
- slot(answer, "xlevels") = xlev
- if (y.arg)
- slot(answer, "y") = as.matrix(y)
-
- answer
+ "terms" = list(terms = mt))
+
+ if (!smart)
+ answer@smart.prediction <- list(smart.arg = FALSE)
+
+ slot(answer, "prior.weights") <- as.matrix(prior.weights)
+
+ if (length(attr(x, "contrasts")))
+ slot(answer, "contrasts") <- attr(x, "contrasts")
+ slot(answer, "na.action") <- if (length(aaa <- attr(mf, "na.action")))
+ list(aaa) else list()
+
+ if (length(offset))
+ slot(answer, "offset") <- as.matrix(offset)
+ if (qr.arg) {
+ class(fit$qr) <- "list"
+ slot(answer, "qr") <- fit$qr
+ }
+ if (x.arg)
+ slot(answer, "x") <- x # The 'small' design matrix
+ if (control$save.weight)
+ slot(answer, "weights") <- wz
+ if (length(xlev))
+ slot(answer, "xlevels") <- xlev
+ if (y.arg)
+ slot(answer, "y") <- as.matrix(y)
+
+ answer
}
attr(vlm, "smart") <- TRUE
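For reference, vlm() as rewritten above fits a vector linear model to a multi-column response. A minimal sketch of a call, on simulated data that is purely illustrative:

  set.seed(1)
  mydat <- data.frame(x2 = runif(30), y1 = rnorm(30), y2 = rnorm(30))
  fit <- vlm(cbind(y1, y2) ~ x2, data = mydat)
  coef(fit)  # coefficients for both responses, stacked per linear predictor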
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 5dc23fa..4cf263a 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,128 +10,137 @@
-vlm.wfit <- function(xmat, zmat, Blist, wz = NULL, U = NULL,
- matrix.out = FALSE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
- x.ret = FALSE,
- offset = NULL,
- omit.these = NULL, only.rss = FALSE,
- ncolx = if (matrix.out && is.vlmX) {
- stop("need argument 'ncolx'")
- } else {
- ncol(xmat)
- },
- xij = NULL,
- lp.names = NULL, Eta.range = NULL, Xm2 = NULL, ...) {
- missing.Blist <- missing(Blist)
- zmat = as.matrix(zmat)
- n <- nrow(zmat)
- M <- ncol(zmat)
- if (!only.rss) {
- contrast.save <- attr(xmat, "contrasts")
- znames <- dimnames(zmat)[[2]]
- }
+vlm.wfit <-
+ function(xmat, zmat, Blist, wz = NULL, U = NULL,
+ matrix.out = FALSE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
+ x.ret = FALSE,
+ offset = NULL,
+ omit.these = NULL, only.rss = FALSE,
+ ncolx = if (matrix.out && is.vlmX) {
+ stop("need argument 'ncolx'")
+ } else {
+ ncol(xmat)
+ },
+ xij = NULL,
+ lp.names = NULL, Eta.range = NULL, Xm2 = NULL, ...) {
- if (length(offset)) {
- zmat <- zmat - offset
- }
- if (missing(U) || !length(U)) {
- U <- vchol(wz, M = M, n = n, silent = FALSE)
- }
- dU <- dim(U)
- if (dU[2] != n) {
- stop("input unconformable")
- }
- X_vlm_save <- if (is.vlmX) {
- xmat
- } else {
- if (missing.Blist || !length(Blist)) {
- Blist = replace.constraints(vector("list", ncol(xmat)),
- diag(M), 1:ncol(xmat)) # NULL
- }
- lm2vlm.model.matrix(x=xmat, Blist=Blist, M = M,
- assign.attributes = FALSE,
- xij = xij,
- Xm2=Xm2)
+ missing.Blist <- missing(Blist)
+ zmat <- as.matrix(zmat)
+ n <- nrow(zmat)
+ M <- ncol(zmat)
+ if (!only.rss) {
+ contrast.save <- attr(xmat, "contrasts")
+ znames <- dimnames(zmat)[[2]]
+ }
+
+ if (length(offset)) {
+ zmat <- zmat - offset
+ }
+ if (missing(U) || !length(U)) {
+ U <- vchol(wz, M = M, n = n, silent = FALSE)
+ }
+ dU <- dim(U)
+ if (dU[2] != n) {
+ stop("input unconformable")
+ }
+
+ X_vlm_save <- if (is.vlmX) {
+ xmat
+ } else {
+ if (missing.Blist || !length(Blist)) {
+ Blist <- replace.constraints(vector("list", ncol(xmat)),
+ diag(M), 1:ncol(xmat)) # NULL
+ }
+ lm2vlm.model.matrix(x = xmat, Blist = Blist, M = M,
+ assign.attributes = FALSE,
+ xij = xij,
+ Xm2 = Xm2)
}
- X_vlm <- mux111(U, X_vlm_save, M = M)
- z_vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
+ X_vlm <- mux111(U, X_vlm_save, M = M)
+ z_vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
- if (length(omit.these)) {
- X_vlm = X_vlm[!omit.these,,drop = FALSE]
- z_vlm = z_vlm[!omit.these]
- }
+ if (length(omit.these)) {
+ X_vlm <- X_vlm[!omit.these,,drop = FALSE]
+ z_vlm <- z_vlm[!omit.these]
+ }
- ans <- lm.fit(X_vlm, z_vlm, ...)
- if (rss) {
- ans$rss <- sum(ans$resid^2)
- if (only.rss) return(list(rss = ans$rss))
- }
- if (length(omit.these) && any(omit.these)) {
- stop("code beyond here cannot handle omitted observations")
- }
- fv <- ans$fitted.values
- dim(fv) <- c(M, n)
- fv <- vbacksub(U, fv, M = M, n = n) # Have to premultiply fv by U
+ ans <- lm.fit(X_vlm, y = z_vlm, ...)
- if (length(Eta.range)) {
- if (length(Eta.range) != 2) {
- stop("length(Eta.range) must equal 2")
- }
- fv = ifelse(fv < Eta.range[1], Eta.range[1], fv)
- fv = ifelse(fv > Eta.range[2], Eta.range[2], fv)
- }
+ if (rss) {
+ ans$rss <- sum(ans$resid^2)
+ if (only.rss)
+ return(list(rss = ans$rss))
+ }
- ans$fitted.values <- if (M == 1) c(fv) else fv
- if (M > 1) {
- dimnames(ans$fitted.values) <- list(dimnames(zmat)[[1]], znames)
- }
- ans$residuals <- if (M == 1) c(zmat-fv) else zmat-fv
- if (M > 1) {
- dimnames(ans$residuals) <- list(dimnames(ans$residuals)[[1]], znames)
- }
- ans$misc <- list(M = M, n = n)
- ans$call <- match.call()
+ if (length(omit.these) && any(omit.these)) {
+ stop("code beyond here cannot handle omitted observations")
+ }
- ans$constraints <- Blist
- ans$contrasts <- contrast.save
- if (x.ret) {
- ans$X_vlm <- X_vlm_save
- }
- if (!is.null(offset)) {
- ans$fitted.values <- ans$fitted.values + offset
+ fv <- ans$fitted.values
+ dim(fv) <- c(M, n)
+ fv <- vbacksub(U, fv, M = M, n = n) # Have to premultiply fv by U
+
+
+ if (length(Eta.range)) {
+ if (length(Eta.range) != 2) {
+ stop("length(Eta.range) must equal 2")
}
+ fv <- ifelse(fv < Eta.range[1], Eta.range[1], fv)
+ fv <- ifelse(fv > Eta.range[2], Eta.range[2], fv)
+ }
+
+ ans$fitted.values <- if (M == 1) c(fv) else fv
+ if (M > 1) {
+ dimnames(ans$fitted.values) <- list(dimnames(zmat)[[1]], znames)
+ }
+ ans$residuals <- if (M == 1) c(zmat-fv) else zmat-fv
+ if (M > 1) {
+ dimnames(ans$residuals) <- list(dimnames(ans$residuals)[[1]], znames)
+ }
+ ans$misc <- list(M = M, n = n)
+ ans$call <- match.call()
+
+ ans$constraints <- Blist
+ ans$contrasts <- contrast.save
+ if (x.ret) {
+ ans$X_vlm <- X_vlm_save
+ }
+ if (!is.null(offset)) {
+ ans$fitted.values <- ans$fitted.values + offset
+ }
- if (!matrix.out) {
- return(ans)
- }
+ if (!matrix.out) {
+ return(ans)
+ }
- dx2 = if (is.vlmX) NULL else dimnames(xmat)[[2]]
- B = matrix(as.numeric(NA),
- nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
- if (is.null(Blist)) {
- Blist = replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
- }
- ncolBlist <- unlist(lapply(Blist, ncol))
- temp <- c(0, cumsum(ncolBlist))
- for(ii in 1:ncolx) {
- index <- (temp[ii]+1):temp[ii+1]
- cm <- Blist[[ii]]
- B[,ii] <- cm %*% ans$coef[index]
- }
- ans$mat.coefficients <- t(B)
- ans
+
+ dx2 <- if (is.vlmX) NULL else dimnames(xmat)[[2]]
+ B <- matrix(as.numeric(NA),
+ nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
+ if (is.null(Blist)) {
+ Blist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
+ }
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ temp <- c(0, cumsum(ncolBlist))
+ for(ii in 1:ncolx) {
+ index <- (temp[ii]+1):temp[ii+1]
+ cm <- Blist[[ii]]
+ B[,ii] <- cm %*% ans$coef[index]
+ }
+ ans$mat.coefficients <- t(B)
+ ans
}
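Conceptually, vlm.wfit() above performs a weighted least-squares fit by "whitening" with a Cholesky factor of the working weights (via vchol()/mux111()/mux22()) and then calling lm.fit(). A generic sketch of that idea, not of VGAM's internals, with simulated data:

  set.seed(3)
  X <- cbind(1, runif(20))
  W <- diag(runif(20, 0.5, 2))     # working weight matrix (diagonal case)
  y <- X %*% c(1, 2) + rnorm(20)
  U <- chol(W)                     # W = t(U) %*% U
  fit <- lm.fit(U %*% X, U %*% y)  # ordinary LS on the whitened system
  fit$coefficients                 # approximately c(1, 2)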
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index d5bbd14..99d7adc 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2013 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -31,31 +31,31 @@ setClass("vsmooth.spline", representation(
"yin" = "matrix"))
-setMethod("coefficients", signature(object="vsmooth.spline"),
+setMethod("coefficients", signature(object = "vsmooth.spline"),
function(object, ...)
coefvsmooth.spline(object, ...))
-setMethod("coef", signature(object="vsmooth.spline"),
+setMethod("coef", signature(object = "vsmooth.spline"),
function(object, ...)
coefvsmooth.spline(object, ...))
-setMethod("coefficients", signature(object="vsmooth.spline.fit"),
+setMethod("coefficients", signature(object = "vsmooth.spline.fit"),
function(object, ...)
coefvsmooth.spline.fit(object, ...))
-setMethod("coef", signature(object="vsmooth.spline.fit"),
+setMethod("coef", signature(object = "vsmooth.spline.fit"),
function(object, ...)
coefvsmooth.spline.fit(object, ...))
-setMethod("fitted.values", signature(object="vsmooth.spline"),
+setMethod("fitted.values", signature(object = "vsmooth.spline"),
function(object, ...)
fittedvsmooth.spline(object, ...))
-setMethod("fitted", signature(object="vsmooth.spline"),
+setMethod("fitted", signature(object = "vsmooth.spline"),
function(object, ...)
fittedvsmooth.spline(object, ...))
-setMethod("residuals", signature(object="vsmooth.spline"),
+setMethod("residuals", signature(object = "vsmooth.spline"),
function(object, ...)
residvsmooth.spline(object, ...))
-setMethod("resid", signature(object="vsmooth.spline"),
+setMethod("resid", signature(object = "vsmooth.spline"),
function(object, ...)
residvsmooth.spline(object, ...))
@@ -89,159 +89,170 @@ setMethod("model.matrix", "vsmooth.spline",
+depvar.vsmooth.spline <- function(object, ...) {
+ object@y
+}
+if (!isGeneric("depvar"))
+ setGeneric("depvar", function(object, ...) standardGeneric("depvar"),
+ package = "VGAM")
-vsmooth.spline <- function(x, y, w = NULL, df = rep(5, M),
- spar = NULL, #rep(0,M),
- all.knots = FALSE,
- iconstraint = diag(M),
- xconstraint = diag(M),
- constraints = list("(Intercepts)"=diag(M), x=diag(M)),
- var.arg = FALSE,
- scale.w=TRUE,
- nk=NULL,
- control.spar = list()) {
+setMethod("depvar", "vsmooth.spline", function(object, ...)
+ depvar.vsmooth.spline(object, ...))
- if (var.arg) {
- warning("@var will be returned, but no use will be made of it")
- }
- missing.constraints <- missing(constraints)
- if (!(missing.spar <- missing(spar)) && !missing(df)) {
- stop("cannot specify both 'spar' and 'df'")
- }
+vsmooth.spline <-
+ function(x, y, w = NULL, df = rep(5, M),
+ spar = NULL, #rep(0,M),
+ all.knots = FALSE,
+ iconstraint = diag(M),
+ xconstraint = diag(M),
+ constraints = list("(Intercepts)" = diag(M), x = diag(M)),
+ var.arg = FALSE,
+ scale.w = TRUE,
+ nk = NULL,
+ control.spar = list()) {
+ if (var.arg) {
+ warning("@var will be returned, but no use will be made of it")
+ }
- contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
- high = 1.5,
- tol = 1e-4,## tol = 0.001 was default till R 1.3.x
- eps = 2e-8,## eps = 0.00244 was default till R 1.3.x
- maxit = 500 )
-
- if(FALSE)
- contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
- high = 1.5,
- tol = 0.001, # was default till R 1.3.x
- eps = 0.00244, # was default till R 1.3.x
- maxit = 500 )
- contr.sp[(names(control.spar))] <- control.spar
- if(!all(sapply(contr.sp[1:4], is.numeric)) ||
- contr.sp$tol < 0 || contr.sp$eps <= 0 || contr.sp$maxit <= 0)
- stop("invalid 'control.spar'")
-
-
- my.call <- match.call()
- if (missing(y)) {
- if (is.list(x)) {
- if (any(is.na(match(c("x", "y"), names(x)))))
- stop("cannot find 'x' and 'y' in list")
- y <- x$y
- x <- x$x
- } else if (is.complex(x)) {
- y <- Im(x)
- x <- Re(x)
- } else if (is.matrix(x)) {
- y <- x[,-1]
- x <- x[,1]
- } else {
- y <- x
- x <- time(x)
- }
- }
- xvector <- x
- n_lm <- length(xvector)
- ymat <- as.matrix(y)
- ny2 <- dimnames(ymat)[[2]] # NULL if vector
- M <- ncol(ymat)
- if (n_lm != nrow(ymat)) {
- stop("lengths of 'x' and 'y' must match")
- }
+ missing.constraints <- missing(constraints)
+ if (!(missing.spar <- missing(spar)) && !missing(df)) {
+ stop("cannot specify both 'spar' and 'df'")
+ }
- if (any(is.na(xvector)) || any(is.na(ymat))) {
- stop("NAs not allowed in 'x' or 'y'")
- }
- if (is.null(w)) {
- wzmat <- matrix(1, n_lm, M)
+
+ contr.sp <- list(low = -1.5,## low = 0. was default till R 1.3.x
+ high = 1.5,
+ tol = 1e-4,## tol = 0.001 was default till R 1.3.x
+ eps = 2e-8,## eps = 0.00244 was default till R 1.3.x
+ maxit = 500 )
+
+
+
+
+ contr.sp[(names(control.spar))] <- control.spar
+ if(!all(sapply(contr.sp[1:4], is.numeric)) ||
+ contr.sp$tol < 0 || contr.sp$eps <= 0 || contr.sp$maxit <= 0)
+ stop("invalid 'control.spar'")
+
+
+ my.call <- match.call()
+ if (missing(y)) {
+ if (is.list(x)) {
+ if (any(is.na(match(c("x", "y"), names(x)))))
+ stop("cannot find 'x' and 'y' in list")
+ y <- x$y
+ x <- x$x
+ } else if (is.complex(x)) {
+ y <- Im(x)
+ x <- Re(x)
+ } else if (is.matrix(x)) {
+ y <- x[,-1]
+ x <- x[,1]
} else {
- if (any(is.na(w))) {
- stop("NAs not allowed in 'w'")
- }
- wzmat <- as.matrix(w)
-
- if (nrow(ymat) != nrow(wzmat) || ncol(wzmat) > M * (M+1) / 2) {
- stop("'w' and 'y' don't match")
- }
-
- if (scale.w) {
- wzmat <- wzmat / mean(wzmat[,1:M]) # 'Average' value is 1
- }
+ y <- x
+ x <- time(x)
}
- dim2wz <- ncol(wzmat)
+ }
+
+ xvector <- x
+ n_lm <- length(xvector)
+ ymat <- as.matrix(y)
+ ny2 <- dimnames(ymat)[[2]] # NULL if vector
+ M <- ncol(ymat)
+ if (n_lm != nrow(ymat)) {
+ stop("lengths of arguments 'x' and 'y' must match")
+ }
+
+ if (any(is.na(xvector)) || any(is.na(ymat))) {
+ stop("NAs not allowed in arguments 'x' or 'y'")
+ }
- if (missing.constraints) {
- constraints <- list("(Intercepts)" = eval(iconstraint),
- "x" = eval(xconstraint))
+ if (is.null(w)) {
+ wzmat <- matrix(1, n_lm, M)
+ } else {
+ if (any(is.na(w))) {
+ stop("NAs not allowed in argument 'w'")
}
- constraints <- eval(constraints)
- if (is.matrix(constraints)) {
- constraints <- list("(Intercepts)" = constraints, x = constraints)
+ wzmat <- as.matrix(w)
+
+ if (nrow(ymat) != nrow(wzmat) || ncol(wzmat) > M * (M+1) / 2) {
+ stop("arguments 'w' and 'y' don't match")
}
- if (!is.list(constraints) || length(constraints) != 2) {
- stop("'constraints' must equal a list (of length 2) or a matrix")
+
+ if (scale.w) {
+ wzmat <- wzmat / mean(wzmat[,1:M]) # 'Average' value is 1
}
- for (ii in 1:2)
- if (!is.numeric(constraints[[ii]]) ||
- !is.matrix (constraints[[ii]]) ||
- nrow(constraints[[ii]]) != M ||
- ncol(constraints[[ii]]) > M)
- stop("something wrong with 'constraints'")
- names(constraints) <- c("(Intercepts)", "x")
+ }
+ dim2wz <- ncol(wzmat)
+
+ if (missing.constraints) {
+ constraints <- list("(Intercepts)" = eval(iconstraint),
+ "x" = eval(xconstraint))
+ }
+ constraints <- eval(constraints)
+ if (is.matrix(constraints)) {
+ constraints <- list("(Intercepts)" = constraints,
+ "x" = constraints)
+ }
+ if (!is.list(constraints) || length(constraints) != 2) {
+ stop("'constraints' must equal a list (of length 2) or a matrix")
+ }
+ for (ii in 1:2)
+ if (!is.numeric(constraints[[ii]]) ||
+ !is.matrix (constraints[[ii]]) ||
+ nrow(constraints[[ii]]) != M ||
+ ncol(constraints[[ii]]) > M)
+ stop("something wrong with argument 'constraints'")
+ names(constraints) <- c("(Intercepts)", "x")
usortx <- unique(sort(as.vector(xvector)))
ooo <- match(xvector, usortx) # usortx[ooo] == x
neff <- length(usortx)
if (neff < 7) {
- stop("not enough unique 'x' values (need 7 or more)")
+ stop("not enough unique 'x' values (need 7 or more)")
}
dim1U <- dim2wz # 10/1/00; was M * (M+1) / 2
- collaps <- dotC(name="vsuff9",
+ collaps <- dotC(name = "vsuff9",
as.integer(n_lm), as.integer(neff), as.integer(ooo),
as.double(xvector), as.double(ymat), as.double(wzmat),
- xbar=double(neff), ybar=double(neff * M),
- wzbar=double(neff * dim2wz),
- uwzbar=double(1), wzybar=double(neff * M), okint=as.integer(0),
- as.integer(M), dim2wz=as.integer(dim2wz), dim1U=as.integer(dim1U),
- blist=as.double(diag(M)), ncolb=as.integer(M),
- trivc=as.integer(1), wuwzbar=as.integer(0),
+ xbar = double(neff), ybar = double(neff * M),
+ wzbar = double(neff * dim2wz),
+ uwzbar = double(1), wzybar = double(neff * M), okint = as.integer(0),
+ as.integer(M), dim2wz = as.integer(dim2wz), dim1U = as.integer(dim1U),
+ blist = as.double(diag(M)), ncolb = as.integer(M),
+ trivc = as.integer(1), wuwzbar = as.integer(0),
dim1Uwzbar = as.integer(dim1U), dim2wzbar = as.integer(dim2wz))
if (collaps$okint != 1) {
- stop("some non-positive-definite weight matrices ",
- "detected in 'vsuff9'")
+ stop("some non-positive-definite weight matrices ",
+ "detected in 'vsuff9'")
}
dim(collaps$ybar) <- c(neff, M)
if (FALSE) {
} else {
- yinyin = collaps$ybar # Includes both linear and nonlinear parts
- x = collaps$xbar # Could call this xxx for location finder
-
- lfit = vlm(yinyin ~ 1 + x, # xxx
- constraints = constraints,
- save.weight = FALSE,
- qr.arg = FALSE, x.arg = FALSE, y.arg = FALSE,
- smart = FALSE,
- weights = matrix(collaps$wzbar, neff, dim2wz))
+ yinyin <- collaps$ybar # Includes both linear and nonlinear parts
+ x <- collaps$xbar # Could call this xxx for location finder
+
+ lfit <- vlm(yinyin ~ 1 + x, # xxx
+ constraints = constraints,
+ save.weight = FALSE,
+ qr.arg = FALSE, x.arg = FALSE, y.arg = FALSE,
+ smart = FALSE,
+ weights = matrix(collaps$wzbar, neff, dim2wz))
}
ncb0 <- ncol(constraints[[2]]) # Of xxx and not of the intercept
@@ -249,193 +260,199 @@ vsmooth.spline <- function(x, y, w = NULL, df = rep(5, M),
dfvec <- rep(df, length = ncb0)
if (!missing.spar) {
- ispar <- 1
- if (any(spar <= 0) || !is.numeric(spar)) {
- stop("not allowed non-positive or non-numeric ",
- "smoothing parameters")
- }
- nonlin <- (spar != Inf)
+ ispar <- 1
+ if (any(spar <= 0) || !is.numeric(spar)) {
+ stop("not allowed non-positive or non-numeric ",
+ "smoothing parameters")
+ }
+ nonlin <- (spar != Inf)
} else {
- ispar <- 0
- if (!is.numeric(dfvec) || any(dfvec < 2 | dfvec > neff)) {
- stop("you must supply '2 <= df <= ", neff, "'")
- }
- nonlin <- (abs(dfvec - 2) > contr.sp$tol)
+ ispar <- 0
+ if (!is.numeric(dfvec) || any(dfvec < 2 | dfvec > neff)) {
+ stop("you must supply '2 <= df <= ", neff, "'")
+ }
+ nonlin <- (abs(dfvec - 2) > contr.sp$tol)
}
if (all(!nonlin)) {
- junk.fill = new("vsmooth.spline.fit",
- "Bcoefficients" = matrix(as.numeric(NA), 1, 1),
- "knots" = numeric(0),
- "xmin" = numeric(0),
- "xmax" = numeric(0)) # 8/11/03
-
- ratio = as.numeric(NA)
-
- object =
- new("vsmooth.spline",
- "call" = my.call,
- "constraints" = constraints,
- "df" = if (ispar == 0) dfvec else rep(2, length(spar)),
- "lfit" = lfit,
- "nlfit" = junk.fill,
- "spar" = if (ispar == 1) spar else rep(Inf, length(dfvec)),
- "lambda" = if (ispar == 1) ratio * 16.0^(spar * 6.0 - 2.0) else
- rep(Inf, length(dfvec)),
- "w" = matrix(collaps$wzbar, neff, dim2wz),
- "x" = usortx,
- "y" = lfit@fitted.values,
- "yin" = yinyin)
+ junk.fill <- new("vsmooth.spline.fit",
+ "Bcoefficients" = matrix(as.numeric(NA), 1, 1),
+ "knots" = numeric(0),
+ "xmin" = numeric(0),
+ "xmax" = numeric(0)) # 8/11/03
+
+ dratio <- as.numeric(NA)
+
+ object <-
+ new("vsmooth.spline",
+ "call" = my.call,
+ "constraints" = constraints,
+ "df" = if (ispar == 0) dfvec else rep(2, length(spar)),
+ "lfit" = lfit,
+ "nlfit" = junk.fill,
+ "spar" = if (ispar == 1) spar else rep(Inf, length(dfvec)),
+ "lambda" = if (ispar == 1) dratio * 16.0^(spar * 6.0 - 2.0) else
+ rep(Inf, length(dfvec)),
+ "w" = matrix(collaps$wzbar, neff, dim2wz),
+ "x" = usortx,
+ "y" = lfit@fitted.values,
+ "yin" = yinyin)
- return(object)
- }
+ return(object)
+ }
- xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
- noround = TRUE # Improvement 3/8/02
- nknots <- nk
- if (all.knots) {
- knot <- if (noround) {
- valid.vknotl2(c(rep(xbar[1],3), xbar, rep(xbar[neff],3)))
- } else {
- c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3))
- }
- if (length(nknots)) {
- warning("overriding 'nk' by 'all.knots = TRUE'")
- }
- nknots <- length(knot) - 4 # No longer neff + 2
- } else {
- chosen = length(nknots)
- if (chosen && (nknots > neff+2 || nknots <= 5)) {
- stop("bad value for 'nk'")
- }
- if (!chosen) {
- nknots = 0
- }
- knot.list <- dotC(name="vknootl2", as.double(xbar),
- as.integer(neff), knot=double(neff+6),
- k=as.integer(nknots+4), chosen=as.integer(chosen))
- if (noround) {
- knot = valid.vknotl2(knot.list$knot[1:(knot.list$k)])
- knot.list$k = length(knot)
- } else {
- knot <- knot.list$knot[1:(knot.list$k)]
- }
- nknots <- knot.list$k - 4
+ xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
+ noround <- TRUE # Improvement 3/8/02
+ nknots <- nk
+ if (all.knots) {
+ knot <- if (noround) {
+ valid.vknotl2(c(rep(xbar[1],3), xbar, rep(xbar[neff],3)))
+ } else {
+ c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3))
+ }
+ if (length(nknots)) {
+ warning("overriding 'nk' by 'all.knots = TRUE'")
+ }
+ nknots <- length(knot) - 4 # No longer neff + 2
+ } else {
+ chosen <- length(nknots)
+ if (chosen && (nknots > neff+2 || nknots <= 5)) {
+ stop("bad value for 'nk'")
}
- if (nknots <= 5) {
- stop("not enough distinct knots found")
+ if (!chosen) {
+ nknots <- 0
+ }
+ knot.list <- dotC(name = "vknootl2", as.double(xbar),
+ as.integer(neff), knot = double(neff+6),
+ k = as.integer(nknots+4),
+ chosen = as.integer(chosen))
+ if (noround) {
+ knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
+ knot.list$k <- length(knot)
+ } else {
+ knot <- knot.list$knot[1:(knot.list$k)]
}
+ nknots <- knot.list$k - 4
+ }
+ if (nknots <= 5) {
+ stop("not enough distinct knots found")
+ }
- conmat <- (constraints[[2]])[, nonlin, drop=FALSE]
- ncb <- sum(nonlin)
- trivc <- trivial.constraints(conmat)
- resmat <- collaps$ybar - lfit@fitted.values # neff by M
- spar.nl <- spar[nonlin]
- dofr.nl <- dfvec[nonlin]
+ conmat <- (constraints[[2]])[, nonlin, drop = FALSE]
+ ncb <- sum(nonlin)
+ trivc <- trivial.constraints(conmat)
+ resmat <- collaps$ybar - lfit@fitted.values # neff by M
+ spar.nl <- spar[nonlin]
+ dofr.nl <- dfvec[nonlin]
- dim1Uwzbar = if (trivc) dim1U else ncb * (ncb+1) / 2
- dim2wzbar = if (trivc) dim2wz else ncb * (ncb+1) / 2
- ooo <- 1:neff # Already sorted
+ dim1Uwzbar <- if (trivc) dim1U else ncb * (ncb+1) / 2
+ dim2wzbar <- if (trivc) dim2wz else ncb * (ncb+1) / 2
+ ooo <- 1:neff # Already sorted
- collaps <- dotC(name="vsuff9",
+ collaps <- dotC(name = "vsuff9",
as.integer(neff), as.integer(neff), as.integer(ooo),
as.double(collaps$xbar), as.double(resmat), as.double(collaps$wzbar),
- xbar=double(neff), ybar=double(neff * ncb),
- wzbar=double(neff * dim2wzbar),
- uwzbar=double(1), wzybar=double(neff * ncb), okint=as.integer(0),
+ xbar = double(neff), ybar = double(neff * ncb),
+ wzbar = double(neff * dim2wzbar),
+ uwzbar = double(1), wzybar = double(neff * ncb), okint = as.integer(0),
as.integer(M), as.integer(dim2wz), as.integer(dim1U),
- blist=as.double(conmat), ncolb=as.integer(ncb),
- as.integer(trivc), wuwzbar=as.integer(0),
+ blist = as.double(conmat), ncolb = as.integer(ncb),
+ as.integer(trivc), wuwzbar = as.integer(0),
as.integer(dim1Uwzbar), as.integer(dim2wzbar))
- if (collaps$okint != 1) {
- stop("some non-positive-definite weight matrices ",
- "detected in 'vsuff9' during the second call.")
- }
+ if (collaps$okint != 1) {
+ stop("some non-positive-definite weight matrices ",
+ "detected in 'vsuff9' during the second call.")
+ }
+
+ dim(collaps$ybar) <- dim(collaps$wzybar) <- c(neff, ncb)
+ dim(collaps$wzbar) <- c(neff, dim2wzbar)
+
+
+ ldk <- 3 * ncb + 1 # 10/7/02; Previously 4 * ncb
+ varmat <- if (var.arg) matrix(0, neff, ncb) else double(1)
+ vsplin <- dotC(name = "Yee_spline",
+ xs = as.double(xbar), as.double(collaps$wzybar),
+ as.double(collaps$wzbar), xknot = as.double(knot),
+ n = as.integer(neff), nknots = as.integer(nknots), as.integer(ldk),
+ M = as.integer(ncb), dim2wz = as.integer(dim2wzbar),
+
+ spar.nl = as.double(spar.nl), lamvec = as.double(spar.nl),
- dim(collaps$ybar) <- dim(collaps$wzybar) <- c(neff, ncb)
- dim(collaps$wzbar) <- c(neff, dim2wzbar)
-
-
- ldk = 3 * ncb + 1 # 10/7/02; Previously 4 * ncb
- varmat <- if (var.arg) matrix(0, neff, ncb) else double(1)
- vsplin <- dotC(name="Yee_spline",
- xs=as.double(xbar), as.double(collaps$wzybar),
- as.double(collaps$wzbar), xknot=as.double(knot),
- n=as.integer(neff), nknots=as.integer(nknots), as.integer(ldk),
- M=as.integer(ncb), dim2wz=as.integer(dim2wzbar),
- spar.nl=as.double(spar.nl), lamvec=as.double(spar.nl),
- iinfo=integer(1), fv=double(neff * ncb),
- Bcoef=double(nknots * ncb), varmat=as.double(varmat),
- levmat=double(neff * ncb), as.double(dofr.nl),
- ifvar=as.integer(var.arg), ierror=as.integer(0),
- n_lm=as.integer(neff),
+ iinfo = integer(1), fv = double(neff * ncb),
+ Bcoef = double(nknots * ncb), varmat = as.double(varmat),
+
+ levmat = double(neff * ncb), as.double(dofr.nl),
+
+ ifvar = as.integer(var.arg), ierror = as.integer(0),
+ n_lm = as.integer(neff),
double(nknots), double(nknots), double(nknots), double(nknots),
double(1), as.integer(0),
+
icontrsp = as.integer(contr.sp$maxit),
contrsp = as.double(unlist(contr.sp[1:4])))
- if (vsplin$ierror != 0) {
- stop("vsplin$ierror == ", vsplin$ierror,
- ". Something gone wrong in 'vsplin'")
- }
- if (vsplin$iinfo != 0) {
- stop("leading minor of order ", vsplin$iinfo,
- " is not positive-definite")
- }
+ if (vsplin$ierror != 0) {
+ stop("vsplin$ierror == ", vsplin$ierror,
+ ". Something gone wrong in 'vsplin'")
+ }
+ if (vsplin$iinfo != 0) {
+ stop("leading minor of order ", vsplin$iinfo,
+ " is not positive-definite")
+ }
- dim(vsplin$levmat) <- c(neff, ncb) # A matrix even when ncb == 1
- if (ncb > 1) {
- dim(vsplin$fv) <- c(neff, ncb)
- if (var.arg)
- dim(vsplin$varmat) <- c(neff, ncb)
- }
+ dim(vsplin$levmat) <- c(neff, ncb) # A matrix even when ncb == 1
+ if (ncb > 1) {
+ dim(vsplin$fv) <- c(neff, ncb)
+ if (var.arg)
+ dim(vsplin$varmat) <- c(neff, ncb)
+ }
- dofr.nl <- colSums(vsplin$levmat) # Actual EDF used
+ dofr.nl <- colSums(vsplin$levmat) # Actual EDF used
- fv <- lfit@fitted.values + vsplin$fv %*% t(conmat)
- if (M > 1) {
- dimnames(fv) <- list(NULL, ny2)
- }
+ fv <- lfit@fitted.values + vsplin$fv %*% t(conmat)
+ if (M > 1) {
+ dimnames(fv) <- list(NULL, ny2)
+ }
- dfvec[!nonlin] = 2.0
- dfvec[ nonlin] = dofr.nl
- if (ispar == 0) {
- spar[!nonlin] = Inf
- spar[ nonlin] = vsplin$spar.nl # Actually used
- }
+ dfvec[!nonlin] <- 2.0
+ dfvec[ nonlin] <- dofr.nl
+ if (ispar == 0) {
+ spar[!nonlin] <- Inf
+ spar[ nonlin] <- vsplin$spar.nl # Actually used
+ }
- fit.object = new("vsmooth.spline.fit",
- "Bcoefficients" = matrix(vsplin$Bcoef, nknots, ncb),
- "knots" = knot,
- "xmax" = usortx[neff],
- "xmin" = usortx[1])
+ fit.object <- new("vsmooth.spline.fit",
+ "Bcoefficients" = matrix(vsplin$Bcoef, nknots, ncb),
+ "knots" = knot,
+ "xmax" = usortx[neff],
+ "xmin" = usortx[1])
- object =
- new("vsmooth.spline",
- "call" = my.call,
- "constraints" = constraints,
- "df" = dfvec,
- "nlfit" = fit.object,
- "lev" = vsplin$levmat,
- "lfit" = lfit,
- "spar" = spar, # if (ispar == 1) spar else vsplin$spar,
- "lambda" = vsplin$lamvec, #
- "w" = collaps$wzbar,
- "x" = usortx,
- "y" = fv,
- "yin" = yinyin)
-
- if (var.arg)
- object@var = vsplin$varmat
-
- object
+ object <-
+ new("vsmooth.spline",
+ "call" = my.call,
+ "constraints" = constraints,
+ "df" = dfvec,
+ "nlfit" = fit.object,
+ "lev" = vsplin$levmat,
+ "lfit" = lfit,
+ "spar" = spar, # if (ispar == 1) spar else vsplin$spar,
+ "lambda" = vsplin$lamvec, #
+ "w" = collaps$wzbar,
+ "x" = usortx,
+ "y" = fv,
+ "yin" = yinyin)
+
+ if (var.arg)
+ object@var <- vsplin$varmat
+
+ object
}
@@ -448,11 +465,11 @@ show.vsmooth.spline <- function(x, ...) {
ncb <- if (length(x@nlfit)) ncol(x@nlfit@Bcoefficients) else NULL
cat("\nSmoothing Parameter (Spar):",
if (length(ncb) && ncb == 1) format(x@spar) else
- paste(format(x@spar), collapse=", "), "\n")
+ paste(format(x@spar), collapse = ", "), "\n")
cat("\nEquivalent Degrees of Freedom (Df):",
if (length(ncb) && ncb == 1) format(x@df) else
- paste(format(x@df), collapse=", "), "\n")
+ paste(format(x@df), collapse = ", "), "\n")
if (!all(trivial.constraints(x@constraints) == 1)) {
cat("\nConstraint matrices:\n")
@@ -464,160 +481,162 @@ show.vsmooth.spline <- function(x, ...) {
coefvsmooth.spline.fit <- function(object, ...) {
- object@Bcoefficients
+ object@Bcoefficients
}
coefvsmooth.spline <- function(object, matrix = FALSE, ...) {
list(lfit = coefvlm(object@lfit, matrix.out = matrix),
- nlfit=coefvsmooth.spline.fit(object@nlfit))
+ nlfit = coefvsmooth.spline.fit(object@nlfit))
}
fittedvsmooth.spline <- function(object, ...) {
- object@y
+ object@y
}
residvsmooth.spline <- function(object, ...) {
- as.matrix(object@yin - object@y)
+ as.matrix(object@yin - object@y)
}
-plotvsmooth.spline <- function(x, xlab="x", ylab="", points=TRUE,
- pcol=par()$col, pcex=par()$cex,
- pch=par()$pch, lcol=par()$col,
- lwd=par()$lwd, lty=par()$lty,
- add=FALSE, ...) {
- points.arg = points; rm(points)
- M = ncol(x@y)
- pcol = rep(pcol, length = M)
- pcex = rep(pcex, length = M)
- pch = rep(pch, length = M)
- lcol = rep(lcol, length = M)
- lwd = rep(lwd, length = M)
- lty = rep(lty, length = M)
- if (!add)
- matplot(x@x, x@yin, type="n", xlab=xlab, ylab=ylab, ...)
- for (ii in 1:ncol(x@y)) {
- if (points.arg)
- points(x@x, x@yin[,ii], col=pcol[ii], pch=pch[ii], cex=pcex[ii])
- lines(x@x, x@y[,ii], col=lcol[ii], lwd=lwd[ii], lty=lty[ii])
- }
- invisible(x)
+plotvsmooth.spline <- function(x, xlab = "x", ylab = "", points = TRUE,
+ pcol = par()$col, pcex = par()$cex,
+ pch = par()$pch, lcol = par()$col,
+ lwd = par()$lwd, lty = par()$lty,
+ add = FALSE, ...) {
+ points.arg <- points; rm(points)
+ M <- ncol(x@y)
+ pcol <- rep(pcol, length = M)
+ pcex <- rep(pcex, length = M)
+ pch <- rep(pch, length = M)
+ lcol <- rep(lcol, length = M)
+ lwd <- rep(lwd, length = M)
+ lty <- rep(lty, length = M)
+ if (!add)
+ matplot(x@x, x@yin, type = "n", xlab = xlab, ylab = ylab, ...)
+ for (ii in 1:ncol(x@y)) {
+ if (points.arg)
+ points(x@x, x@yin[,ii], col = pcol[ii], pch = pch[ii], cex = pcex[ii])
+ lines(x@x, x@y[,ii], col = lcol[ii], lwd = lwd[ii], lty = lty[ii])
+ }
+ invisible(x)
}
predictvsmooth.spline <- function(object, x, deriv = 0, se.fit = FALSE) {
- if (se.fit)
- warning("'se.fit=TRUE' is not currently implemented. ",
- "Using 'se.fit=FALSE'")
-
- lfit <- object@lfit # Linear part of the vector spline
- nlfit <- object@nlfit # Nonlinear part of the vector spline
+ if (se.fit)
+ warning("'se.fit = TRUE' is not currently implemented. ",
+ "Using 'se.fit = FALSE'")
- if (missing(x)) {
- if (deriv == 0) {
- return(list(x = object@x, y = object@y))
- } else {
- x <- object@x
- return(Recall(object, x, deriv))
- }
+ lfit <- object@lfit # Linear part of the vector spline
+ nlfit <- object@nlfit # Nonlinear part of the vector spline
+ if (missing(x)) {
+ if (deriv == 0) {
+ return(list(x = object@x, y = object@y))
+ } else {
+ x <- object@x
+ return(Recall(object, x, deriv))
}
- mat.coef = coefvlm(lfit, matrix.out = TRUE)
- coeflfit <- t(mat.coef) # M x p now
- M <- nrow(coeflfit) # if (is.matrix(object at y)) ncol(object at y) else 1
-
- pred = if (deriv == 0)
- predict(lfit, data.frame(x = x)) else
- if (deriv == 1)
- matrix(coeflfit[,2], length(x), M, byrow = TRUE) else
- matrix(0, length(x), M)
- if (!length(nlfit@knots)) {
- return(list(x = x, y = pred))
- }
+ }
+
+ mat.coef <- coefvlm(lfit, matrix.out = TRUE)
+ coeflfit <- t(mat.coef) # M x p now
+ M <- nrow(coeflfit) # if (is.matrix(object at y)) ncol(object at y) else 1
+
+ pred <- if (deriv == 0)
+ predict(lfit, data.frame(x = x)) else
+ if (deriv == 1)
+ matrix(coeflfit[,2], length(x), M, byrow = TRUE) else
+ matrix(0, length(x), M)
+ if (!length(nlfit@knots)) {
+ return(list(x = x, y = pred))
+ }
- nonlin <- (object@spar != Inf)
+ nonlin <- (object@spar != Inf)
- conmat = if (!length(lfit@constraints)) diag(M) else
- lfit@constraints[[2]]
- conmat = conmat[, nonlin, drop = FALSE] # Of nonlinear functions
+ conmat <- if (!length(lfit@constraints)) diag(M) else
+ lfit@constraints[[2]]
+ conmat <- conmat[, nonlin, drop = FALSE] # Of nonlinear functions
- list(x = x, y=pred + predict(nlfit, x, deriv)$y %*% t(conmat))
+ list(x = x, y = pred + predict(nlfit, x, deriv)$y %*% t(conmat))
}
+
+
predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
- nknots = nrow(object@Bcoefficients)
- drangex <- object@xmax - object@xmin
- if (missing(x))
- x <- seq(from = object@xmin, to = object@xmax, length.out = nknots-4)
-
- xs <- as.double((x - object@xmin) / drangex)
-
- bad.left <- (xs < 0)
- bad.right <- (xs > 1)
- good <- !(bad.left | bad.right)
-
- ncb <- ncol(object@Bcoefficients)
- y <- matrix(as.numeric(NA), length(xs), ncb)
- if (ngood <- sum(good)) {
- junk <- dotC(name="Yee_vbvs", as.integer(ngood),
- as.double(object@knots), as.double(object@Bcoefficients),
- as.double(xs[good]), smomat=double(ngood * ncb),
- as.integer(nknots), as.integer(deriv), as.integer(ncb))
- y[good,] <- junk$smomat
-
- if (TRUE && deriv > 1) {
- edges <- xs <= 0 | xs >= 1 # Zero the edges & beyond explicitly
- y[edges,] <- 0
- }
- }
- if (any(!good)) {
- xrange <- c(object@xmin, object@xmax)
- if (deriv == 0) {
- end.object <- Recall(object, xrange)$y
- end.slopes <- Recall(object, xrange, 1)$y * drangex
-
- if (any(bad.left)) {
- y[bad.left,] = rep(end.object[1,], rep(sum(bad.left), ncb)) +
- rep(end.slopes[1,], rep(sum(bad.left), ncb)) *
- xs[bad.left]
- }
- if (any(bad.right)) {
- y[bad.right,]= rep(end.object[2,], rep(sum(bad.right), ncb)) +
- rep(end.slopes[2,], rep(sum(bad.right), ncb)) *
- (xs[bad.right] - 1)
- }
- } else if (deriv == 1) {
- end.slopes <- Recall(object, xrange, 1)$y * drangex
- y[bad.left,] <- rep(end.slopes[1,], rep(sum(bad.left), ncb))
- y[bad.right,] <- rep(end.slopes[2,], rep(sum(bad.right), ncb))
- } else
- y[!good,] <- 0
+ nknots <- nrow(object@Bcoefficients)
+ drangex <- object@xmax - object@xmin
+ if (missing(x))
+ x <- seq(from = object@xmin, to = object@xmax, length.out = nknots-4)
+
+ xs <- as.double((x - object@xmin) / drangex)
+
+ bad.left <- (xs < 0)
+ bad.right <- (xs > 1)
+ good <- !(bad.left | bad.right)
+
+ ncb <- ncol(object@Bcoefficients)
+ y <- matrix(as.numeric(NA), length(xs), ncb)
+ if (ngood <- sum(good)) {
+ junk <- dotC(name = "Yee_vbvs", as.integer(ngood),
+ as.double(object@knots), as.double(object@Bcoefficients),
+ as.double(xs[good]), smomat = double(ngood * ncb),
+ as.integer(nknots), as.integer(deriv), as.integer(ncb))
+ y[good,] <- junk$smomat
+
+ if (TRUE && deriv > 1) {
+ edges <- xs <= 0 | xs >= 1 # Zero the edges & beyond explicitly
+ y[edges,] <- 0
}
- if (deriv > 0)
- y <- y / (drangex^deriv)
- list(x = x, y = y)
+ }
+ if (any(!good)) {
+ xrange <- c(object@xmin, object@xmax)
+ if (deriv == 0) {
+ end.object <- Recall(object, xrange)$y
+ end.slopes <- Recall(object, xrange, 1)$y * drangex
+
+ if (any(bad.left)) {
+ y[bad.left,] <- rep(end.object[1,], rep(sum(bad.left), ncb)) +
+ rep(end.slopes[1,], rep(sum(bad.left), ncb)) *
+ xs[bad.left]
+ }
+ if (any(bad.right)) {
+ y[bad.right,] <- rep(end.object[2,], rep(sum(bad.right), ncb)) +
+ rep(end.slopes[2,], rep(sum(bad.right), ncb)) *
+ (xs[bad.right] - 1)
+ }
+ } else if (deriv == 1) {
+ end.slopes <- Recall(object, xrange, 1)$y * drangex
+ y[bad.left,] <- rep(end.slopes[1,], rep(sum(bad.left), ncb))
+ y[bad.right,] <- rep(end.slopes[2,], rep(sum(bad.right), ncb))
+ } else
+ y[!good,] <- 0
+ }
+ if (deriv > 0)
+ y <- y / (drangex^deriv)
+ list(x = x, y = y)
}
valid.vknotl2 <- function(knot, tol = 1/1024) {
- junk = dotC(name="Yee_pknootl2", knot=as.double(knot),
- as.integer(length(knot)),
- keep=integer(length(knot)), as.double(tol))
- keep = as.logical(junk$keep)
- knot = junk$knot[keep]
- if (length(knot) <= 11) {
- stop("too few (distinct) knots")
- }
- knot
+ junk <- dotC(name = "Yee_pknootl2", knot = as.double(knot),
+ as.integer(length(knot)),
+ keep = integer(length(knot)), as.double(tol))
+ keep <- as.logical(junk$keep)
+ knot <- junk$knot[keep]
+ if (length(knot) <= 11) {
+ stop("too few (distinct) knots")
+ }
+ knot
}
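To tie the pieces of this file together: the new depvar() generic defined above simply returns the @y slot of a fitted "vsmooth.spline" object, alongside the existing fitted()/resid() accessors. A small sketch on simulated data, for illustration only:

  set.seed(2)
  xx <- runif(100)
  yy <- cbind(y1 = sin(2 * pi * xx) + rnorm(100, sd = 0.2),
              y2 = cos(2 * pi * xx) + rnorm(100, sd = 0.2))
  fit <- vsmooth.spline(x = xx, y = yy, df = 4)
  head(depvar(fit))        # the @y slot, per depvar.vsmooth.spline() above
  head(resid(fit))         # @yin minus @y, per residvsmooth.spline() above
  plotvsmooth.spline(fit)  # fitted curves for both response columns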
diff --git a/R/zzz.R b/R/zzz.R
deleted file mode 100644
index b26c345..0000000
--- a/R/zzz.R
+++ /dev/null
@@ -1,13 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-.First.lib <- function(lib, pkg) {
- library.dynam("VGAM", pkg, lib)
-}
-
-
-
-
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
new file mode 100644
index 0000000..04a4c45
Binary files /dev/null and b/data/Huggins89.t1.rda differ
diff --git a/data/Perom.rda b/data/Perom.rda
new file mode 100644
index 0000000..a5627f2
Binary files /dev/null and b/data/Perom.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 81f9610..139f886 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index d9dba17..97e6be8 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index f305bdd..5c937fa 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index 365a38a..db0e160 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index c106411..e0e3019 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/chinese.nz.txt.gz b/data/chinese.nz.txt.gz
index dac5491..0f4c6a1 100644
Binary files a/data/chinese.nz.txt.gz and b/data/chinese.nz.txt.gz differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index ea9c46a..dcbcbcd 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index fbd0fc1..402a25e 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index bb96e9f..860ee59 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 2932344..5a59896 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 6d995b8..fc07dbd 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 6feedb5..41be541 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/crime.us.rda b/data/crime.us.rda
deleted file mode 100644
index d223c25..0000000
Binary files a/data/crime.us.rda and /dev/null differ
diff --git a/data/datalist b/data/datalist
deleted file mode 100644
index db7b6bd..0000000
--- a/data/datalist
+++ /dev/null
@@ -1,49 +0,0 @@
-alclevels
-alcoff
-auuc
-backPain
-bmi.nz
-car.all
-chest.nz
-chinese.nz
-coalminers
-crashbc
-crashf
-crashi
-crashmc
-crashp
-crashtr
-crime.us
-enzyme
-fibre15
-fibre1dot5: fibre1.5
-finney44
-gala
-gew
-grain.us
-hormone
-hspider
-hued
-huie
-hunua
-huse
-leukemia
-lirat
-marital.nz
-mmt
-olympic
-oxtemp
-pneumo
-rainfall
-ruge
-toxop
-ucberk
-ugss
-venice
-venice90
-waitakere
-wffc
-wffc.indiv
-wffc.nc
-wffc.teams
-xs.nz
diff --git a/data/fibre15.rda b/data/fibre15.rda
deleted file mode 100644
index f8eb7a6..0000000
Binary files a/data/fibre15.rda and /dev/null differ
diff --git a/data/fibre1dot5.rda b/data/fibre1dot5.rda
deleted file mode 100644
index 5f52020..0000000
Binary files a/data/fibre1dot5.rda and /dev/null differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 2601c01..722f3db 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/gala.rda b/data/gala.rda
deleted file mode 100644
index f585262..0000000
Binary files a/data/gala.rda and /dev/null differ
diff --git a/data/gew.txt.gz b/data/gew.txt.gz
index c8c35a6..28e0e5c 100644
Binary files a/data/gew.txt.gz and b/data/gew.txt.gz differ
diff --git a/data/hspider.rda b/data/hspider.rda
index c9caaa4..c490aa1 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/hued.rda b/data/hued.rda
deleted file mode 100644
index 881bdde..0000000
Binary files a/data/hued.rda and /dev/null differ
diff --git a/data/huie.rda b/data/huie.rda
deleted file mode 100644
index 9a1d5f9..0000000
Binary files a/data/huie.rda and /dev/null differ
diff --git a/data/huse.rda b/data/huse.rda
deleted file mode 100644
index e58dec1..0000000
Binary files a/data/huse.rda and /dev/null differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 3bd8d95..8e81a1f 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index 5c68005..a1ac6bb 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/mmt.rda b/data/mmt.rda
index dd515a8..18acd65 100644
Binary files a/data/mmt.rda and b/data/mmt.rda differ
diff --git a/data/olympic.txt.gz b/data/olym08.txt.gz
similarity index 66%
rename from data/olympic.txt.gz
rename to data/olym08.txt.gz
index 42dc271..373db8a 100644
Binary files a/data/olympic.txt.gz and b/data/olym08.txt.gz differ
diff --git a/data/olym12.txt.gz b/data/olym12.txt.gz
new file mode 100644
index 0000000..528ad46
Binary files /dev/null and b/data/olym12.txt.gz differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index cf8c0ee..89426e6 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/rainfall.rda b/data/rainfall.rda
deleted file mode 100644
index e3c612a..0000000
Binary files a/data/rainfall.rda and /dev/null differ
diff --git a/data/ruge.rda b/data/ruge.rda
index 161ae03..fe71487 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index cf85b63..84da16c 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/ugss.rda b/data/ugss.rda
deleted file mode 100644
index fcc35d3..0000000
Binary files a/data/ugss.rda and /dev/null differ
diff --git a/data/venice.rda b/data/venice.rda
index 7f1a32c..c66480c 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 397db38..ba91a39 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wffc.indiv.rda b/data/wffc.indiv.rda
index a3d3742..c126c35 100644
Binary files a/data/wffc.indiv.rda and b/data/wffc.indiv.rda differ
diff --git a/data/wffc.nc.rda b/data/wffc.nc.rda
index 21cc525..93d820f 100644
Binary files a/data/wffc.nc.rda and b/data/wffc.nc.rda differ
diff --git a/data/wffc.rda b/data/wffc.rda
index 6bdbf16..dcccb00 100644
Binary files a/data/wffc.rda and b/data/wffc.rda differ
diff --git a/data/wffc.teams.rda b/data/wffc.teams.rda
index d295353..aa6966e 100644
Binary files a/data/wffc.teams.rda and b/data/wffc.teams.rda differ
diff --git a/data/xs.nz.rda b/data/xs.nz.rda
deleted file mode 100644
index 0b20db8..0000000
Binary files a/data/xs.nz.rda and /dev/null differ
diff --git a/inst/CITATION b/inst/CITATION
index 0c5220d..56af1a0 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -41,6 +41,27 @@ citEntry(entry = "Article",
)
+citEntry(entry = "Article",
+ title = "The {VGAM} Package for Categorical Data Analysis",
+ author = personList(as.person("Thomas W. Yee")),
+ journal = "Journal of Statistical Software",
+ year = "2010",
+ volume = "32",
+ number = "10",
+ pages = "1--34",
+ url = "http://www.jstatsoft.org/v32/i10/",
+
+ textVersion =
+ paste("Thomas W. Yee (2010).",
+ "The VGAM Package for Categorical Data Analysis.",
+ "Journal of Statistical Software, 32(10), 1-34.",
+ "URL http://www.jstatsoft.org/v32/i10/."),
+ header = "and/or"
+)
+
+
+
+
citEntry(entry = "Manual",
title = "{VGAM}: Vector Generalized Linear and Additive Models",
author = personList(as.person("Thomas W. Yee")),
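Side note on the CITATION hunk above: these entries are what R surfaces through the standard citation mechanism, so a quick post-install check (hypothetical interactive session) is simply:

library("VGAM")
citation("VGAM")   # should list the JSS (2010) article added above plus the manual entry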
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
new file mode 100644
index 0000000..cabf345
--- /dev/null
+++ b/inst/doc/categoricalVGAM.R
@@ -0,0 +1,396 @@
+### R code from vignette source 'categoricalVGAM.Rnw'
+
+###################################################
+### code chunk number 1: categoricalVGAM.Rnw:84-89
+###################################################
+library("VGAM")
+ps.options(pointsize = 12)
+options(width = 72, digits = 4)
+options(SweaveHooks = list(fig = function() par(las = 1)))
+options(prompt = "R> ", continue = "+")
+
+
+###################################################
+### code chunk number 2: categoricalVGAM.Rnw:613-616
+###################################################
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+ cumulative(reverse = TRUE, parallel = TRUE), pneumo)
+
+
+###################################################
+### code chunk number 3: categoricalVGAM.Rnw:899-903
+###################################################
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+squaremat <- matrix(c(NA, 33, 320, 284, 730, NA, 813, 276,
+ 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
+dimnames(squaremat) <- list(winner = journal, loser = journal)
+
+
+###################################################
+### code chunk number 4: categoricalVGAM.Rnw:1004-1008
+###################################################
+abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
+fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, abodat)
+coef(fit, matrix = TRUE)
+Coef(fit) # Estimated pA and pB
+
+
+###################################################
+### code chunk number 5: categoricalVGAM.Rnw:1314-1315
+###################################################
+head(wffc.nc, 5)
+
+
+###################################################
+### code chunk number 6: categoricalVGAM.Rnw:1324-1336
+###################################################
+fnc <- transform(wffc.nc,
+ finame = factor(iname),
+ fsector = factor(sector),
+ fday = factor(ceiling(session / 2)),
+ mornaft = 1 - (session %% 2),
+ fbeatboat = factor(beatboat))
+
+fnc <- fnc[with(fnc, !is.element(comid, c(99,72,80,93,45,71,97,78))),]
+fnc <- transform(fnc,
+ ordnum = ifelse(numbers <= 02, "few",
+ ifelse(numbers <= 10, "more", "most")))
+fnc$ordnum <- ordered(fnc$ordnum, levels = c("few", "more", "most"))
+
+
+###################################################
+### code chunk number 7: categoricalVGAM.Rnw:1341-1342
+###################################################
+with(fnc, table(ordnum))
+
+
+###################################################
+### code chunk number 8: categoricalVGAM.Rnw:1349-1356
+###################################################
+fit.pom <- vglm(ordnum ~
+ fsector +
+ mornaft +
+ fday +
+ finame,
+ family = cumulative(parallel = TRUE, reverse = TRUE),
+ data = fnc)
+
+
+###################################################
+### code chunk number 9: categoricalVGAM.Rnw:1368-1370
+###################################################
+head(fit.pom@y, 3)
+colSums(fit.pom@y)
+
+
+###################################################
+### code chunk number 10: categoricalVGAM.Rnw:1381-1383
+###################################################
+head(coef(fit.pom, matrix = TRUE), 10)
+#head(summary(fit.pom)@coef3, 10) # Old now since 0.7-10 is nicer
+
+
+###################################################
+### code chunk number 11: categoricalVGAM.Rnw:1387-1388
+###################################################
+head(coef(summary(fit.pom)), 10)
+
+
+###################################################
+### code chunk number 12: categoricalVGAM.Rnw:1434-1442
+###################################################
+fit.ppom <- vglm(ordnum ~
+ fsector +
+ mornaft +
+ fday +
+ finame,
+ cumulative(parallel = FALSE ~ 1 + mornaft, reverse = TRUE),
+ data = fnc)
+head(coef(fit.ppom, matrix = TRUE), 8)
+
+
+###################################################
+### code chunk number 13: categoricalVGAM.Rnw:1447-1449
+###################################################
+pchisq(deviance(fit.pom) - deviance(fit.ppom),
+ df = df.residual(fit.pom) - df.residual(fit.ppom), lower.tail=FALSE)
+
+
+###################################################
+### code chunk number 14: categoricalVGAM.Rnw:1456-1464
+###################################################
+fit2.ppom <- vglm(ordnum ~
+ fsector +
+ mornaft +
+ fday +
+ finame,
+ family = cumulative(parallel = FALSE ~ 1 + fday, reverse = TRUE),
+ data = fnc)
+head(coef(fit2.ppom, matrix = TRUE), 8)
+
+
+###################################################
+### code chunk number 15: categoricalVGAM.Rnw:1469-1470
+###################################################
+head(fitted(fit2.ppom), 3)
+
+
+###################################################
+### code chunk number 16: categoricalVGAM.Rnw:1475-1476
+###################################################
+head(predict(fit2.ppom), 3)
+
+
+###################################################
+### code chunk number 17: categoricalVGAM.Rnw:1480-1482
+###################################################
+dim(model.matrix(fit2.ppom, type = "lm"))
+dim(model.matrix(fit2.ppom, type = "vlm"))
+
+
+###################################################
+### code chunk number 18: categoricalVGAM.Rnw:1486-1487
+###################################################
+constraints(fit2.ppom)[c(1, 2, 5, 6)]
+
+
+###################################################
+### code chunk number 19: categoricalVGAM.Rnw:1524-1526
+###################################################
+head(marital.nz, 4)
+summary(marital.nz)
+
+
+###################################################
+### code chunk number 20: categoricalVGAM.Rnw:1529-1531
+###################################################
+fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
+ data = marital.nz)
+
+
+###################################################
+### code chunk number 21: categoricalVGAM.Rnw:1535-1537
+###################################################
+head(fit.ms@y, 4)
+colSums(fit.ms@y)
+
+
+###################################################
+### code chunk number 22: categoricalVGAM.Rnw:1546-1558
+###################################################
+# Plot output
+mycol <- c("red","darkgreen","blue")
+ par(mfrow=c(2,2))
+plot(fit.ms, se=TRUE, scale=12,
+ lcol=mycol, scol=mycol)
+
+# Plot output overlayed
+#par(mfrow=c(1,1))
+plot(fit.ms, se=TRUE, scale=12,
+ overlay=TRUE,
+ llwd=2,
+ lcol=mycol, scol=mycol)
+
+
+###################################################
+### code chunk number 23: categoricalVGAM.Rnw:1601-1614
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+mycol <- c("red","darkgreen","blue")
+ par(mfrow=c(2,2))
+ par(mar=c(4.2,4.0,1.2,2.2)+0.1)
+plot(fit.ms, se=TRUE, scale=12,
+ lcol=mycol, scol=mycol)
+
+# Plot output overlaid
+#par(mfrow=c(1,1))
+plot(fit.ms, se=TRUE, scale=12,
+ overlay=TRUE,
+ llwd=2,
+ lcol=mycol, scol=mycol)
+
+
+###################################################
+### code chunk number 24: categoricalVGAM.Rnw:1634-1635
+###################################################
+plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
+
+
+###################################################
+### code chunk number 25: categoricalVGAM.Rnw:1644-1648
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+ par(mfrow=c(1,3))
+ par(mar=c(4.5,4.0,0.2,2.2)+0.1)
+plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
+
+
+###################################################
+### code chunk number 26: categoricalVGAM.Rnw:1671-1683
+###################################################
+foo <- function(x, elbow=50)
+ poly(pmin(x, elbow), 2)
+
+clist <- list("(Intercept)" = diag(3),
+ "poly(age, 2)" = rbind(1, 0, 0),
+ "foo(age)" = rbind(0, 1, 0),
+ "age" = rbind(0, 0, 1))
+fit2.ms <-
+ vglm(mstatus ~ poly(age, 2) + foo(age) + age,
+ family = multinomial(refLevel = 2),
+ constraints = clist,
+ data = marital.nz)
+
+
+###################################################
+### code chunk number 27: categoricalVGAM.Rnw:1686-1687
+###################################################
+coef(fit2.ms, matrix = TRUE)
+
+
+###################################################
+### code chunk number 28: categoricalVGAM.Rnw:1691-1698
+###################################################
+par(mfrow=c(2,2))
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[2], scol=mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[3], scol = mycol[3], which.term = 3)
+
+
+###################################################
+### code chunk number 29: categoricalVGAM.Rnw:1709-1718
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+par(mfrow=c(2,2))
+ par(mar=c(4.5,4.0,1.2,2.2)+0.1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[3], scol = mycol[3], which.term = 3)
+
+
+###################################################
+### code chunk number 30: categoricalVGAM.Rnw:1736-1737
+###################################################
+deviance(fit.ms) - deviance(fit2.ms)
+
+
+###################################################
+### code chunk number 31: categoricalVGAM.Rnw:1743-1744
+###################################################
+(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
+
+
+###################################################
+### code chunk number 32: categoricalVGAM.Rnw:1747-1748
+###################################################
+1-pchisq(deviance(fit.ms) - deviance(fit2.ms), df=dfdiff)
+
+
+###################################################
+### code chunk number 33: categoricalVGAM.Rnw:1761-1772
+###################################################
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
+ type="l", las=1, lwd=2, ylim=0:1,
+ ylab="Fitted probabilities",
+ xlab="Age", # main="Marital status amongst NZ Male Europeans",
+ col=c(mycol[1], "black", mycol[-1])))
+legend(x=52.5, y=0.62, # x="topright",
+ col=c(mycol[1], "black", mycol[-1]),
+ lty=1:4,
+ legend=colnames(fit.ms@y), lwd=2)
+abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
+
+
+###################################################
+### code chunk number 34: categoricalVGAM.Rnw:1787-1800
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+ par(mfrow=c(1,1))
+ par(mar=c(4.5,4.0,0.2,0.2)+0.1)
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
+ type="l", las=1, lwd=2, ylim=0:1,
+ ylab="Fitted probabilities",
+ xlab="Age",
+ col=c(mycol[1], "black", mycol[-1])))
+legend(x=52.5, y=0.62,
+ col=c(mycol[1], "black", mycol[-1]),
+ lty=1:4,
+ legend=colnames(fit.ms@y), lwd=2.1)
+abline(v=seq(10,90,by=5), h=seq(0,1,by=0.1), col="gray", lty="dashed")
+
+
+###################################################
+### code chunk number 35: categoricalVGAM.Rnw:1834-1838
+###################################################
+# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
+head(backPain, 4)
+summary(backPain)
+backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
+
+
+###################################################
+### code chunk number 36: categoricalVGAM.Rnw:1842-1843
+###################################################
+bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain)
+
+
+###################################################
+### code chunk number 37: categoricalVGAM.Rnw:1846-1847
+###################################################
+Coef(bp.rrmlm1)
+
+
+###################################################
+### code chunk number 38: categoricalVGAM.Rnw:1875-1876
+###################################################
+set.seed(123)
+
+
+###################################################
+### code chunk number 39: categoricalVGAM.Rnw:1879-1881
+###################################################
+bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
+ Corner = FALSE, Uncor = TRUE)
+
+
+###################################################
+### code chunk number 40: categoricalVGAM.Rnw:1889-1893
+###################################################
+biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
+# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
+ xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
+ chull=TRUE, clty=2, ccol="blue")
+
+
+###################################################
+### code chunk number 41: categoricalVGAM.Rnw:1925-1933
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+ par(mfrow=c(1,1))
+ par(mar=c(4.5,4.0,0.2,2.2)+0.1)
+
+biplot(bp.rrmlm2, Acol="blue", Ccol="darkgreen", scores=TRUE,
+# xlim=c(-1,6), ylim=c(-1.2,4), # Use this if not scaled
+ xlim=c(-4.5,2.2), ylim=c(-2.2, 2.2), # Use this if scaled
+ chull=TRUE, clty=2, ccol="blue")
+
+
+###################################################
+### code chunk number 42: categoricalVGAM.Rnw:2047-2048
+###################################################
+iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
+
+
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 79095d7..3703669 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index 0786a8b..9f7f384 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -22,17 +22,25 @@ AA.Aa.aa(link = "logit", init.pA = NULL)
This one parameter model involves a probability called \code{pA}.
The probability of getting a count in the first column of the
input (an AA) is \code{pA*pA}.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
+
+
Weir, B. S. (1996)
\emph{Genetic Data Analysis II: Methods for Discrete Population
Genetic Data},
Sunderland, MA: Sinauer Associates, Inc.
+
+
}
\author{ T. W. Yee }
\note{
@@ -46,17 +54,18 @@ Sunderland, MA: Sinauer Associates, Inc.
}
\seealso{
-\code{\link{AB.Ab.aB.ab}},
-\code{\link{AB.Ab.aB.ab2}},
-\code{\link{ABO}},
-\code{\link{G1G2G3}},
-\code{\link{MNSs}}.
+ \code{\link{AB.Ab.aB.ab}},
+ \code{\link{AB.Ab.aB.ab2}},
+ \code{\link{ABO}},
+ \code{\link{G1G2G3}},
+ \code{\link{MNSs}}.
+
}
\examples{
-y = cbind(53, 95, 38)
-fit = vglm(y ~ 1, AA.Aa.aa(link="probit"), trace=TRUE)
-rbind(y, sum(y)*fitted(fit))
+y <- cbind(53, 95, 38)
+fit <- vglm(y ~ 1, AA.Aa.aa(link = "probit"), trace = TRUE)
+rbind(y, sum(y) * fitted(fit))
Coef(fit) # Estimated pA
summary(fit)
}
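Aside on the AA.Aa.aa details above: the genotype probabilities implied by a single pA are the usual Hardy-Weinberg ones, which is easy to check numerically (the value of pA below is hypothetical):

pA <- 0.6                                               # hypothetical allele frequency
c(AA = pA^2, Aa = 2 * pA * (1 - pA), aa = (1 - pA)^2)   # the three probabilities sum to 1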
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index a752ce7..f56d701 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -18,48 +18,58 @@ AB.Ab.aB.ab(link = "logit", init.p = NULL)
}
\item{init.p}{ Optional initial value for \code{p}. }
+
}
\details{
This one parameter model involves a probability called \code{p}.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
+
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
+
}
\author{ T. W. Yee }
\note{
-The input can be a 4-column matrix of counts, where the columns
-are AB, Ab, aB and ab
-(in order).
-Alternatively, the input can be a 4-column matrix of
-proportions (so each row adds to 1) and the \code{weights}
-argument is used to specify the total number of counts for each row.
+ The input can be a 4-column matrix of counts, where the columns
+ are AB, Ab, aB and ab
+ (in order).
+ Alternatively, the input can be a 4-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
+
}
\seealso{
-\code{\link{AA.Aa.aa}},
-\code{\link{AB.Ab.aB.ab2}},
-\code{\link{ABO}},
-\code{\link{G1G2G3}},
-\code{\link{MNSs}}.
+ \code{\link{AA.Aa.aa}},
+ \code{\link{AB.Ab.aB.ab2}},
+ \code{\link{ABO}},
+ \code{\link{G1G2G3}},
+ \code{\link{MNSs}}.
+
}
\examples{
-y = cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925)
-fit = vglm(y ~ 1, AB.Ab.aB.ab(link="identity", init.p=0.9), trace=TRUE)
-fit = vglm(y ~ 1, AB.Ab.aB.ab, trace=TRUE)
-rbind(y, sum(y)*fitted(fit))
+ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32) # Data from Fisher (1925)
+fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identity", init.p = 0.9), trace = TRUE)
+fit <- vglm(ymat ~ 1, AB.Ab.aB.ab, trace = TRUE)
+rbind(ymat, sum(ymat)*fitted(fit))
Coef(fit) # Estimated p
-p = sqrt(4*(fitted(fit)[,4]))
+p <- sqrt(4*(fitted(fit)[, 4]))
p*p
summary(fit)
}
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
index a62806d..ce722b6 100644
--- a/man/AB.Ab.aB.ab2.Rd
+++ b/man/AB.Ab.aB.ab2.Rd
@@ -22,48 +22,55 @@ AB.Ab.aB.ab2(link = "logit", init.p = NULL)
\details{
This one parameter model involves a probability called \code{p}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
-Elandt-Johnson, R. C. (1971)
-\emph{Probability Models and Statistical Methods in Genetics},
-New York: Wiley.
+
+ Elandt-Johnson, R. C. (1971)
+ \emph{Probability Models and Statistical Methods in Genetics},
+ New York: Wiley.
+
+
}
\author{ T. W. Yee }
\note{
-The input can be a 4-column matrix of counts.
-Alternatively, the input can be a 4-column matrix of
-proportions (so each row adds to 1) and the \code{weights}
-argument is used to specify the total number of counts for each row.
+ The input can be a 4-column matrix of counts.
+ Alternatively, the input can be a 4-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
}
\section{Warning}{
-There may be a bug in the \code{deriv} and \code{weight} slot of the
-family function.
+ There may be a bug in the \code{deriv} and \code{weight} slot of the
+ family function.
+
}
\seealso{
-\code{\link{AA.Aa.aa}},
-\code{\link{AB.Ab.aB.ab}},
-\code{\link{ABO}},
-\code{\link{G1G2G3}},
-\code{\link{MNSs}}.
+ \code{\link{AA.Aa.aa}},
+ \code{\link{AB.Ab.aB.ab}},
+ \code{\link{ABO}},
+ \code{\link{G1G2G3}},
+ \code{\link{MNSs}}.
+
+
}
\examples{
-# See Elandt-Johnson, pp.430,427
-# Estimated variance is approx 0.0021
-y = cbind(68, 11, 13, 21)
-fit = vglm(y ~ 1, AB.Ab.aB.ab2(link=cloglog), trace=TRUE, crit="coef")
-Coef(fit) # Estimated p
-rbind(y, sum(y)*fitted(fit))
-sqrt(diag(vcov(fit)))
+ymat <- cbind(68, 11, 13, 21) # See Elandt-Johnson, pp.430,427
+fit <- vglm(ymat ~ 1, AB.Ab.aB.ab2(link = cloglog), trace = TRUE, crit = "coef")
+Coef(fit) # Estimated p
+rbind(ymat, sum(ymat) * fitted(fit))
+sqrt(diag(vcov(fit))) # Estimated variance is approx 0.0021
}
\keyword{models}
\keyword{regression}
diff --git a/man/ABO.Rd b/man/ABO.Rd
index 09d617e..ce20f83 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -16,10 +16,13 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
Link function applied to \code{pA} and \code{pB}.
See \code{\link{Links}} for more choices.
+
}
\item{ipA, ipO}{
Optional initial value for \code{pA} and \code{pO}.
A \code{NULL} value means values are computed internally.
+
+
}
}
@@ -44,6 +47,7 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
}
\references{
+
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
@@ -71,11 +75,11 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
}
\examples{
-ymatrix = cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name
-fit = vglm(ymatrix ~ 1, ABO(link = identity), trace = TRUE, cri = "coef")
+ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name
+fit <- vglm(ymat ~ 1, ABO(link = identity), trace = TRUE, cri = "coef")
coef(fit, matrix = TRUE)
Coef(fit) # Estimated pA and pB
-rbind(ymatrix, sum(ymatrix) * fitted(fit))
+rbind(ymat, sum(ymat) * fitted(fit))
sqrt(diag(vcov(fit)))
}
\keyword{models}
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index 0458241..9545cd0 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -19,14 +19,17 @@ AICvlm(object, \dots, k = 2)
\item{object}{
Some \pkg{VGAM} object, for example, having
class \code{\link{vglmff-class}}.
+
}
\item{\dots}{
Other possible arguments fed into
\code{logLik} in order to compute the log-likelihood.
+
}
\item{k}{
Numeric, the penalty per parameter to be used;
the default is the classical AIC.
+
}
}
\details{
@@ -37,18 +40,22 @@ AICvlm(object, \dots, k = 2)
One could assign \eqn{k = \log(n)} (\eqn{n} the number of observations)
for the so-called BIC or SBC (Schwarz's Bayesian criterion).
+
This code relies on the log-likelihood being defined, and computed,
for the object.
When comparing fitted objects, the smaller the AIC, the better the fit.
The log-likelihood and hence the AIC is only defined up to an additive
constant.
+
Any estimated scale parameter (in GLM parlance) is used as one
parameter.
+
For VGAMs the nonlinear effective degrees of freedom for each
smoothed component is used. This formula is heuristic.
+
}
\value{
Returns a numeric value with the corresponding AIC (or BIC, or \dots,
@@ -59,6 +66,7 @@ AICvlm(object, \dots, k = 2)
\note{
AIC has not been defined for QRR-VGLMs yet.
+
}
%\references{
@@ -74,6 +82,7 @@ AICvlm(object, \dots, k = 2)
In particular, \code{AIC} should not be run on some \pkg{VGAM} family
functions because of violation of certain regularity conditions, etc.
+
}
\seealso{
@@ -82,16 +91,17 @@ AICvlm(object, \dots, k = 2)
RR-VGLMs are described in \code{\link{rrvglm-class}};
\code{\link[stats]{AIC}}.
+
}
\examples{
-pneumo = transform(pneumo, let=log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=TRUE, reverse=TRUE), pneumo))
-coef(fit1, matrix=TRUE)
+ cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+coef(fit1, matrix = TRUE)
AIC(fit1)
(fit2 <- vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=FALSE, reverse=TRUE), pneumo))
-coef(fit2, matrix=TRUE)
+ cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+coef(fit2, matrix = TRUE)
AIC(fit2)
}
\keyword{models}
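Aside on the k = log(n) remark in the AICvlm hunk above: a BIC-type value can be obtained by passing that penalty through the generic, reusing fit1 from the example (sketch only):

AIC(fit1, k = log(nrow(pneumo)))   # SBC/BIC: penalty log(n) instead of the default 2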
diff --git a/man/Coef.Rd b/man/Coef.Rd
index b89d025..0ced124 100644
--- a/man/Coef.Rd
+++ b/man/Coef.Rd
@@ -83,12 +83,11 @@ Reduced-rank vector generalized linear models.
}
\examples{
-set.seed(123); nn = 1000
-mydata = data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3)) # Original scale
-# parameters are estimated on a log scale:
-fit = vglm(y ~ 1, betaff, mydata, trace = TRUE, crit = "c") # Intercept-only model
-coef(fit, matrix = TRUE) # log scale
-Coef(fit) # On the original scale
+nn <- 1000
+bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3)) # Original scale
+fit <- vglm(y ~ 1, beta.ab, data = bdata, trace = TRUE) # Intercept-only model
+coef(fit, matrix = TRUE) # Both on a log scale
+Coef(fit) # On the original scale
}
\keyword{models}
\keyword{regression}
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
index 63d6a0e..51165dd 100644
--- a/man/Coef.qrrvglm-class.Rd
+++ b/man/Coef.qrrvglm-class.Rd
@@ -22,7 +22,7 @@ linear predictors and \eqn{n} is the number of observations.
linear `coefficients' of the matrix of latent variables.
It is \eqn{M} by \eqn{R}. }
\item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}.
- These correspond to terms of the argument \code{Norrr}. }
+ These correspond to terms of the argument \code{noRRR}. }
\item{\code{C}:}{Of class \code{"matrix"}, \bold{C}, the
canonical coefficients. It has \eqn{R} columns. }
\item{\code{Constrained}:}{Logical. Whether the model is
@@ -44,8 +44,8 @@ linear predictors and \eqn{n} is the number of observations.
}
\item{\code{Maximum}:}{Of class \code{"numeric"}, the
\eqn{M} maximum fitted values. That is, the fitted values
- at the optima for \code{Norrr = ~ 1} models.
- If \code{Norrr} is not \code{~ 1} then these will be \code{NA}s. }
+ at the optima for \code{noRRR = ~ 1} models.
+ If \code{noRRR} is not \code{~ 1} then these will be \code{NA}s. }
\item{\code{NOS}:}{Number of species.}
\item{\code{Optimum}:}{Of class \code{"matrix"}, the values
of the latent variables where the optima are.
@@ -109,18 +109,18 @@ canonical Gaussian ordination.
}
\examples{
-x2 = rnorm(n <- 100)
-x3 = rnorm(n)
-x4 = rnorm(n)
-lv1 = 0 + x3 - 2*x4
-lambda1 = exp(3 - 0.5 * (lv1-0)^2)
-lambda2 = exp(2 - 0.5 * (lv1-1)^2)
-lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2)
-y1 = rpois(n, lambda1)
-y2 = rpois(n, lambda2)
-y3 = rpois(n, lambda3)
-yy = cbind(y1,y2,y3)
-# vvv p1 = cqo(yy ~ x2 + x3 + x4, fam=poissonff, trace=FALSE)
+x2 <- rnorm(n <- 100)
+x3 <- rnorm(n)
+x4 <- rnorm(n)
+lv1 <- 0 + x3 - 2*x4
+lambda1 <- exp(3 - 0.5 * (lv1-0)^2)
+lambda2 <- exp(2 - 0.5 * (lv1-1)^2)
+lambda3 <- exp(2 - 0.5 * ((lv1+4)/2)^2)
+y1 <- rpois(n, lambda1)
+y2 <- rpois(n, lambda2)
+y3 <- rpois(n, lambda3)
+yy <- cbind(y1, y2, y3)
+# vvv p1 <- cqo(yy ~ x2 + x3 + x4, fam=poissonff, trace=FALSE)
\dontrun{
lvplot(p1, y = TRUE, lcol = 1:3, pch = 1:3, pcol = 1:3)
}
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
index 0371a02..aa6e6a7 100644
--- a/man/Coef.qrrvglm.Rd
+++ b/man/Coef.qrrvglm.Rd
@@ -45,6 +45,7 @@ Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
to compare the tolerances with the sites score variability then setting
\code{varlvI=TRUE} is more appropriate.
+
For rank-2 QRR-VGLMs, one of the species can be chosen so that the
angle of its major axis and minor axis is zero, i.e., parallel to
the ordination axes. This means the effect on the latent vars is
@@ -58,6 +59,7 @@ Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
fitting the model, e.g., in the functions \code{\link{Coef.qrrvglm}} and
\code{\link{lvplot.qrrvglm}}.
+
}
\value{
The \bold{A}, \bold{B1}, \bold{C}, \bold{T}, \bold{D} matrices/arrays
@@ -65,6 +67,8 @@ Coef.qrrvglm(object, varlvI = FALSE, reference = NULL, ...)
The returned object has class \code{"Coef.qrrvglm"}
(see \code{\link{Coef.qrrvglm-class}}).
+
+
}
\references{
Yee, T. W. (2004)
@@ -73,20 +77,22 @@ canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
\note{
-Consider an equal-tolerances Poisson/binomial CQO model with \code{Norrr = ~ 1}.
+Consider an equal-tolerances Poisson/binomial CQO model with \code{noRRR = ~ 1}.
For \eqn{R=1} it has about \eqn{2S+p_2}{2*S+p2} parameters.
For \eqn{R=2} it has about \eqn{3S+2 p_2}{3*S+2*p_2} parameters.
Here, \eqn{S} is the number of species, and \eqn{p_2=p-1}{p2=p-1} is
the number of environmental variables making up the latent variable.
For an unequal-tolerances Poisson/binomial CQO model with
-\code{Norrr = ~ 1}, it has about \eqn{3S -1 +p_2}{3*S-1+p2} parameters
+\code{noRRR = ~ 1}, it has about \eqn{3S -1 +p_2}{3*S-1+p2} parameters
for \eqn{R=1}, and about \eqn{6S -3 +2p_2}{6*S -3 +2*p2} parameters
for \eqn{R=2}.
Since the total number of data points is \eqn{nS}{n*S}, where
@@ -94,6 +100,7 @@ Since the total number of data points is \eqn{nS}{n*S}, where
of data points by the number of parameters to get some idea
about how much information the parameters contain.
+
}
% ~Make other sections like Warning with \section{Warning }{....} ~
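Aside on the parameter counts quoted in the note above, a quick arithmetic check with made-up values of S and p2:

S <- 12; p2 <- 6                       # e.g. 12 species, 6 environmental variables
c(eqtol.R1   = 2*S + p2,               # equal tolerances, rank R = 1
  eqtol.R2   = 3*S + 2*p2,             # equal tolerances, rank R = 2
  uneqtol.R1 = 3*S - 1 + p2,           # unequal tolerances, rank R = 1
  uneqtol.R2 = 6*S - 3 + 2*p2)         # unequal tolerances, rank R = 2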
@@ -102,24 +109,25 @@ about how much information the parameters contain.
\code{\link{Coef.qrrvglm-class}},
\code{print.Coef.qrrvglm},
\code{\link{lvplot.qrrvglm}}.
+
+
}
\examples{
set.seed(123)
-x2 = rnorm(n <- 100)
-x3 = rnorm(n)
-x4 = rnorm(n)
-lv1 = 0 + x3 - 2*x4
-lambda1 = exp(3 - 0.5 * (lv1-0)^2)
-lambda2 = exp(2 - 0.5 * (lv1-1)^2)
-lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2) # Unequal tolerances
-y1 = rpois(n, lambda1)
-y2 = rpois(n, lambda2)
-y3 = rpois(n, lambda3)
+x2 <- rnorm(n <- 100)
+x3 <- rnorm(n)
+x4 <- rnorm(n)
+lv1 <- 0 + x3 - 2*x4
+lambda1 <- exp(3 - 0.5 * (lv1-0)^2)
+lambda2 <- exp(2 - 0.5 * (lv1-1)^2)
+lambda3 <- exp(2 - 0.5 * ((lv1+4)/2)^2) # Unequal tolerances
+y1 <- rpois(n, lambda1)
+y2 <- rpois(n, lambda2)
+y3 <- rpois(n, lambda3)
set.seed(111)
-# vvv p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, trace=FALSE)
-\dontrun{
-lvplot(p1, y=TRUE, lcol=1:3, pch=1:3, pcol=1:3)
+# vvv p1 <- cqo(cbind(y1, y2, y3) ~ x2 + x3 + x4, poissonff, trace = FALSE)
+\dontrun{ lvplot(p1, y = TRUE, lcol = 1:3, pch = 1:3, pcol = 1:3)
}
# vvv Coef(p1)
# vvv print(Coef(p1), digits=3)
diff --git a/man/Coef.rrvglm-class.Rd b/man/Coef.rrvglm-class.Rd
index c67ae1c..5987656 100644
--- a/man/Coef.rrvglm-class.Rd
+++ b/man/Coef.rrvglm-class.Rd
@@ -40,10 +40,14 @@ and \eqn{n} is the number of observations.
%}
\references{
+
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
+
}
\author{ Thomas W. Yee }
%\note{ ~~further notes~~ }
@@ -51,18 +55,20 @@ Reduced-rank vector generalized linear models.
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
-\code{\link{Coef.rrvglm}},
-\code{\link{rrvglm}},
-\code{\link{rrvglm-class}},
-\code{print.Coef.rrvglm}.
+ \code{\link{Coef.rrvglm}},
+ \code{\link{rrvglm}},
+ \code{\link{rrvglm-class}},
+ \code{print.Coef.rrvglm}.
+
+
}
\examples{
# Rank-1 stereotype model of Anderson (1984)
-pneumo = transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit = rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
+pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
-print(Coef(fit), digits = 3)
+# print(Coef(fit), digits = 3)
}
\keyword{classes}
diff --git a/man/Coef.rrvglm.Rd b/man/Coef.rrvglm.Rd
index 5d3af9f..6cd793e 100644
--- a/man/Coef.rrvglm.Rd
+++ b/man/Coef.rrvglm.Rd
@@ -28,6 +28,7 @@ Coef.rrvglm(object, ...)
}
\references{
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
@@ -36,7 +37,11 @@ Reduced-rank vector generalized linear models.
}
\author{ Thomas W. Yee }
-\note{ This function is an alternative to \code{coef.rrvglm}. }
+\note{
+This function is an alternative to \code{coef.rrvglm}.
+
+
+}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
@@ -49,12 +54,12 @@ Reduced-rank vector generalized linear models.
\examples{
# Rank-1 stereotype model of Anderson (1984)
-pneumo = transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit = rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
+pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
+fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3, multinomial, pneumo)
coef(fit, matrix = TRUE)
Coef(fit)
-print(Coef(fit), digits = 3)
}
\keyword{models}
\keyword{regression}
+% # print(Coef(fit), digits = 3)
diff --git a/man/Coef.vlm.Rd b/man/Coef.vlm.Rd
index 6259355..416eba9 100644
--- a/man/Coef.vlm.Rd
+++ b/man/Coef.vlm.Rd
@@ -23,17 +23,24 @@ Coef.vlm(object, ...)
link, parameters between 0 and 1 have a logit link.
This function can back-transform the parameter estimate to
the original scale.
+
+
}
\value{
For intercept-only models (e.g., formula is \code{y ~ 1})
the back-transformed parameter estimates can be returned.
+
+
}
\references{
+
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
}
\author{ Thomas W. Yee }
@@ -44,20 +51,22 @@ Reduced-rank vector generalized linear models.
family functions. You should check your results on some
artificial data before applying it to models fitted to
real data.
+
+
}
\seealso{
- \code{\link{Coef}},
- \code{\link[stats]{coef}}.
+ \code{\link{Coef}},
+ \code{\link[stats]{coef}}.
+
+
}
\examples{
-set.seed(123)
-nn = 1000
-y = rbeta(nn, shape1=1, shape2=3)
-# parameters are estimated on a log scale
-fit = vglm(y ~ 1, betaff, tr=TRUE, crit="c") # intercept-only model
-coef(fit, matrix=TRUE) # log scale
+set.seed(123); nn <- 1000
+bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3))
+fit <- vglm(y ~ 1, betaff, data = bdata, trace = TRUE) # intercept-only model
+coef(fit, matrix = TRUE) # log scale
Coef(fit) # On the original scale
}
\keyword{models}
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 1ff22e8..c05bbe2 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -7,6 +7,7 @@
in many \pkg{VGAM} family functions, e.g.,
\code{lsigma},
\code{isigma},
+ \code{gsigma},
 \code{nsimEIM},
\code{parallel} and
\code{zero}.
@@ -14,7 +15,10 @@
}
\usage{
TypicalVGAMfamilyFunction(lsigma = "loge",
- isigma = NULL, parallel = TRUE,
+ isigma = NULL,
+ gsigma = exp(-5:5),
+ parallel = TRUE,
+ apply.parint = FALSE,
shrinkage.init = 0.95,
nointercept = NULL, imethod = 1,
probs.x = c(0.15, 0.85),
@@ -55,6 +59,22 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
}
+ \item{gsigma}{
+ Grid-search initial values can be inputted using an argument
+ beginning with \code{"g"},
+ e.g., \code{"gsigma"}, \code{"gshape"} and \code{"gscale"}.
+ If argument \code{isigma} is inputted then that has precedence over
+ \code{gsigma}, etc.
+% The actual search values will be \code{unique(sort(c(gshape)))}, etc.
+ If the grid search is 2-dimensional then it is advisable not to
+ make the vectors too long as a nested \code{for} loop may be used.
+ Ditto for 3-dimensions.
+
+
+% Then the actual search values will be \code{unique(sort(c(gshape, 1/gshape)))}, etc.
+
+
+ }
\item{parallel}{
A logical, or a simple formula specifying which terms have equal/unequal
coefficients.
@@ -77,6 +97,15 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
}
+ \item{apply.parint}{
+ Logical. It refers to whether the parallelism constraint is
+ applied to the intercept too.
+ By default it is applied in some models but not in others.
+ Used only if \code{parallel = TRUE} (fully or partially with
+ respect to all the explanatory variables).
+
+
+ }
\item{nsimEIM}{
Some \pkg{VGAM} family functions use simulation to obtain an approximate
expected information matrix (EIM).
@@ -151,13 +180,16 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
}
\item{zero}{
An integer specifying which linear/additive predictor is modelled
- as intercepts-only. That is, the regression coefficients are
+ as intercept-only. That is, the regression coefficients are
set to zero for all covariates except for the intercept.
- If \code{zero} is specified then it is a vector with values
+ If \code{zero} is specified then it may be a vector with values
from the set \eqn{\{1,2,\ldots,M\}}.
- The value \code{zero = NULL} means model all linear/additive
+ The value \code{zero = NULL} means model \emph{all} linear/additive
predictors as functions of the explanatory variables.
Here, \eqn{M} is the number of linear/additive predictors.
+ Technically, if \code{zero} contains the value \eqn{j} then
+ the \eqn{j}th row of every constraint matrix (except for the
+ intercept) consists of all 0 values.
Some \pkg{VGAM} family functions allow the \code{zero} argument to
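Aside on the zero argument described in the hunk above: a minimal sketch of its effect on the constraint matrices, using simulated data and the standard vglm()/negbinomial() interface (the data and family choice here are purely illustrative):

library(VGAM)
set.seed(1)
ndata <- data.frame(x2 = runif(100))
ndata <- transform(ndata, y = rnbinom(100, mu = exp(1 + x2), size = exp(1)))
fit <- vglm(y ~ x2, negbinomial(zero = 2), data = ndata)
constraints(fit)           # row 2 of the "x2" constraint matrix is all zeros
coef(fit, matrix = TRUE)   # 2nd linear predictor (log size) is intercept-only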
diff --git a/man/DeLury.Rd b/man/DeLury.Rd
index c5d7846..381c136 100644
--- a/man/DeLury.Rd
+++ b/man/DeLury.Rd
@@ -8,7 +8,7 @@
}
\usage{
-DeLury(catch, effort, type=c("DeLury","Leslie"), ricker=FALSE)
+DeLury(catch, effort, type = c("DeLury","Leslie"), ricker = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -47,8 +47,8 @@ DeLury(catch, effort, type=c("DeLury","Leslie"), ricker=FALSE)
up to interval \eqn{t}, and \eqn{N(t)} be the number of individuals
in the population at time \eqn{t}.
 It is a good idea to plot
- \eqn{\log(C(t))} against \eqn{E(t)} for \code{type="DeLury"} and
- \eqn{C(t)} versus \eqn{K(t)} for \code{type="Leslie"}.
+ \eqn{\log(C(t))} against \eqn{E(t)} for \code{type = "DeLury"} and
+ \eqn{C(t)} versus \eqn{K(t)} for \code{type = "Leslie"}.
The other assumptions are as follows.
%
@@ -87,45 +87,56 @@ The other assumptions are as follows.
\value{
A list with the following components.
+
\item{catch, effort }{
Catch and effort. Same as the original vectors.
These correspond to \eqn{c(t)} and \eqn{e(t)} respectively.
+
}
\item{type, ricker}{Same as input.
+
}
\item{N0}{an estimate of the population size at time 0.
Only valid if the assumptions are satisfied.
+
}
\item{CPUE}{Catch Per Unit Effort \eqn{=C(t)}. }
\item{K, E}{\eqn{K(t)}, \eqn{E(t)}. Only one is computed
depending on \code{type}. }
\item{lmfit}{
the \code{\link[stats:lm]{lm}} object from the
- fit of \code{log(CPUE)} on \code{K} (when \code{type="Leslie"}).
+ fit of \code{log(CPUE)} on \code{K} (when \code{type = "Leslie"}).
Note that the \code{x} component of the object is the model matrix.
+
}
}
\references{
+
DeLury, D. B. (1947)
On the estimation of biological populations.
\emph{Biometrics},
\bold{3}, 145--167.
+
Ricker, W. E. (1975)
Computation and interpretation of biological
statistics of fish populations.
\emph{Bull. Fish. Res. Bd. Can.},
\bold{191}, 382--
- Yee, T. W. (2009)
- VGLMs and VGAMs: an overview for applications in fisheries research.
- In press.
+
+Yee, T. W. (2010)
+VGLMs and VGAMs: an overview for applications in fisheries research.
+\emph{Fisheries Research},
+\bold{101}, 116--126.
+
+
}
@@ -136,46 +147,48 @@ some plots of his are reproduced.
Note that he used log to base 10 whereas natural logs are used here.
His plots had some observations obscured by the y-axis!
+
The DeLury method is not applicable to the data frame
\code{\link{wffc.nc}} since the 2008 World Fly Fishing Competition was
strictly catch-and-release.
+
}
\seealso{ \code{\link{wffc.nc}}. }
\examples{
-pounds = c( 147, 2796, 6888, 7723, 5330, 8839, 6324, 3569, 8120, 8084,
+pounds <- c( 147, 2796, 6888, 7723, 5330, 8839, 6324, 3569, 8120, 8084,
8252, 8411, 6757, 1152, 1500, 11945, 6995, 5851, 3221, 6345,
3035, 6271, 5567, 3017, 4559, 4721, 3613, 473, 928, 2784,
2375, 2640, 3569)
-traps = c( 200, 3780, 7174, 8850, 5793, 9504, 6655, 3685, 8202, 8585,
+traps <- c( 200, 3780, 7174, 8850, 5793, 9504, 6655, 3685, 8202, 8585,
9105, 9069, 7920, 1215, 1471, 11597, 8470, 7770, 3430, 7970,
4740, 8144, 7965, 5198, 7115, 8585, 6935, 1060, 2070, 5725,
5235, 5480, 8300)
-table1 = DeLury(pounds/1000, traps/1000)
+table1 <- DeLury(pounds/1000, traps/1000)
\dontrun{
-with(table1, plot(1+log(CPUE) ~ E, las=1, pch=19, main="DeLury method",
- xlab="E(t)", ylab="1 + log(C(t))", col="blue"))
+with(table1, plot(1+log(CPUE) ~ E, las = 1, pch = 19, main = "DeLury method",
+ xlab = "E(t)", ylab = "1 + log(C(t))", col = "blue"))
}
-omitIndices = -(1:16)
-table1b = DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000)
+omitIndices <- -(1:16)
+table1b <- DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000)
\dontrun{
-with(table1b, plot(1+log(CPUE) ~ E, las=1, pch=19, main="DeLury method",
- xlab="E(t)", ylab="1 + log(C(t))", col="blue"))
-mylmfit = with(table1b, lmfit)
-lines(mylmfit$x[,2], 1 + predict.lm(mylmfit), col="red", lty="dashed")
+with(table1b, plot(1+log(CPUE) ~ E, las = 1, pch = 19, main = "DeLury method",
+ xlab = "E(t)", ylab = "1 + log(C(t))", col = "blue"))
+mylmfit <- with(table1b, lmfit)
+lines(mylmfit$x[, 2], 1 + predict.lm(mylmfit), col = "red", lty = "dashed")
}
-
-omitIndices = -(1:16)
-table2 = DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000, type="L")
+omitIndices <- -(1:16)
+table2 <- DeLury(pounds[omitIndices]/1000, traps[omitIndices]/1000, type = "L")
\dontrun{
-with(table2, plot(CPUE ~ K, las=1, pch=19,
- main="Leslie method; Fig. III",
- xlab="K(t)", ylab="C(t)", col="blue"))
-mylmfit = with(table2, lmfit)
-abline(a=coef(mylmfit)[1], b=coef(mylmfit)[2], col="red", lty="dashed")
+with(table2, plot(CPUE ~ K, las = 1, pch = 19,
+ main = "Leslie method; Fig. III",
+ xlab = "K(t)", ylab = "C(t)", col = "blue"))
+mylmfit <- with(table2, lmfit)
+abline(a = coef(mylmfit)[1], b = coef(mylmfit)[2],
+ col = "orange", lty = "dashed")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
index 55456d1..a5e0765 100644
--- a/man/G1G2G3.Rd
+++ b/man/G1G2G3.Rd
@@ -27,50 +27,57 @@ G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
\code{p3=1-p1-p2} is the third probability.
The parameter \code{f} is the third independent parameter.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
+
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
+
}
\author{ T. W. Yee }
\note{
-The input can be a 6-column matrix of counts,
-with columns corresponding to
-\code{G1G1},
-\code{G1G2},
-\code{G1G3},
-\code{G2G2},
-\code{G2G3},
-\code{G3G3} (in order).
-Alternatively, the input can be a 6-column matrix of
-proportions (so each row adds to 1) and the \code{weights}
-argument is used to specify the total number of counts for each row.
+ The input can be a 6-column matrix of counts,
+ with columns corresponding to
+ \code{G1G1},
+ \code{G1G2},
+ \code{G1G3},
+ \code{G2G2},
+ \code{G2G3},
+ \code{G3G3} (in order).
+ Alternatively, the input can be a 6-column matrix of
+ proportions (so each row adds to 1) and the \code{weights}
+ argument is used to specify the total number of counts for each row.
+
}
\seealso{
-\code{\link{AA.Aa.aa}},
-\code{\link{AB.Ab.aB.ab}},
-\code{\link{AB.Ab.aB.ab2}},
-\code{\link{ABO}},
-\code{\link{MNSs}}.
+ \code{\link{AA.Aa.aa}},
+ \code{\link{AB.Ab.aB.ab}},
+ \code{\link{AB.Ab.aB.ab2}},
+ \code{\link{ABO}},
+ \code{\link{MNSs}}.
+
+
}
\examples{
-y <- cbind(108, 196, 429, 143, 513, 559)
-fit <- vglm(y ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
-fit <- vglm(y ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
+ymat <- cbind(108, 196, 429, 143, 513, 559)
+fit <- vglm(ymat ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
+fit <- vglm(ymat ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
trace = TRUE, crit = "coef")
-fit <- vglm(y ~ 1, G1G2G3(link = "identity"), trace = TRUE)
+fit <- vglm(ymat ~ 1, G1G2G3(link = "identity"), trace = TRUE)
Coef(fit) # Estimated p1, p2 and f
-rbind(y, sum(y)*fitted(fit))
+rbind(ymat, sum(ymat)*fitted(fit))
sqrt(diag(vcov(fit)))
}
\keyword{models}
diff --git a/man/Huggins89.t1.Rd b/man/Huggins89.t1.Rd
new file mode 100644
index 0000000..50f0c3a
--- /dev/null
+++ b/man/Huggins89.t1.Rd
@@ -0,0 +1,110 @@
+\name{Huggins89.t1}
+\alias{Huggins89.t1}
+\docType{data}
+\title{
+ Table 1 of Huggins (1989)
+}
+\description{
+ Simulated capture data set for the linear logistic model
+ depending on an occasion covariate and an individual
+ covariate for 10 trapping occasions and 20 individuals.
+
+
+%% ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(Huggins89.t1)}
+\format{
+ The format is a data frame.
+
+
+%chr "Huggins89.t1"
+
+
+}
+\details{
+ Table 1 of Huggins (1989) gives this toy data set.
+ Note that variables \code{z1},\ldots,\code{z10} are
+ occasion-specific variables. They correspond to the
+ response variables \code{y1},\ldots,\code{y10} which
+ have values 1 for capture and 0 for not captured.
+
+
+%% ~~ If necessary, more details than the __description__ above ~~
+}
+%\source{
+%% ~~ reference to a publication or URL from which the data were obtained ~~
+%}
+\references{
+
+Huggins, R. M. (1989)
+On the statistical analysis of capture experiments.
+\emph{Biometrika},
+\bold{76}, 133--140.
+
+
+%% ~~ possibly secondary sources and usages ~~
+}
+\examples{
+\dontrun{
+small.Huggins89.t1 <- transform(Huggins89.t1, Zedd = z1, Z2 = z2, Z3 = z3)
+small.Huggins89.t1 <- subset(small.Huggins89.t1, y1 + y2 + y3 > 0)
+# fit1 is the bottom equation on p.133, but this is only for the 1st 3 responses.
+# Currently posbernoulli.tb() cannot handle more than 3 Bernoulli variates.
+# The fit is not very good.
+fit1 <-
+ vglm(cbind(y1, y2, y3) ~ x2 + Zedd,
+ xij = list(Zedd ~ z1 + z2 + z3 + Z2 + Z3 - 1),
+ posbernoulli.tb(parallel.t = TRUE), maxit = 155,
+ data = small.Huggins89.t1, trace = TRUE,
+ form2 = ~ x2 + Zedd + z1 + z2 + z3 + Z2 + Z3)
+coef(fit1)
+coef(fit1, matrix = TRUE) # M_t model
+constraints(fit1)
+summary(fit1)
+fit1@extra$N.hat # Estimate of the population size N
+fit1@extra$SE.N.hat # Its standard error
+
+
+fit.t <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+ posbernoulli.t, data = Huggins89.t1, trace = TRUE)
+coef(fit.t)
+coef(fit.t, matrix = TRUE) # M_t model
+summary(fit.t)
+fit.t@extra$N.hat # Estimate of the population size N
+fit.t@extra$SE.N.hat # Its standard error
+
+
+fit.b <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+ posbernoulli.b, data = Huggins89.t1, trace = TRUE)
+coef(fit.b)
+coef(fit.b, matrix = TRUE) # M_b model
+summary(fit.b)
+fit.b@extra$N.hat
+fit.b@extra$SE.N.hat
+
+
+fit.0 <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+ posbernoulli.b(parallel.b = TRUE), data = Huggins89.t1,
+ trace = TRUE)
+coef(fit.0, matrix = TRUE) # M_0 model (version 1)
+coef(fit.0)
+summary(fit.0)
+fit.0@extra$N.hat
+fit.0@extra$SE.N.hat
+
+
+Fit.0 <- vglm(cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9, y10) ~ x2,
+ posbernoulli.t(parallel.t = TRUE), data = Huggins89.t1,
+ trace = TRUE)
+coef(Fit.0)
+coef(Fit.0, matrix = TRUE) # M_0 model (version 2)
+summary(Fit.0)
+Fit.0@extra$N.hat
+Fit.0@extra$SE.N.hat
+}
+}
+\keyword{datasets}
+% data(Huggins89.t1)
+%## maybe str(Huggins89.t1) ; plot(Huggins89.t1) ...
+
+
diff --git a/man/Opt.Rd b/man/Opt.Rd
index 0ef6307..535996c 100644
--- a/man/Opt.Rd
+++ b/man/Opt.Rd
@@ -12,16 +12,21 @@ Opt(object, ...)
\arguments{
\item{object}{ An object for which the computation or
extraction of an optimum (or optima) is meaningful.
+
+
}
\item{\dots}{ Other arguments fed into the specific
methods function of the model. Sometimes they are fed
into the methods function for \code{\link{Coef}}.
+
+
}
}
\details{
Different models can define an optimum in different ways.
Many models have no such notion or definition.
+
Optima occur in quadratic and additive ordination,
e.g., CQO or UQO or CAO.
For these models the optimum is the value of the latent
@@ -34,10 +39,13 @@ Opt(object, ...)
At an optimum, the fitted value of the response is
called the \emph{maximum}.
+
}
\value{
The value returned depends specifically on the methods
function invoked.
+
+
}
\references{
@@ -47,16 +55,20 @@ canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
\note{
In ordination, the optimum of a species is sometimes
called the \emph{species score}.
+
+
}
%\section{Warning }{
%}
@@ -65,11 +77,13 @@ called the \emph{species score}.
\code{Opt.qrrvglm},
\code{\link{Max}},
\code{\link{Tol}}.
+
+
}
\examples{
set.seed(111) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
# vvv Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
# vvv Trocterr, Zoraspin) ~
@@ -79,9 +93,9 @@ hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
# vvv Opt(p1)
\dontrun{
-index = 1:ncol(p1@y)
-persp(p1, col=index, las=1, lwd=2, main="Vertical lines at the optima")
-abline(v=Opt(p1), lty=2, col=index)
+index <- 1:ncol(depvar(p1))
+persp(p1, col = index, las = 1, lwd = 2, main = "Vertical lines at the optima")
+abline(v = Opt(p1), lty = 2, col = index)
}
}
\keyword{models}
diff --git a/man/Perom.Rd b/man/Perom.Rd
new file mode 100644
index 0000000..400e8bf
--- /dev/null
+++ b/man/Perom.Rd
@@ -0,0 +1,75 @@
+\name{Perom}
+\alias{Perom}
+\docType{data}
+\title{
+ Captures of Peromyscus maniculatus
+
+
+%% ~~ data name/kind ... ~~
+}
+\description{
+
+ Captures of \emph{Peromyscus maniculatus} collected at East
+ Stuart Gulch, Colorado, USA.
+
+%% ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(Perom)}
+\format{
+ The format is:
+ chr "Perom"
+
+}
+\details{
+
+ The columns
+ represent the sex (\code{m} or \code{f}),
+ the ages (\code{y}: young, \code{sa}: semi-adult, \code{a}: adult),
+ the weights in grams, and the
+ capture histories of 38 individuals over 6 trapping
+ occasions (1: captured, 0: not captured).
+
+
+ The data set was collected by V. Reid and distributed
+ with the \pkg{CAPTURE} program of Otis et al. (1978).
+
+
+%% ~~ If necessary, more details than the __description__ above ~~
+}
+%\source{
+%% ~~ reference to a publication or URL from which the data were obtained ~~
+%}
+\references{
+
+
+Huggins, R. M. (1991)
+Some practical aspects of a conditional likelihood
+approach to capture experiments.
+\emph{Biometrics},
+\bold{47}, 725--732.
+
+
+
+ Otis, D. L. et al. (1978)
+ Statistical inference from capture data on closed animal populations,
+ \emph{Wildlife Monographs},
+ \bold{62}, 3--135.
+
+
+%% ~~ possibly secondary sources and usages ~~
+}
+
+\seealso{
+ \code{\link[VGAM:posbernoulli.t]{posbernoulli.t}}.
+
+}
+
+\examples{
+head(Perom)
+\dontrun{
+fit1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + age,
+ posbernoulli.t(parallel.t = TRUE), data = Perom, trace = TRUE)
+coef(fit1)
+coef(fit1, matrix = TRUE)
+}}
+\keyword{datasets}
diff --git a/man/Qvar.Rd b/man/Qvar.Rd
index 57639d1..f3cae04 100644
--- a/man/Qvar.Rd
+++ b/man/Qvar.Rd
@@ -15,7 +15,8 @@ Quasi-variances Preprocessing Function
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
-Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
+Qvar(object, factorname = NULL, which.eta = 1,
+ coef.indices = NULL, labels = NULL,
dispersion = NULL, reference.name = "(reference)", estimates = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -31,6 +32,19 @@ Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
%% ~~Describe \code{object} here~~
}
+\item{which.eta}{
+ A single integer from the set \code{1:M}.
+ Specifies which linear predictor to use.
+ Let the value of \code{which.eta} be called \eqn{j}.
+ Then the factor should appear in that linear predictor, hence
+ the \eqn{j}th row of the constraint matrix corresponding
+ to the factor should have at least one nonzero value.
+ Currently the \eqn{j}th row must have exactly one nonzero value
+ because programming it for more than one nonzero value is difficult.
+
+
+}
+
\item{factorname}{
Character.
If the \code{\link{vglm}} object contains more than one
@@ -194,6 +208,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
+# Example 1
data("ships", package = "MASS")
Shipmodel <- vglm(incidents ~ type + year + period,
@@ -221,6 +236,36 @@ fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
(QuasiVar <- diag(predict(fit3)[, c(TRUE, FALSE)]) / 2) # Version 2
 (QuasiSE <- sqrt(QuasiVar))
\dontrun{ plotqvar(fit3) }
+
+
+# Example 2: a model with M > 1 linear predictors
+\dontrun{ require(VGAMdata)
+xs.nz.f <- subset(xs.nz, sex == "F")
+xs.nz.f <- subset(xs.nz.f, !is.na(babies) & !is.na(age) & !is.na(ethnic))
+xs.nz.f$babies <- as.numeric(as.character(xs.nz.f$babies))
+xs.nz.f <- subset(xs.nz.f, babies >= 0)
+xs.nz.f <- subset(xs.nz.f, as.numeric(as.character(ethnic)) <= 2)
+
+clist <- list("bs(age, df = 4)" = rbind(1, 0),
+ "bs(age, df = 3)" = rbind(0, 1),
+ "ethnic" = diag(2),
+ "(Intercept)" = diag(2))
+fit1 <- vglm(babies ~ bs(age, df = 4) + bs(age, df = 3) + ethnic,
+ zipoissonff(zero = NULL), xs.nz.f,
+ constraints = clist, trace = TRUE)
+Fit1 <- rcim(Qvar(fit1, "ethnic", which.eta = 1),
+ normal1("explink", imethod = 1), maxit = 99, trace = TRUE)
+Fit2 <- rcim(Qvar(fit1, "ethnic", which.eta = 2),
+ normal1("explink", imethod = 1), maxit = 99, trace = TRUE)
+}
+\dontrun{ par(mfrow = c(1, 2))
+plotqvar(Fit1, scol = "blue", pch = 16,
+ main = expression(eta[1]),
+ slwd = 1.5, las = 1, length.arrows = 0.07)
+plotqvar(Fit2, scol = "blue", pch = 16,
+ main = expression(eta[2]),
+ slwd = 1.5, las = 1, length.arrows = 0.07)
+}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
diff --git a/man/SUR.Rd b/man/SUR.Rd
new file mode 100644
index 0000000..5c0ffc7
--- /dev/null
+++ b/man/SUR.Rd
@@ -0,0 +1,192 @@
+\name{SUR}
+\alias{SUR}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Seemingly Unrelated Regressions
+%% ~~function to do ... ~~
+}
+\description{
+Fits a system of seemingly unrelated regressions.
+%% ~~ A concise (1-5 lines) description of what the function does. ~~
+}
+\usage{
+SUR(mle.normal = FALSE,
+ divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
+ parallel = FALSE, apply.parint = TRUE,
+ Varcov = NULL, matrix.arg = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+% \item{estimator}{
+%Character.
+%What estimator is computed.
+%% ~~Describe \code{estimator} here~~
+%}
+
+ \item{mle.normal}{
+ Logical.
+ If \code{TRUE} then the MLE, assuming multivariate normal errors,
+ is computed;
+ the effect is just to add a \code{loglikelihood} slot to the
+ returned object.
+ Then it results in the \emph{maximum likelihood estimator}.
+
+
+}
+
+
+ \item{divisor}{
+Character; partial matching is allowed, and the first choice is the default.
+The divisor for the estimate of the covariances.
+If \code{"n"} then the estimate will be biased.
+If one of the other choices is used then the estimate will be unbiased for some elements.
+If \code{mle.normal = TRUE} and this argument is not \code{"n"} then
+a warning or an error will result.
+
+
+}
+ \item{parallel, apply.parint}{
+ See
+ \code{\link{CommonVGAMffArguments}}.
+
+
+}
+ \item{Varcov}{
+ Numeric.
+ This may be assigned a variance-covariance of the errors.
+ If \code{matrix.arg} then this is a \eqn{M \times M}{M x M} matrix.
+ If \code{!matrix.arg} then this is the same matrix supplied in
+ matrix-band format (a vector with at least \eqn{M} and
+ at most \code{M*(M+1)/2} elements).
+
+
+}
+ \item{matrix.arg}{
+ Logical.
+ Of length one.
+
+
+}
+}
+\details{
+ Proposed by Zellner (1962), the basic
+ seemingly unrelated regressions (SUR)
+ model is a set of LMs (\eqn{M > 1} of them) tied together
+ at the error term level.
+ Each LM's model matrix may potentially have its own set
+ of predictor variables.
+
+
+ Zellner's efficient (ZEF) estimator (also known as
+ \emph{Zellner's two-stage Aitken estimator})
+ can be obtained by setting
+ \code{maxit = 1}
+ (and possibly \code{divisor = "sqrt"} or
+ \code{divisor = "n-max"}).
+
+
+ The default value of \code{maxit} (in \code{\link{vglm.control}})
+ probably means the \emph{iterative GLS} (IGLS) estimator is computed, because
+ IRLS will usually iterate to convergence.
+ IGLS means, at each iteration, the residuals are used to estimate
+ the error variance-covariance matrix, and then the matrix is used
+ in the GLS.
+ The IGLS estimator is also known
+ as \emph{Zellner's iterative Aitken estimator}, or IZEF.
+
+
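+
+  As a sketch of the GLS step just described (standard SUR theory; the
+  notation here is not used elsewhere in this file): with \eqn{X} the
+  stacked model matrix of all \eqn{M} equations and the error covariances
+  estimated by
+  \eqn{\hat{\Sigma}_{jk} = e_j^T e_k / d_{jk}}{Sigma[j,k] = e[j]'e[k] / d[j,k]},
+  where the divisor \eqn{d_{jk}}{d[j,k]} is chosen by \code{divisor},
+  each iteration computes
+  \deqn{\hat{\beta} = \{X^T (\hat{\Sigma}^{-1} \otimes I_n) X\}^{-1}
+                        X^T (\hat{\Sigma}^{-1} \otimes I_n) y.}{%
+    beta = (X' (Sigma^(-1) kron I_n) X)^(-1) X' (Sigma^(-1) kron I_n) y.}
+  Iterating to convergence gives IGLS (IZEF), whereas a single
+  iteration (\code{maxit = 1}) gives ZEF.
+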
+}
+\value{
+
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as
+ \code{\link{vglm}} and \code{\link{vgam}}.
+
+
+}
+\references{
+
+ Zellner, A. (1962)
+ An Efficient Method of Estimating Seemingly Unrelated
+ Regressions and Tests for Aggregation Bias.
+ \emph{J. Amer. Statist. Assoc.},
+ \bold{57}(298), 348--368.
+
+
+ Kmenta, J. and Gilbert, R. F. (1968)
+ Small Sample Properties of Alternative Estimators
+ of Seemingly Unrelated Regressions.
+ \emph{J. Amer. Statist. Assoc.},
+ \bold{63}(324), 1180--1200.
+
+
+}
+\author{
+
+ T. W. Yee.
+
+
+}
+\section{Warning }{
+ The default convergence criterion may be a little loose.
+ Try setting \code{epsilon = 1e-11}, especially
+ with \code{mle.normal = TRUE}.
+
+
+}
+\note{
+ The fitted object has slot \code{@extra$ncols_X_lm} which is
+ an \eqn{M}-vector giving the number of parameters for each LM.
+ Also, \code{@misc$values.divisor} is the \eqn{M}-vector of
+ \code{divisor} values.
+
+
+ Constraint matrices are needed in order to specify which response
+ variables each term on the RHS of the formula is a
+ regressor for.
+ See the \code{constraints} argument of \code{\link{vglm}}
+ for more information.
+
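+  For instance, in the example below \code{"capital.g" = rbind(1, 0)}
+  makes \code{capital.g} a regressor for the first response
+  (\code{invest.g}) only, whereas \code{"(Intercept)" = diag(2)} gives
+  each response its own intercept.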
+
+% This \pkg{VGAM} family function is currently experimental.
+
+
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{normal1}},
+ \code{\link{gew}}.
+
+
+}
+\examples{
+# Obtain some of the results of p.1199 of Kmenta and Gilbert (1968)
+clist <- list("(Intercept)" = diag(2),
+ "capital.g" = rbind(1, 0),
+ "value.g" = rbind(1, 0),
+ "capital.w" = rbind(0, 1),
+ "value.w" = rbind(0, 1))
+zef1 <- vglm(cbind(invest.g, invest.w) ~
+ capital.g + value.g + capital.w + value.w,
+ SUR(divisor = "sqrt"), maxit = 1,
+ data = gew, trace = TRUE, constraints = clist)
+
+round(coef(zef1, matrix = TRUE), dig = 4) # ZEF
+zef1@extra$ncols_X_lm
+zef1@misc$divisor
+zef1@misc$values.divisor
+round(sqrt(diag(vcov(zef1))), dig = 4) # SEs
+
+mle1 <- vglm(cbind(invest.g, invest.w) ~
+ capital.g + value.g + capital.w + value.w,
+ SUR(mle.normal = TRUE, divisor = "n-max"),
+ epsilon = 1e-11,
+ data = gew, trace = TRUE, constraints = clist)
+round(coef(mle1, matrix = TRUE), dig = 4) # MLE
+round(sqrt(diag(vcov(mle1))), dig = 4) # SEs
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{models}
+\keyword{regression}
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index 5f8890a..2ca157d 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -32,6 +32,8 @@ For detailed control of fitting,
each of these has its own control function, e.g.,
\code{\link{vglm.control}}.
The package uses S4 (see \code{\link[methods]{methods-package}}).
+A companion package called \pkg{VGAMdata} contains some larger
+data sets which were shifted from \pkg{VGAM}.
The classes of GLMs and GAMs are special cases of VGLMs and VGAMs.
Maintainer: Thomas Yee \email{t.yee@auckland.ac.nz}.
This package is undergoing continual development and improvement.
Until my monograph comes out and this package is released as version 1.0-0
the user should treat everything as subject to change.
- This includes the family function names, many of the internals,
- the use of link functions, and slot names.
+ This includes the
+ family function names,
+ argument names,
+ many of the internals,
+ the use of link functions,
+ and slot names.
Some future pain can be minimized by using good programming
techniques, e.g., using extractor/accessor functions such as
\code{coef()}, \code{weights()}, \code{vcov()},
\code{predict()}.
Nevertheless, please expect changes in all aspects of the package.
+ See the \code{NEWS} file for a list of changes from version to
+ version.
@@ -108,14 +116,6 @@ Vector Generalized Linear and Additive Models.
\emph{Monograph in preparation}.
-
-Yee, T. W. (2010)
-The \pkg{VGAM} package for categorical data analysis.
-\emph{Journal of Statistical Software},
-\bold{32}, 1--34.
-\url{http://www.jstatsoft.org/v32/i10/}.
-
-
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
@@ -150,6 +150,20 @@ The \code{VGAM} Package.
\emph{R News}, \bold{8}, 28--39.
+Yee, T. W. (2010)
+The \pkg{VGAM} package for categorical data analysis.
+\emph{Journal of Statistical Software},
+\bold{32}, 1--34.
+\url{http://www.jstatsoft.org/v32/i10/}.
+
+
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
+
+
+
+
(Oldish) documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee/VGAM}
contains some further information and examples.
@@ -178,12 +192,12 @@ contains some further information and examples.
\examples{
# Example 1; proportional odds model
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-depvar(fit) # Better than using fit@y; dependent variable (response)
-weights(fit, type = "prior") # Number of observations
-coef(fit, matrix = TRUE) # p.179, in McCullagh and Nelder (1989)
-constraints(fit) # Constraint matrices
-summary(fit)
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+depvar(fit1) # Better than using fit1@y; dependent variable (response)
+weights(fit1, type = "prior") # Number of observations
+coef(fit1, matrix = TRUE) # p.179, in McCullagh and Nelder (1989)
+constraints(fit1) # Constraint matrices
+summary(fit1)
# Example 2; zero-inflated Poisson model
@@ -192,56 +206,57 @@ zdata <- transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
lambda = loge( 0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0))
with(zdata, table(y))
-fit <- vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
-coef(fit, matrix = TRUE) # These should agree with the above values
+fit2 <- vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
+coef(fit2, matrix = TRUE) # These should agree with the above values
# Example 3; fit a two species GAM simultaneously
-fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
+fit3 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
binomialff(mv = TRUE), hunua)
-coef(fit2, matrix = TRUE) # Not really interpretable
-\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
+coef(fit3, matrix = TRUE) # Not really interpretable
+\dontrun{ plot(fit3, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
ooo <- with(hunua, order(altitude))
-with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], type = "l", lwd = 2,
+with(hunua, matplot(altitude[ooo], fitted(fit3)[ooo, ], type = "l",
+ lwd = 2, col = 3:4,
xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
main = "Two plant species' response curves", ylim = c(0, 0.8)))
with(hunua, rug(altitude)) }
# Example 4; LMS quantile regression
-fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), dat = bmi.nz,
- trace = TRUE)
-head(predict(fit))
-head(fitted(fit))
-head(bmi.nz) # Person 1 is near the lower quartile among people his age
-head(cdf(fit))
+fit4 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1),
+ data = bmi.nz, trace = TRUE)
+head(predict(fit4))
+head(fitted(fit4))
+head(bmi.nz) # Person 1 is near the lower quartile among people his age
+head(cdf(fit4))
\dontrun{ par(mfrow = c(1, 1), bty = "l", mar = c(5,4,4,3)+0.1, xpd = TRUE)
-qtplot(fit, percentiles = c(5,50,90,99), main = "Quantiles", las = 1,
+qtplot(fit4, percentiles = c(5,50,90,99), main = "Quantiles", las = 1,
xlim = c(15, 90), ylab = "BMI", lwd = 2, lcol = 4) # Quantile plot
ygrid <- seq(15, 43, len = 100) # BMI ranges
par(mfrow = c(1, 1), lwd = 2) # Density plot
-aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
+aa <- deplot(fit4, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
aa
-aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
-aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
+aa <- deplot(fit4, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
+aa <- deplot(fit4, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
Attach = TRUE)
aa@post$deplot # Contains density function values
}
# Example 5; GEV distribution for extremes
-(fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
-head(fitted(fit))
-coef(fit, matrix = TRUE)
-Coef(fit)
-vcov(fit)
-vcov(fit, untransform = TRUE)
-sqrt(diag(vcov(fit))) # Approximate standard errors
-\dontrun{ rlplot(fit) }
+(fit5 <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
+head(fitted(fit5))
+coef(fit5, matrix = TRUE)
+Coef(fit5)
+vcov(fit5)
+vcov(fit5, untransform = TRUE)
+sqrt(diag(vcov(fit5))) # Approximate standard errors
+\dontrun{ rlplot(fit5) }
}
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 77f27b3..9ce1175 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -36,6 +36,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
regression quantile, which is easier to understand.
See below for details.
+
}
\item{llocation, lscale, lkappa}{ Character.
Parameter link functions for
@@ -49,6 +50,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
However, \code{llocation} is best left alone since the theory
only works properly with the identity link.
+
}
\item{ilocation, iscale, ikappa}{
Optional initial values.
@@ -56,6 +58,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
appropriate length.
The default is to choose the value internally.
+
}
\item{parallelLocation, intparloc}{ Logical.
Should the quantiles be parallel on the transformed scale
@@ -78,6 +81,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
Initialization method.
Either the value 1, 2, 3 or 4.
+
}
\item{dfmu.init}{
Degrees of freedom for the cubic smoothing spline fit applied to
@@ -85,6 +89,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
See \code{\link{vsmooth.spline}}.
Used only when \code{imethod = 3}.
+
}
\item{shrinkage.init}{
How much shrinkage is used when initializing \eqn{\xi}{xi}.
@@ -94,6 +99,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
This argument is used only when \code{imethod = 4}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{Scale.arg}{
The value of the scale parameter \eqn{\sigma}{sigma}.
@@ -108,12 +114,14 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
% This is because the expected information matrix is diagonal,
% i.e., the location and scale parameters are asymptotically independent.
+
}
\item{digt }{
Passed into \code{\link[base]{Round}} as the \code{digits} argument
for the \code{tau} values;
used cosmetically for labelling.
+
}
\item{zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
@@ -121,6 +129,7 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
the default is to model all the \eqn{\sigma}{sigma}
and \eqn{\kappa}{kappa} as an intercept-only term.
+
}
}
\details{
@@ -276,24 +285,24 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\code{\link{amlnormal}},
\code{\link{koenker}}.
+
}
\examples{
+\dontrun{
# Example 1: quantile regression with smoothing splines
adata <- data.frame(x = sort(runif(n <- 500)))
mymu <- function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2)
adata <- transform(adata, y = rpois(n, lambda = mymu(x)))
mytau <- c(0.25, 0.75); mydof <- 4
-fit = vgam(y ~ s(x, df = mydof),
- alaplace1(tau = mytau, llocation = "loge",
- parallelLoc = FALSE),
- adata, trace = TRUE)
-fitp = vgam(y ~ s(x, df = mydof),
- alaplace1(tau = mytau, llocation = "loge", parallelLoc = TRUE),
- adata, trace = TRUE)
+fit <- vgam(y ~ s(x, df = mydof),
+ alaplace1(tau = mytau, llocation = "loge",
+ parallelLoc = FALSE), adata, trace = TRUE)
+fitp <- vgam(y ~ s(x, df = mydof), data = adata, trace = TRUE,
+ alaplace1(tau = mytau, llocation = "loge", parallelLoc = TRUE))
-\dontrun{ par(las = 1); mylwd = 1.5
+par(las = 1); mylwd = 1.5
with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
main = "Example 1; green: parallelLoc = TRUE",
ylab = "y", pch = "o", cex = 0.75))
@@ -301,31 +310,31 @@ with(adata, matlines(x, fitted(fit ), col = "blue",
lty = "solid", lwd = mylwd))
with(adata, matlines(x, fitted(fitp), col = "green",
lty = "solid", lwd = mylwd))
-finexgrid = seq(0, 1, len = 1001)
+finexgrid <- seq(0, 1, len = 1001)
for(ii in 1:length(mytau))
lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
- col = "blue", lwd = mylwd) }
+ col = "blue", lwd = mylwd)
fit@extra # Contains useful information
# Example 2: regression quantile at a new tau value from an existing fit
# Nb. regression splines are used here since it is easier.
-fitp2 = vglm(y ~ bs(x, df = mydof),
- family = alaplace1(tau = mytau, llocation = "loge",
- parallelLoc = TRUE),
- adata, trace = TRUE)
+fitp2 <- vglm(y ~ bs(x, df = mydof),
+ family = alaplace1(tau = mytau, llocation = "loge",
+ parallelLoc = TRUE),
+ adata, trace = TRUE)
-newtau = 0.5 # Want to refit the model with this tau value
-fitp3 = vglm(y ~ 1 + offset(predict(fitp2)[,1]),
+newtau <- 0.5 # Want to refit the model with this tau value
+fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[,1]),
family = alaplace1(tau = newtau, llocation = "loge"),
adata)
-\dontrun{ with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
- pch = "o", cex = 0.75, ylab = "y",
- main = "Example 2; parallelLoc = TRUE"))
+with(adata, plot(x, jitter(y, factor = 0.5), col = "red",
+ pch = "o", cex = 0.75, ylab = "y",
+ main = "Example 2; parallelLoc = TRUE"))
with(adata, matlines(x, fitted(fitp2), col = "blue",
lty = 1, lwd = mylwd))
with(adata, matlines(x, fitted(fitp3), col = "black",
- lty = 1, lwd = mylwd)) }
+ lty = 1, lwd = mylwd))
@@ -333,24 +342,24 @@ with(adata, matlines(x, fitted(fitp3), col = "black",
# successive solutions which are added to previous solutions; use a log
# link to ensure increasing quantiles at any value of x.
-mytau = seq(0.2, 0.9, by = 0.1)
-answer = matrix(0, nrow(adata), length(mytau)) # Stores the quantiles
-adata = transform(adata, offsety = y*0)
-usetau = mytau
+mytau <- seq(0.2, 0.9, by = 0.1)
+answer <- matrix(0, nrow(adata), length(mytau)) # Stores the quantiles
+adata <- transform(adata, offsety = y*0)
+usetau <- mytau
for(ii in 1:length(mytau)) {
# cat("\n\nii = ", ii, "\n")
- adata = transform(adata, usey = y-offsety)
- iloc = ifelse(ii == 1, with(adata, median(y)), 1.0) # Well-chosen!
- mydf = ifelse(ii == 1, 5, 3) # Maybe less smoothing will help
- lloc = ifelse(ii == 1, "identity", "loge") # 2nd value must be "loge"
- fit3 = vglm(usey ~ ns(x, df = mydf), data = adata, trace = TRUE,
- alaplace1(tau = usetau[ii], lloc = lloc, iloc = iloc))
- answer[,ii] = (if(ii == 1) 0 else answer[,ii-1]) + fitted(fit3)
- adata = transform(adata, offsety = answer[,ii])
+ adata <- transform(adata, usey = y-offsety)
+ iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0) # Well-chosen!
+ mydf <- ifelse(ii == 1, 5, 3) # Maybe less smoothing will help
+ lloc <- ifelse(ii == 1, "identity", "loge") # 2nd value must be "loge"
+ fit3 <- vglm(usey ~ ns(x, df = mydf), data = adata, trace = TRUE,
+ alaplace1(tau = usetau[ii], lloc = lloc, iloc = iloc))
+ answer[,ii] <- (if(ii == 1) 0 else answer[,ii-1]) + fitted(fit3)
+ adata <- transform(adata, offsety = answer[,ii])
}
# Plot the results.
-\dontrun{ with(adata, plot(x, y, col = "blue",
+with(adata, plot(x, y, col = "blue",
main = paste("Noncrossing and nonparallel; tau = ",
paste(mytau, collapse = ", "))))
with(adata, matlines(x, answer, col = "orange", lty = 1))
@@ -359,7 +368,8 @@ with(adata, matlines(x, answer, col = "orange", lty = 1))
with(adata, plot(x, y, col = "blue", xlim = c(0, 0.2), ylim = 0:1,
main = paste("Noncrossing and nonparallel; tau = ",
paste(mytau, collapse = ", "))))
-with(adata, matlines(x, answer, col = "orange", lty = 1)) }
+with(adata, matlines(x, answer, col = "orange", lty = 1))
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index ec59d62..7b25b66 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -100,7 +100,7 @@ x <- seq(-5, 5, by = 0.01)
loc <- 0; sigma <- 1.5; kappa <- 2
\dontrun{ plot(x, dalap(x, loc, sigma, kappa = kappa), type = "l", col = "blue",
main = "Blue is density, red is cumulative distribution function",
- ylim = c(0,1), sub = "Purple are 5, 10, ..., 95 percentiles",
+ ylim = c(0, 1), sub = "Purple are 5, 10, ..., 95 percentiles",
las = 1, ylab = "", cex.main = 0.5)
abline(h = 0, col = "blue", lty = 2)
lines(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
@@ -109,7 +109,7 @@ lines(qalap(seq(0.05, 0.95, by = 0.05), loc, sigma, kappa = kappa),
lines(x, palap(x, loc, sigma, kappa = kappa), type = "l", col = "red")
abline(h = 0, lty = 2) }
-pp = seq(0.05, 0.95, by = 0.05) # Test two functions
+pp <- seq(0.05, 0.95, by = 0.05) # Test two functions
max(abs(palap(qalap(pp, loc, sigma, kappa = kappa),
loc, sigma, kappa = kappa) - pp)) # Should be 0
}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 7edc971..7bf7d38 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -113,34 +113,34 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
\examples{
# Example: binomial data with lots of trials per observation
set.seed(1234)
-sizevec = rep(100, length=(nn <- 200))
-mydat = data.frame(x = sort(runif(nn)))
-mydat = transform(mydat, prob = logit(-0+2.5*x+x^2, inverse = TRUE))
-mydat = transform(mydat, y = rbinom(nn, size = sizevec, prob = prob))
-(fit = vgam(cbind(y, sizevec - y) ~ s(x, df = 3),
- amlbinomial(w = c(0.01, 0.2, 1, 5, 60)),
- mydat, trace = TRUE))
+sizevec <- rep(100, length = (nn <- 200))
+mydat <- data.frame(x = sort(runif(nn)))
+mydat <- transform(mydat, prob = logit(-0 + 2.5*x + x^2, inverse = TRUE))
+mydat <- transform(mydat, y = rbinom(nn, size = sizevec, prob = prob))
+(fit <- vgam(cbind(y, sizevec - y) ~ s(x, df = 3),
+ amlbinomial(w = c(0.01, 0.2, 1, 5, 60)),
+ mydat, trace = TRUE))
fit@extra
\dontrun{
-par(mfrow=c(1,2))
+par(mfrow = c(1,2))
# Quantile plot
-with(mydat, plot(x, jitter(y), col="blue", las=1, main=
- paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
+with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
+ paste(paste(round(fit@extra$percentile, dig = 1), collapse = ", "),
"percentile-expectile curves")))
-with(mydat, matlines(x, 100 * fitted(fit), lwd=2, col="blue", lty=1))
+with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
# Compare the fitted expectiles with the quantiles
-with(mydat, plot(x, jitter(y), col="blue", las=1, main=
- paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
+with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
+ paste(paste(round(fit@extra$percentile, dig = 1), collapse = ", "),
"percentile curves are red")))
-with(mydat, matlines(x, 100 * fitted(fit), lwd=2, col="blue", lty=1))
+with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
for(ii in fit@extra$percentile)
with(mydat, matlines(x, 100 *
- qbinom(p=ii/100, size=sizevec, prob=prob) / sizevec,
- col="red", lwd=2, lty=1))
+ qbinom(p = ii/100, size = sizevec, prob = prob) / sizevec,
+ col = "red", lwd = 2, lty = 1))
}
}
\keyword{models}
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index dbbe289..002736a 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -131,13 +131,13 @@ amlexponential(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
}
\examples{
-nn = 2000
-mydat = data.frame(x = seq(0, 1, length = nn))
-mydat = transform(mydat, mu = loge(-0+1.5*x+0.2*x^2, inverse = TRUE))
-mydat = transform(mydat, mu = loge(0-sin(8*x), inverse = TRUE))
-mydat = transform(mydat, y = rexp(nn, rate = 1/mu))
-(fit = vgam(y ~ s(x,df = 5), amlexponential(w = c(0.001,0.1,0.5,5,60)),
- mydat, trace = TRUE))
+nn <- 2000
+mydat <- data.frame(x = seq(0, 1, length = nn))
+mydat <- transform(mydat, mu = loge(-0 + 1.5*x + 0.2*x^2, inverse = TRUE))
+mydat <- transform(mydat, mu = loge(0 - sin(8*x), inverse = TRUE))
+mydat <- transform(mydat, y = rexp(nn, rate = 1/mu))
+(fit <- vgam(y ~ s(x,df = 5), amlexponential(w = c(0.001, 0.1, 0.5, 5, 60)),
+ mydat, trace = TRUE))
fit@extra
\dontrun{ # These plots are against the sqrt scale (to increase clarity)
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 761f945..12e6529 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -138,60 +138,53 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
}
\examples{
+\dontrun{
# Example 1
-ooo = with(bmi.nz, order(age))
-bmi.nz = bmi.nz[ooo,] # Sort by age
-(fit = vglm(BMI ~ bs(age), fam=amlnormal(w.aml=0.1), bmi.nz))
-fit@extra # Gives the w value and the percentile
-coef(fit, matrix=TRUE)
+ooo <- with(bmi.nz, order(age))
+bmi.nz <- bmi.nz[ooo,] # Sort by age
+(fit <- vglm(BMI ~ bs(age), fam = amlnormal(w.aml = 0.1), bmi.nz))
+fit@extra # Gives the w value and the percentile
+coef(fit, matrix = TRUE)
-\dontrun{
# Quantile plot
-with(bmi.nz, plot(age, BMI, col="blue", main=
- paste(round(fit@extra$percentile, dig=1),
+with(bmi.nz, plot(age, BMI, col = "blue", main =
+ paste(round(fit@extra$percentile, dig = 1),
"expectile-percentile curve")))
-with(bmi.nz, lines(age, c(fitted(fit)), col="black")) }
-
-
+with(bmi.nz, lines(age, c(fitted(fit)), col = "black"))
# Example 2
# Find the w values that give the 25, 50 and 75 percentiles
-findw = function(w, percentile=50) {
- fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=w), data=bmi.nz)
- fit2@extra$percentile - percentile
+findw <- function(w, percentile = 50) {
+ fit2 <- vglm(BMI ~ bs(age), fam = amlnormal(w = w), data = bmi.nz)
+ fit2@extra$percentile - percentile
}
-\dontrun{
# Quantile plot
-with(bmi.nz, plot(age, BMI, col="blue", las=1, main=
- "25, 50 and 75 expectile-percentile curves")) }
-for(myp in c(25,50,75)) {
+with(bmi.nz, plot(age, BMI, col = "blue", las = 1, main =
+ "25, 50 and 75 expectile-percentile curves"))
+for(myp in c(25, 50, 75)) {
# Note: uniroot() can only find one root at a time
- bestw = uniroot(f=findw, interval=c(1/10^4, 10^4), percentile=myp)
- fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=bestw$root), data=bmi.nz)
-\dontrun{
- with(bmi.nz, lines(age, c(fitted(fit2)), col="red")) }
+ bestw <- uniroot(f = findw, interval = c(1/10^4, 10^4), percentile = myp)
+ fit2 <- vglm(BMI ~ bs(age), fam = amlnormal(w = bestw$root), data = bmi.nz)
+ with(bmi.nz, lines(age, c(fitted(fit2)), col = "red"))
}
-
-
# Example 3; this is Example 1 but with smoothing splines and
# a vector w and a parallelism assumption.
-ooo = with(bmi.nz, order(age))
-bmi.nz = bmi.nz[ooo,] # Sort by age
-fit3 = vgam(BMI ~ s(age, df=4), fam=amlnormal(w=c(.1,1,10), parallel=TRUE),
- bmi.nz, trac=TRUE)
+ooo <- with(bmi.nz, order(age))
+bmi.nz <- bmi.nz[ooo,] # Sort by age
+fit3 <- vgam(BMI ~ s(age, df = 4), bmi.nz, trace = TRUE,
+ fam = amlnormal(w = c(0.1, 1, 10), parallel = TRUE))
fit3@extra # The w values, percentiles and weighted deviances
# The linear components of the fit; not for human consumption:
-coef(fit3, matrix=TRUE)
+coef(fit3, matrix = TRUE)
-\dontrun{
# Quantile plot
-with(bmi.nz, plot(age, BMI, col="blue", main=
- paste(paste(round(fit3@extra$percentile, dig=1), collapse=", "),
+with(bmi.nz, plot(age, BMI, col="blue", main =
+ paste(paste(round(fit3@extra$percentile, dig = 1), collapse = ", "),
"expectile-percentile curves")))
-with(bmi.nz, matlines(age, fitted(fit3), col=1:fit3@extra$M, lwd=2))
-with(bmi.nz, lines(age, c(fitted(fit )), col="black")) # For comparison
+with(bmi.nz, matlines(age, fitted(fit3), col = 1:fit3@extra$M, lwd = 2))
+with(bmi.nz, lines(age, c(fitted(fit )), col = "black")) # For comparison
}
}
\keyword{models}
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index e00ed12..9a79416 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -141,18 +141,18 @@ amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
\examples{
set.seed(1234)
-mydat = data.frame(x = sort(runif(nn <- 200)))
-mydat = transform(mydat, y = rpois(nn, exp(0-sin(8*x))))
-(fit = vgam(y ~ s(x), fam=amlpoisson(w.aml=c(0.02, 0.2, 1, 5, 50)),
- mydat, trace=TRUE))
+mydat <- data.frame(x = sort(runif(nn <- 200)))
+mydat <- transform(mydat, y = rpois(nn, exp(0 - sin(8*x))))
+(fit <- vgam(y ~ s(x), fam = amlpoisson(w.aml = c(0.02, 0.2, 1, 5, 50)),
+ mydat, trace = TRUE))
fit@extra
\dontrun{
# Quantile plot
-with(mydat, plot(x, jitter(y), col="blue", las=1, main=
- paste(paste(round(fit@extra$percentile, dig=1), collapse=", "),
+with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
+ paste(paste(round(fit@extra$percentile, dig = 1), collapse = ", "),
"percentile-expectile curves")))
-with(mydat, matlines(x, fitted(fit), lwd=2)) }
+with(mydat, matlines(x, fitted(fit), lwd = 2)) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/auuc.Rd b/man/auuc.Rd
index 4aa502b..b5a0d0d 100644
--- a/man/auuc.Rd
+++ b/man/auuc.Rd
@@ -23,18 +23,26 @@ and Engineering have been combined) and the socio-economic
status (SES) of their fathers
(1 = highest, down to 4 = lowest).
+
}
\source{
Dr Tony Morrison.
+
+
}
\references{
+
Wild, C. J. and Seber, G. A. F. (2000)
\emph{Chance Encounters: A First Course in Data Analysis and Inference},
New York: Wiley.
+
}
\examples{
+auuc
+\dontrun{
round(fitted(grc(auuc)))
round(fitted(grc(auuc, Rank = 2)))
}
+}
\keyword{datasets}
diff --git a/man/backPain.Rd b/man/backPain.Rd
index 5299087..be1a3a5 100644
--- a/man/backPain.Rd
+++ b/man/backPain.Rd
@@ -36,12 +36,14 @@
Anderson, J. A. (1984) Regression and Ordered Categorical
Variables. \emph{J. R. Statist. Soc. B}, \bold{46}(1), 1--30.
+
Yee, T. W. (2010)
The \pkg{VGAM} package for categorical data analysis.
\emph{Journal of Statistical Software},
\bold{32}, 1--34.
\url{http://www.jstatsoft.org/v32/i10/}.
+
}
\examples{
summary(backPain)
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 83f321e..7082876 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -14,15 +14,10 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lshape1, lshape2}{
- Parameter link functions applied to the two shape parameters.
+ \item{lshape1, lshape2, i1, i2}{
+ Details at \code{\link{CommonVGAMffArguments}}.
See \code{\link{Links}} for more choices.
- The log link (defaults) ensures that the parameters are positive.
- }
- \item{i1, i2}{
- Initial value for the first and second shape parameters respectively.
- A \code{NULL} value means it is obtained in the \code{initialize} slot.
}
\item{trim}{
@@ -31,16 +26,19 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
\code{y} before the mean is computed. This is used when computing
initial values, and guards against outliers.
+
}
\item{A, B}{
Lower and upper limits of the distribution.
The defaults correspond to the \emph{standard beta distribution}
where the response lies between 0 and 1.
+
}
\item{parallel, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -115,6 +113,7 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
The response must have values in the interval (\eqn{A}, \eqn{B}).
\pkg{VGAM} 0.7-4 and prior called this function \code{\link{betaff}}.
+
}
\seealso{
@@ -129,17 +128,18 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
\code{\link{rbetanorm}},
\code{\link{kumar}}.
+
}
\examples{
-bdata = data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)))
-fit = vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
- data = bdata, trace = TRUE, crit = "coef")
-fit = vglm(y ~ 1, beta.ab, bdata, trace = TRUE, crit = "coef")
+bdata <- data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)))
+fit <- vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
+ data = bdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, beta.ab, bdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
-bdata = transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
-fit = vglm(Y ~ 1, beta.ab(A = 5, B = 13), bdata, trace = TRUE)
+bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
+fit <- vglm(Y ~ 1, beta.ab(A = 5, B = 13), bdata, trace = TRUE)
Coef(fit)
c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
}
@@ -147,8 +147,8 @@ c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
\keyword{regression}
% 3/1/06; this works well:
-% fit = vglm(y~1, beta.abqn(link = logoff(offset = 1), tr = TRUE, cri = "c")
+% fit <- vglm(y~1, beta.abqn(link = logoff(offset = 1), tr = TRUE, crit = "c")
% 3/1/06; this does not work so well:
-% it = vglm(y~1, beta.abqn(link = logoff(offset = 0), tr = TRUE, cri = "c")
+% it <- vglm(y~1, beta.abqn(link = logoff(offset = 0), tr = TRUE, crit = "c")
% Interesting!!
diff --git a/man/betaII.Rd b/man/betaII.Rd
index b1528e6..8b036c3 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -42,7 +42,7 @@ betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
The beta II distribution has density
\deqn{f(y) = y^{p-1} / [b^p B(p,q) \{1 + y/b\}^{p+q}]}{%
f(y) = y^(p-1) / [b^p B(p,q) (1 + y/b)^(p+q)]}
- for \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y > 0}.
+ for \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and the others are shape parameters.
The mean is
@@ -87,10 +87,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-bdata = data.frame(y = rsinmad(2000, shape1.a = 1, 6, 2)) # Not genuine data!
-fit = vglm(y ~ 1, betaII, bdata, trace = TRUE)
-fit = vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7),
- bdata, trace = TRUE)
+bdata <- data.frame(y = rsinmad(2000, shape1.a = 1, exp(2), exp(1))) # Not genuine data!
+fit <- vglm(y ~ 1, betaII, bdata, trace = TRUE)
+fit <- vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7),
+ bdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index 9c8de53..78d0e33 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -103,25 +103,23 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
}
\examples{
-set.seed(1)
-rbetabinom(10, 100, prob = 0.5)
-set.seed(1)
-rbinom(10, 100, prob = 0.5) # The same since rho = 0
+set.seed(1); rbetabinom(10, 100, prob = 0.5)
+set.seed(1); rbinom(10, 100, prob = 0.5) # The same since rho = 0
-\dontrun{ N = 9; xx = 0:N; s1 = 2; s2 = 3
-dy = dbetabinom.ab(xx, size = N, shape1 = s1, shape2 = s2)
+\dontrun{ N <- 9; xx <- 0:N; s1 <- 2; s2 <- 3
+dy <- dbetabinom.ab(xx, size = N, shape1 = s1, shape2 = s2)
barplot(rbind(dy, dbinom(xx, size = N, prob = s1 / (s1+s2))),
beside = TRUE, col = c("blue","green"), las = 1,
- main = paste("Beta-binomial (size=",N,", shape1=",s1,
- ", shape2=",s2,") (blue) vs\n",
- " Binomial(size=", N, ", prob=", s1 / (s1+s2), ") (green)", sep = ""),
+ main = paste("Beta-binomial (size=",N,", shape1=", s1,
+ ", shape2=", s2, ") (blue) vs\n",
+ " Binomial(size=", N, ", prob=", s1/(s1+s2), ") (green)", sep = ""),
names.arg = as.character(xx), cex.main = 0.8)
-sum(dy*xx) # Check expected values are equal
+sum(dy * xx) # Check expected values are equal
sum(dbinom(xx, size = N, prob = s1 / (s1+s2))*xx)
-cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2)
+cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2)
-y = rbetabinom.ab(n = 10000, size = N, shape1 = s1, shape2 = s2)
-ty = table(y)
+y <- rbetabinom.ab(n = 10000, size = N, shape1 = s1, shape2 = s2)
+ty <- table(y)
barplot(rbind(dy, ty / sum(ty)),
beside = TRUE, col = c("blue","red"), las = 1,
main = paste("Beta-binomial (size=",N,", shape1=",s1,
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index 1483e75..080fb26 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -20,6 +20,7 @@ betabinomial(lmu = "logit", lrho = "logit",
The defaults ensure the parameters remain in \eqn{(0,1)};
however, see the warning below.
+
}
\item{irho}{
Optional initial value for the correlation parameter.
@@ -28,6 +29,7 @@ betabinomial(lmu = "logit", lrho = "logit",
Having \code{irho = NULL} means an initial value is obtained internally,
though this can give unsatisfactory results.
+
}
\item{imethod}{
An integer with value \code{1} or \code{2} or \ldots,
@@ -35,6 +37,7 @@ betabinomial(lmu = "logit", lrho = "logit",
If failure to converge occurs, try another value
and/or else specify a value for \code{irho}.
+
}
\item{zero}{
An integer specifying which
@@ -45,6 +48,7 @@ betabinomial(lmu = "logit", lrho = "logit",
\code{zero = NULL}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{shrinkage.init, nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for more information.
@@ -52,6 +56,7 @@ betabinomial(lmu = "logit", lrho = "logit",
Using the argument \code{nsimEIM} may offer large advantages for large
values of \eqn{N} and/or large data sets.
+
}
}
\details{
@@ -160,17 +165,18 @@ betabinomial(lmu = "logit", lrho = "logit",
\section{Warning }{
- If the estimated rho parameter is close to zero then it pays to try
- \code{lrho = "rhobit"}. One day this may become the default link function.
+ If the estimated rho parameter is close to zero then it pays to
+ try \code{lrho = "rhobit"}. One day this may become the default
+ link function.
- This family function is prone to numerical difficulties
- due to the expected information matrices not being positive-definite
+ This family function is prone to numerical difficulties due to
+ the expected information matrices not being positive-definite
or ill-conditioned over some regions of the parameter space.
- If problems occur try setting \code{irho} to some numerical value,
- \code{nsimEIM = 100}, say,
- or else use \code{etastart} argument of
- \code{\link{vglm}}, etc.
+ If problems occur try setting \code{irho} to some numerical
+ value, \code{nsimEIM = 100}, say, or else use \code{etastart}
+ argument of \code{\link{vglm}}, etc.
+
}
\seealso{
@@ -185,30 +191,30 @@ betabinomial(lmu = "logit", lrho = "logit",
}
\examples{
# Example 1
-bdata = data.frame(N = 10, mu = 0.5, rho = 0.8)
-bdata = transform(bdata,
- y = rbetabinom(n=100, size = N, prob = mu, rho = rho))
-fit = vglm(cbind(y, N-y) ~ 1, betabinomial, bdata, trace = TRUE)
+bdata <- data.frame(N = 10, mu = 0.5, rho = 0.8)
+bdata <- transform(bdata,
+ y = rbetabinom(n=100, size = N, prob = mu, rho = rho))
+fit <- vglm(cbind(y, N-y) ~ 1, betabinomial, bdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
-head(cbind(fit@y, weights(fit, type = "prior")))
+head(cbind(depvar(fit), weights(fit, type = "prior")))
# Example 2
-fit = vglm(cbind(R, N-R) ~ 1, betabinomial, lirat,
- trace = TRUE, subset = N > 1)
+fit <- vglm(cbind(R, N-R) ~ 1, betabinomial, lirat,
+ trace = TRUE, subset = N > 1)
coef(fit, matrix = TRUE)
Coef(fit)
t(fitted(fit))
-t(fit@y)
+t(depvar(fit))
t(weights(fit, type = "prior"))
# Example 3, which is more complicated
-lirat = transform(lirat, fgrp = factor(grp))
-summary(lirat) # Only 5 litters in group 3
-fit2 = vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial(zero = 2),
- data = lirat, trace = TRUE, subset = N > 1)
+lirat <- transform(lirat, fgrp = factor(grp))
+summary(lirat) # Only 5 litters in group 3
+fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial(zero = 2),
+ data = lirat, trace = TRUE, subset = N > 1)
coef(fit2, matrix = TRUE)
\dontrun{ with(lirat, plot(hb[N > 1], fit2@misc$rho,
xlab = "Hemoglobin", ylab = "Estimated rho",
@@ -217,12 +223,12 @@ coef(fit2, matrix = TRUE)
with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp, las = 1,
xlab = "Hemoglobin level", ylab = "Proportion Dead",
main = "Fitted values (lines)"))
-smalldf = with(lirat, lirat[N > 1, ])
+smalldf <- with(lirat, lirat[N > 1, ])
for(gp in 1:4) {
- xx = with(smalldf, hb[grp == gp])
- yy = with(smalldf, fitted(fit2)[grp == gp])
- ooo = order(xx)
- lines(xx[ooo], yy[ooo], col = gp) } }
+ xx <- with(smalldf, hb[grp == gp])
+ yy <- with(smalldf, fitted(fit2)[grp == gp])
+ ooo <- order(xx)
+ lines(xx[ooo], yy[ooo], col = gp) } }
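+
+# A sketch of the remedies suggested in the warning above (the particular
+# argument values here are illustrative only):
+fit2b <- vglm(cbind(R, N-R) ~ fgrp + hb, data = lirat, trace = TRUE,
+              betabinomial(zero = 2, irho = 0.2, nsimEIM = 100),
+              subset = N > 1)
+coef(fit2b, matrix = TRUE)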
}
\keyword{models}
\keyword{regression}
diff --git a/man/betabinomial.ab.Rd b/man/betabinomial.ab.Rd
index aed6c94..83664c1 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomial.ab.Rd
@@ -181,9 +181,9 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
}
\examples{
# Example 1
-N = 10; s1 = exp(1); s2 = exp(2)
-y = rbetabinom.ab(n = 100, size = N, shape1 = s1, shape2 = s2)
-fit = vglm(cbind(y, N-y) ~ 1, betabinomial.ab, trace = TRUE)
+N <- 10; s1 <- exp(1); s2 <- exp(2)
+y <- rbetabinom.ab(n = 100, size = N, shape1 = s1, shape2 = s2)
+fit <- vglm(cbind(y, N-y) ~ 1, betabinomial.ab, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fit@misc$rho) # The correlation parameter
@@ -191,8 +191,8 @@ head(cbind(depvar(fit), weights(fit, type = "prior")))
# Example 2
-fit = vglm(cbind(R, N-R) ~ 1, betabinomial.ab, data = lirat,
- trace = TRUE, subset = N > 1)
+fit <- vglm(cbind(R, N-R) ~ 1, betabinomial.ab, data = lirat,
+ trace = TRUE, subset = N > 1)
coef(fit, matrix = TRUE)
Coef(fit)
fit@misc$rho # The correlation parameter
@@ -206,9 +206,9 @@ all.equal(c(fitted(fit)),
# Example 3, which is more complicated
-lirat = transform(lirat, fgrp = factor(grp))
+lirat <- transform(lirat, fgrp = factor(grp))
summary(lirat) # Only 5 litters in group 3
-fit2 = vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial.ab(zero = 2),
+fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial.ab(zero = 2),
data = lirat, trace = TRUE, subset = N > 1)
coef(fit2, matrix = TRUE)
coef(fit2, matrix = TRUE)[, 1] -
@@ -221,11 +221,11 @@ with(lirat, plot(hb, R / N, pch = as.character(grp), col = grp, las = 1,
xlab = "Hemoglobin level", ylab = "Proportion Dead",
main = "Fitted values (lines)"))
-smalldf = with(lirat, lirat[N > 1, ])
+smalldf <- with(lirat, lirat[N > 1, ])
for(gp in 1:4) {
- xx = with(smalldf, hb[grp == gp])
- yy = with(smalldf, fitted(fit2)[grp == gp])
- ooo = order(xx)
+ xx <- with(smalldf, hb[grp == gp])
+ yy <- with(smalldf, fitted(fit2)[grp == gp])
+ ooo <- order(xx)
lines(xx[ooo], yy[ooo], col = gp) } }
}
\keyword{models}
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index a4f2364..a9de4df 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -108,9 +108,9 @@ betageometric(lprob = "logit", lshape = "loge",
}
\examples{
-bg.data = data.frame(y = 0:11, wts = c(227,123,72,42,21,31,11,14,6,4,7,28))
-fit = vglm(y ~ 1, betageometric, bg.data, weight = wts, trace = TRUE)
-fitg = vglm(y ~ 1, geometric, bg.data, weight = wts, trace = TRUE)
+bdata <- data.frame(y = 0:11, wts = c(227,123,72,42,21,31,11,14,6,4,7,28))
+fit <- vglm(y ~ 1, betageometric, bdata, weight = wts, trace = TRUE)
+fitg <- vglm(y ~ 1, geometric, bdata, weight = wts, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
sqrt(diag(vcov(fit, untransform = TRUE)))
diff --git a/man/bilogis4UC.Rd b/man/bilogis4UC.Rd
index 8a3c2d2..4c62d31 100644
--- a/man/bilogis4UC.Rd
+++ b/man/bilogis4UC.Rd
@@ -17,7 +17,7 @@ rbilogis4(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
\arguments{
\item{x1, x2, q1, q2}{vector of quantiles.}
\item{n}{number of observations.
- Must be a positive integer of length 1.}
+ Same as \code{\link[stats]{rlogis}}. }
\item{loc1, loc2}{the location parameters \eqn{l_1}{l1} and \eqn{l_2}{l2}.}
\item{scale1, scale2}{the scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.}
\item{log}{
@@ -31,6 +31,8 @@ rbilogis4(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
\code{dbilogis4} gives the density,
\code{pbilogis4} gives the distribution function, and
\code{rbilogis4} generates random deviates (a two-column matrix).
+
+
}
\references{
@@ -54,22 +56,22 @@ Bivariate logistic distributions.
\seealso{
\code{\link{bilogistic4}}.
+
}
\examples{
\dontrun{ par(mfrow = c(1, 3))
-n = 2000
-ymat = rbilogis4(n, loc1 = 5, loc2 = 7, scale2 = exp(1))
-myxlim = c(-2, 15)
-myylim = c(-10, 30)
+n <- 2000
+ymat <- rbilogis4(n, loc1 = 5, loc2 = 7, scale2 = exp(1))
+myxlim <- c(-2, 15); myylim <- c(-10, 30)
plot(ymat, xlim = myxlim, ylim = myylim)
-N = 100
-x1 = seq(myxlim[1], myxlim[2], len = N)
-x2 = seq(myylim[1], myylim[2], len = N)
-ox = expand.grid(x1, x2)
-z = dbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
+N <- 100
+x1 <- seq(myxlim[1], myxlim[2], len = N)
+x2 <- seq(myylim[1], myylim[2], len = N)
+ox <- expand.grid(x1, x2)
+z <- dbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
contour(x1, x2, matrix(z, N, N), main = "density")
-z = pbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
+z <- pbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
contour(x1, x2, matrix(z, N, N), main = "cdf") }
}
\keyword{distribution}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 89c02fe..3bc519a 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -9,6 +9,7 @@
model can be fitted.
The odds ratio is used as a measure of dependency.
+
}
\usage{
binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
@@ -64,7 +65,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
}
}
\details{
- Known also as the \emph{Palmgren model}, the bivariate logistic model is
+ Also known informally as the \emph{Palmgren model},
+ the bivariate logistic model is
a full-likelihood based model defined as two logistic regressions plus
\code{log(oratio) = eta3} where \code{eta3} is the third linear/additive
predictor relating the odds ratio to explanatory variables.
@@ -199,8 +201,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
}
\examples{
# Fit the model in Table 6.7 in McCullagh and Nelder (1989)
-coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers)
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
+fit <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers)
fitted(fit)
summary(fit)
coef(fit, matrix = TRUE)
@@ -208,7 +210,7 @@ c(weights(fit, type = "prior")) * fitted(fit) # Table 6.8
\dontrun{ with(coalminers, matplot(Age, fitted(fit), type = "l", las = 1,
xlab = "(age - 42) / 5", lwd = 2))
-with(coalminers, matpoints(Age, fit@y, col=1:4))
+with(coalminers, matpoints(Age, depvar(fit), col=1:4))
legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
legend=c("1 = (Breathlessness=0, Wheeze=0)",
"2 = (Breathlessness=0, Wheeze=1)",
@@ -217,19 +219,22 @@ legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
# Another model: pet ownership
-petdata = subset(xs.nz, ethnic == "0" & age < 70 & sex == "M") # More homogeneous
-petdata = na.omit(petdata[, c("cat", "dog", "age")])
+\dontrun{ require(VGAMdata)
+# More homogeneous:
+petdata <- subset(xs.nz, ethnic == "0" & age < 70 & sex == "M")
+petdata <- na.omit(petdata[, c("cat", "dog", "age")])
summary(petdata)
with(petdata, table(cat, dog)) # Can compute the odds ratio
-fit = vgam(cbind((1-cat)*(1-dog), (1-cat)*dog,
- cat*(1-dog), cat*dog) ~ s(age, df = 5),
- binom2.or(zero = 3), data = petdata, trace = TRUE)
+fit <- vgam(cbind((1-cat)*(1-dog), (1-cat)*dog,
+ cat *(1-dog), cat *dog) ~ s(age, df = 5),
+ binom2.or(zero = 3), data = petdata, trace = TRUE)
colSums(depvar(fit))
coef(fit, matrix = TRUE)
+}
\dontrun{ # Plot the estimated probabilities
-ooo = order(with(petdata, age))
+ooo <- order(with(petdata, age))
matplot(with(petdata, age)[ooo], fitted(fit)[ooo, ], type = "l",
xlab = "Age", ylab = "Probability", main = "Pet ownership",
ylim = c(0, max(fitted(fit))), las = 1, lwd = 1.5)
diff --git a/man/binom2.orUC.Rd b/man/binom2.orUC.Rd
index f74b5a1..0704033 100644
--- a/man/binom2.orUC.Rd
+++ b/man/binom2.orUC.Rd
@@ -28,23 +28,27 @@ dbinom2.or(mu1,
The arguments \code{mu1}, \code{mu2}, \code{oratio} are recycled to
length \code{n}.
+
}
\item{mu1, mu2}{
The marginal probabilities.
Only \code{mu1} is needed if \code{exchangeable = TRUE}.
Values should be between 0 and 1.
+
}
\item{oratio}{
Odds ratio. Must be numeric and positive.
The default value of unity means the responses are statistically
independent.
-
+
+
}
\item{exchangeable}{
Logical. If \code{TRUE}, the two marginal probabilities are constrained
to be equal.
-
+
+
}
\item{twoCols}{
Logical.
@@ -53,20 +57,24 @@ dbinom2.or(mu1,
If \code{FALSE}, then a \eqn{n} \eqn{\times}{*} \eqn{4} matrix of 1s
and 0s is returned.
+
}
\item{colnames}{
The \code{dimnames} argument of
\code{\link[base]{matrix}} is assigned \code{list(NULL, colnames)}.
+
}
\item{tol}{
Tolerance for testing independence. Should be some
small positive numerical value.
+
}
\item{ErrorCheck}{
Logical. Do some error checking of the input parameters?
+
}
}
@@ -76,46 +84,50 @@ dbinom2.or(mu1,
The data might be fitted with the \pkg{VGAM} family function
\code{\link{binom2.or}}.
+
The function \code{dbinom2.or} does not really compute the density
(because that does not make sense here) but rather returns the
four joint probabilities.
+
}
\value{
The function \code{rbinom2.or} returns
either a 2 or 4 column matrix of 1s and 0s, depending on the argument
\code{twoCols}.
+
The function \code{dbinom2.or} returns
a 4 column matrix of joint probabilities; each row adds up to unity.
+
}
\author{ T. W. Yee }
\seealso{
\code{\link{binom2.or}}.
+
}
\examples{
-# Example 1
-nn = 2000
-ymat = rbinom2.or(n = nn, mu1 = 0.8, oratio = exp(2), exch = TRUE)
-(mytab = table(ymat[,1], ymat[,2], dnn=c("Y1", "Y2")))
-(myor = mytab["0","0"] * mytab["1","1"] / (mytab["1","0"] * mytab["0","1"]))
-fit = vglm(ymat ~ 1, binom2.or(exch = TRUE))
+nn <- 2000 # Example 1
+ymat <- rbinom2.or(n = nn, mu1 = 0.8, oratio = exp(2), exch = TRUE)
+(mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
+(myor <- mytab["0","0"] * mytab["1","1"] / (mytab["1","0"] * mytab["0","1"]))
+fit <- vglm(ymat ~ 1, binom2.or(exch = TRUE))
coef(fit, matrix = TRUE)
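+
+# A small additional check (a sketch; the column order (0,0), (0,1), (1,0),
+# (1,1) is assumed here): the odds ratio can be recovered from the joint
+# probabilities returned by dbinom2.or().
+pmat <- dbinom2.or(mu1 = 0.8, oratio = exp(2), exch = TRUE)
+pmat[1, 1] * pmat[1, 4] / (pmat[1, 2] * pmat[1, 3])  # Should be exp(2)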
-# Example 2
-x = sort(runif(nn))
-mu1 = logit(-2+4*x, inv = TRUE)
-mu2 = logit(-1+3*x, inv = TRUE)
-dmat = dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = exp(2))
-ymat = rbinom2.or(n = nn, mu1 = mu1, mu2 = mu2, oratio = exp(2))
-fit2 = vglm(ymat ~ x, binom2.or)
+bdata <- data.frame(x2 = sort(runif(nn))) # Example 2
+bdata <- transform(bdata, mu1 = logit(-2 + 4*x2, inverse = TRUE),
+ mu2 = logit(-1 + 3*x2, inverse = TRUE))
+dmat <- with(bdata, dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = exp(2)))
+ymat <- with(bdata, rbinom2.or(n = nn, mu1 = mu1, mu2 = mu2, oratio = exp(2)))
+fit2 <- vglm(ymat ~ x2, binom2.or, data = bdata)
coef(fit2, matrix = TRUE)
\dontrun{
-matplot(x, dmat, lty = 1:4, col = 1:4, type = "l",
- main = "Joint probabilities", ylim = 0:1, lwd = 2)
+matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4, type = "l",
+ main = "Joint probabilities", ylim = 0:1, lwd = 2,
+ ylab = "Probabilities", xlab = "x2", las = 1)
legend(x = 0, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
legend = c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)",
"3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)"))
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index d798ba0..b22336c 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -7,11 +7,13 @@
Fits a bivariate probit model to two binary responses.
The correlation parameter rho is the measure of dependency.
+
}
\usage{
binom2.rho(lrho = "rhobit", lmu = "probit", imu1 = NULL, imu2 = NULL,
- irho = NULL, imethod = 1,
- zero = 3, exchangeable = FALSE, nsimEIM = NULL)
+ irho = NULL, imethod = 1, zero = 3,
+ exchangeable = FALSE, grho = seq(-0.95, 0.95, by = 0.05),
+ nsimEIM = NULL)
binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
exchangeable = FALSE, nsimEIM = NULL)
}
@@ -21,6 +23,7 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
Link function applied to the \eqn{\rho}{rho} association parameter.
See \code{\link{Links}} for more choices.
+
}
\item{lmu}{
Link function applied to the marginal probabilities.
@@ -33,11 +36,13 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
If given, this should lie between \eqn{-1} and \eqn{1}.
See below for more comments.
+
}
\item{imu1, imu2}{
Optional initial values for the two marginal probabilities.
May be a vector.
+
}
\item{zero}{
Which linear/additive predictor is modelled as an intercept only?
@@ -45,37 +50,42 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
Numerically, the \eqn{\rho}{rho} parameter is easiest modelled as
an intercept only, hence the default.
+
}
\item{exchangeable}{
Logical.
If \code{TRUE}, the two marginal probabilities are constrained to
be equal.
+
}
- \item{imethod, nsimEIM}{
+ \item{imethod, nsimEIM, grho}{
See \code{\link{CommonVGAMffArguments}} for more information.
- A value of at least 100 is recommended;
+ A value of at least 100 for \code{nsimEIM} is recommended;
the larger the value the better.
+
}
\item{rho}{
Numeric vector.
Values are recycled to the needed length,
and ought to be in range.
+
}
}
\details{
- The \emph{bivariate probit model} was one of the earliest regression
- models to handle two binary responses jointly. It has a probit
- link for each of the two marginal probabilities, and models the
- association between the responses by the \eqn{\rho}{rho} parameter
- of a standard bivariate normal distribution (with zero means and
- unit variances). One can think of the joint probabilities being
- \eqn{\Phi(\eta_1,\eta_2;\rho)}{Phi(eta1,eta2;rho)} where \eqn{\Phi}{Phi}
- is the cumulative distribution function of a standard bivariate normal
- distribution.
+ The \emph{bivariate probit model} was one of the
+ earliest regression models to handle two binary responses
+ jointly. It has a probit link for each of the two marginal
+ probabilities, and models the association between the
+ responses by the \eqn{\rho}{rho} parameter of a standard
+ bivariate normal distribution (with zero means and unit
+ variances). One can think of the joint probabilities being
+ \eqn{\Phi(\eta_1,\eta_2;\rho)}{Phi(eta1,eta2;rho)} where
+ \eqn{\Phi}{Phi} is the cumulative distribution function of a
+ standard bivariate normal distribution.
Explicitly, the default model is
@@ -93,29 +103,31 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
The default models \eqn{\eta_3}{eta3} as a single parameter only,
- i.e., an intercept-only model for rho, but this can be circumvented by setting
- \code{zero = NULL} in order to model rho as a function of all the
- explanatory variables.
+ i.e., an intercept-only model for rho, but this can be
+ circumvented by setting \code{zero = NULL} in order to model
+ rho as a function of all the explanatory variables.
- The bivariate probit model should not be confused with a \emph{bivariate
- logit model} with a probit link (see \code{\link{binom2.or}}).
- The latter uses the odds ratio to quantify the association. Actually,
- the bivariate logit model is recommended over the bivariate probit
- model because the odds ratio is a more natural way of measuring the
- association between two binary responses.
+ The bivariate probit model should not be confused with
+ a \emph{bivariate logit model} with a probit link (see
+ \code{\link{binom2.or}}). The latter uses the odds ratio to
+ quantify the association. Actually, the bivariate logit model
+ is recommended over the bivariate probit model because the
+ odds ratio is a more natural way of measuring the association
+ between two binary responses.
}
\value{
- An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}},
- and \code{\link{vgam}}.
+ An object of class \code{"vglmff"} (see
+ \code{\link{vglmff-class}}). The object is used by modelling
+ functions such as \code{\link{vglm}}, and \code{\link{vgam}}.
- When fitted, the \code{fitted.values} slot of the object contains the
- four joint probabilities, labelled as
- \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1), respectively.
+ When fitted, the \code{fitted.values} slot of the object
+ contains the four joint probabilities, labelled as
+ \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0), (1,1),
+ respectively.
}
@@ -131,6 +143,12 @@ Freedman, D. A. (2010)
the Social Sciences}, Cambridge: Cambridge University Press.
+Freedman, D. A. and Sekhon, J. S. (2010)
+ Endogeneity in probit response models.
+ \emph{Political Analysis},
+ \bold{18}, 138--150.
+
+
}
\author{ Thomas W. Yee }
\note{
@@ -163,10 +181,23 @@ Freedman, D. A. (2010)
the fitted object, with \code{rho} as the component name.
+ In some econometrics applications
+ (e.g., Freedman 2010, Freedman and Sekhon 2010)
+ one response is used as an explanatory variable,
+ e.g., a \emph{recursive} binomial probit model.
+ Such a model will not work here.
+ Historically, the bivariate probit model was the first VGAM I
+ ever wrote, based on Ashford and Sowden (1970). I don't think
+ they ever thought of it either! Hence the criticisms raised
+ go beyond the use of what was originally intended.
+
+
+
}
\seealso{
\code{\link{rbinom2.rho}},
\code{\link{rhobit}},
+ \code{\link{pnorm2}},
\code{\link{binom2.or}},
\code{\link{loglinb2}},
\code{\link{coalminers}},
@@ -174,10 +205,14 @@ Freedman, D. A. (2010)
\code{\link{rhobit}},
\code{\link{fisherz}}.
+
+
+
}
\examples{
-coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.rho, coalminers, trace = TRUE)
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
+fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age,
+ binom2.rho, data = coalminers, trace = TRUE)
summary(fit)
coef(fit, matrix = TRUE)
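+
+# A sketch of the zero = NULL option described in the details (illustrative
+# only); rho's linear predictor is then also a function of Age:
+fit2 <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age,
+             binom2.rho(zero = NULL), data = coalminers, trace = TRUE)
+coef(fit2, matrix = TRUE)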
}
diff --git a/man/binom2.rhoUC.Rd b/man/binom2.rhoUC.Rd
index 74af690..4c119fc 100644
--- a/man/binom2.rhoUC.Rd
+++ b/man/binom2.rhoUC.Rd
@@ -58,10 +58,12 @@ dbinom2.rho(mu1,
The \code{dimnames} argument of
\code{\link[base]{matrix}} is assigned \code{list(NULL, colnames)}.
+
}
\item{ErrorCheck}{
Logical. Do some error checking of the input parameters?
+
}
}
@@ -71,40 +73,43 @@ dbinom2.rho(mu1,
The data might be fitted with the \pkg{VGAM} family function
\code{\link{binom2.rho}}.
+
The function \code{dbinom2.rho} does not really compute the density
(because that does not make sense here) but rather returns the
four joint probabilities.
+
}
\value{
The function \code{rbinom2.rho} returns
either a 2 or 4 column matrix of 1s and 0s, depending on the argument
\code{twoCols}.
+
The function \code{dbinom2.rho} returns
a 4 column matrix of joint probabilities; each row adds up to unity.
+
}
\author{ T. W. Yee }
\seealso{
\code{\link{binom2.rho}}.
+
}
\examples{
-# Example 1
-(myrho <- rhobit(2, inverse = TRUE))
-ymat = rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE)
-(mytab = table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
-fit = vglm(ymat ~ 1, binom2.rho(exch = TRUE))
+(myrho <- rhobit(2, inverse = TRUE)) # Example 1
+ymat <- rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE)
+(mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
+fit <- vglm(ymat ~ 1, binom2.rho(exch = TRUE))
coef(fit, matrix = TRUE)
-# Example 2
-bdata = data.frame(x2 = sort(runif(nn)))
-bdata = transform(bdata, mu1 = probit(-2+4*x2, inverse = TRUE),
- mu2 = probit(-1+3*x2, inverse = TRUE))
-dmat = with(bdata, dbinom2.rho(mu1, mu2, myrho))
-ymat = with(bdata, rbinom2.rho(nn, mu1, mu2, myrho))
-fit2 = vglm(ymat ~ x2, binom2.rho, bdata)
+bdata <- data.frame(x2 = sort(runif(nn))) # Example 2
+bdata <- transform(bdata, mu1 = probit(-2+4*x2, inverse = TRUE),
+ mu2 = probit(-1+3*x2, inverse = TRUE))
+dmat <- with(bdata, dbinom2.rho(mu1, mu2, myrho))
+ymat <- with(bdata, rbinom2.rho(nn, mu1, mu2, myrho))
+fit2 <- vglm(ymat ~ x2, binom2.rho, bdata)
coef(fit2, matrix = TRUE)
\dontrun{ matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4,
type = "l", main = "Joint probabilities",
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index a79ba5d..fe79ba9 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -10,8 +10,8 @@
}
\usage{
binomialff(link = "logit", dispersion = 1, mv = FALSE,
- onedpar = !mv, parallel = FALSE, zero = NULL, bred = FALSE,
- earg.link = FALSE)
+ onedpar = !mv, parallel = FALSE, apply.parint = FALSE,
+ zero = NULL, bred = FALSE, earg.link = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -70,11 +70,26 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
}
- \item{bred, earg.link}{
+ \item{apply.parint, earg.link}{
Details at \code{\link{CommonVGAMffArguments}}.
}
+ \item{bred}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+    Setting \code{bred = TRUE} should work for
+    multiple responses (\code{mv = TRUE}) and
+    all \pkg{VGAM} link functions;
+    however, it has been tested with
+    \code{\link{logit}} only (where it gives results similar,
+    but not identical, to \pkg{brglm}),
+    so further testing is required.
+    One consequence of fitting bias-reduced binary regression
+    is that the regression coefficients remain finite even when
+    the data is separable (see the example below).
+
+
+ }
}
\details{
This function is largely to mimic \code{\link[stats:Binomial]{binomial}},
@@ -198,6 +213,10 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
Currently, only a single known dispersion parameter is handled well.
+ See the above note regarding \code{bred}.
+
+
+
The maximum likelihood estimate will not exist if the data is
\emph{completely separable} or \emph{quasi-completely separable}.
See Chapter 10 of Altman et al. (2004) for more details,
@@ -237,6 +256,20 @@ coef(glm.fail)
vglm.ok <- vglm(cbind(r, n-r) ~ offset(logv) + 1,
binomialff(link = cloglog), ridout, trace = TRUE)
coef(vglm.ok)
+
+
+# Separable data
+set.seed(123)
+threshold <- 0
+bdata <- data.frame(x2 = sort(rnorm(nn <- 100)))
+bdata <- transform(bdata, y1 = ifelse(x2 < threshold, 0, 1))
+fit <- vglm(y1 ~ x2, binomialff(bred = TRUE),
+ data = bdata, criter = "coef", trace = TRUE)
+coef(fit, matrix = TRUE) # Finite!!
+summary(fit)
+\dontrun{ plot(depvar(fit) ~ x2, data = bdata, col = "blue", las = 1)
+lines(fitted(fit) ~ x2, data = bdata, col = "orange")
+abline(v = threshold, col = "gray", lty = "dashed") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/binormal.Rd b/man/binormal.Rd
index 8d8e0c6..e00ef66 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -1,7 +1,7 @@
\name{binormal}
\alias{binormal}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Bivariate normal distribution }
+\title{ Bivariate normal distribution family function }
\description{
Maximum likelihood estimation of the five parameters of a bivariate
normal distribution.
@@ -9,7 +9,7 @@
}
\usage{
binormal(lmean1 = "identity", lmean2 = "identity",
- lsd1 = "loge", lsd2 = "loge",
+ lsd1 = "loge", lsd2 = "loge",
lrho = "rhobit",
imean1 = NULL, imean2 = NULL,
isd1 = NULL, isd2 = NULL,
@@ -63,6 +63,14 @@ binormal(lmean1 = "identity", lmean2 = "identity",
}
+\section{Warning}{
+  This function may be renamed to \code{normal2()} or similar
+  at a later date.
+
+
+}
+
+
%\references{
%
%}
@@ -81,12 +89,13 @@ binormal(lmean1 = "identity", lmean2 = "identity",
\seealso{
\code{\link{normal1}},
- \code{\link{gaussianff}}.
+ \code{\link{gaussianff}},
+ \code{\link{pnorm2}}.
}
\examples{
-nn <- 1000
+set.seed(123); nn <- 1000
bdata <- data.frame(x2 = runif(nn), x3 = runif(nn))
bdata <- transform(bdata, y1 = rnorm(nn, 1 + 2*x2),
y2 = rnorm(nn, 3 + 4*x2))
@@ -95,6 +104,15 @@ fit1 <- vglm(cbind(y1, y2) ~ x2,
coef(fit1, matrix = TRUE)
constraints(fit1)
summary(fit1)
+
+# Estimated P(Y1 <= y1, Y2 <= y2) under the fitted model
+var1 <- loge(2 * predict(fit1)[, "log(sd1)"], inverse = TRUE)
+var2 <- loge(2 * predict(fit1)[, "log(sd2)"], inverse = TRUE)
+cov12 <- rhobit(predict(fit1)[, "rhobit(rho)"], inverse = TRUE)
+head(with(bdata, pnorm2(y1, y2,
+ mean1 = predict(fit1)[, "mean1"],
+ mean2 = predict(fit1)[, "mean2"],
+ var1 = var1, var2 = var2, cov12 = cov12)))
}
\keyword{models}
\keyword{regression}
diff --git a/man/bisa.Rd b/man/bisa.Rd
index a670c8b..3dd990c 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -19,12 +19,14 @@ bisa(lshape = "loge", lscale = "loge",
See \code{\link{Links}} for more choices.
A log link is the default for both because they are positive.
+
}
\item{iscale, ishape}{
Initial values for \eqn{a} and \eqn{b}.
A \code{NULL} means an initial value is chosen internally using
\code{imethod}.
+
}
\item{imethod}{
An integer with value \code{1} or \code{2} or \code{3} which
@@ -32,6 +34,7 @@ bisa(lshape = "loge", lscale = "loge",
try the other value, or else specify a value for
\code{ishape} and/or \code{iscale}.
+
}
\item{zero}{
An integer-valued vector specifying which
@@ -39,6 +42,7 @@ bisa(lshape = "loge", lscale = "loge",
The default is none of them.
If used, choose one value from the set \{1,2\}.
+
}
}
\details{
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index cf8e797..98f08dc 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -19,8 +19,11 @@ rbisa(n, shape, scale = 1)
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
- \item{n}{number of observations.
- If \code{length(n) > 1} then the length is taken to be the number required. }
+ \item{n}{
+ Same as in \code{\link[stats]{runif}}.
+
+
+ }
\item{shape, scale}{
the (positive) shape and scale parameters.
@@ -36,6 +39,7 @@ rbisa(n, shape, scale = 1)
\code{pbisa} gives the distribution function, and
\code{qbisa} gives the quantile function, and
\code{rbisa} generates random deviates.
+
}
\author{ T. W. Yee }
\details{
@@ -45,35 +49,38 @@ rbisa(n, shape, scale = 1)
for estimating the parameters,
for more details.
+
}
%\note{
%}
\seealso{
\code{\link{bisa}}.
+
+
}
\examples{
\dontrun{
-x = seq(0, 6, len=400)
-plot(x, dbisa(x, shape=1), type="l", col="blue", ylab="Density", lwd=2,
- main="X ~ Birnbaum-Saunders(shape, scale=1)", ylim=c(0,1.3), lty=3)
-lines(x, dbisa(x, shape=2), col="red", lty=2, lwd=2)
-lines(x, dbisa(x, shape=0.5), col="green", lty=1, lwd=2)
-legend(x=3, y=0.9, legend=paste("shape =",c(0.5,1,2)),
- col=c("green","blue","red"), lty=1:3, lwd=2)
+x <- seq(0, 6, len = 400)
+plot(x, dbisa(x, shape = 1), type = "l", col = "blue",
+ ylab = "Density", lwd = 2, ylim = c(0,1.3), lty = 3,
+ main = "X ~ Birnbaum-Saunders(shape, scale = 1)")
+lines(x, dbisa(x, shape = 2), col = "red", lty = 2, lwd = 2)
+lines(x, dbisa(x, shape = 0.5), col = "green", lty = 1, lwd = 2)
+legend(x = 3, y = 0.9, legend = paste("shape = ", c(0.5, 1, 2)),
+       col = c("green", "blue", "red"), lty = 1:3, lwd = 2)
-shape = 1
-x = seq(0.0, 4, len=401)
-plot(x, dbisa(x, shape=shape), type="l", col="blue", las=1, ylab="",
- main="Blue is density, red is cumulative distribution function",
- sub="Purple lines are the 10,20,...,90 percentiles", ylim=0:1)
-abline(h=0, col="blue", lty=2)
-lines(x, pbisa(x, shape=shape), col="red")
-probs = seq(0.1, 0.9, by=0.1)
-Q = qbisa(probs, shape=shape)
-lines(Q, dbisa(Q, shape=shape), col="purple", lty=3, type="h")
-pbisa(Q, shape=shape) - probs # Should be all zero
-abline(h=probs, col="purple", lty=3)
-lines(Q, pbisa(Q, shape), col="purple", lty=3, type="h")
+shape <- 1; x <- seq(0.0, 4, len = 401)
+plot(x, dbisa(x, shape = shape), type = "l", col = "blue", las = 1, ylab = "",
+ main = "Blue is density, red is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1)
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pbisa(x, shape = shape), col = "red")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qbisa(probs, shape = shape)
+lines(Q, dbisa(Q, shape = shape), col = "purple", lty = 3, type = "h")
+pbisa(Q, shape = shape) - probs # Should be all zero
+abline(h = probs, col = "purple", lty = 3)
+lines(Q, pbisa(Q, shape), col = "purple", lty = 3, type = "h")
}
}
\keyword{distribution}
diff --git a/man/bivgamma.mckay.Rd b/man/bivgamma.mckay.Rd
index 2e0a988..3d8f874 100644
--- a/man/bivgamma.mckay.Rd
+++ b/man/bivgamma.mckay.Rd
@@ -10,7 +10,7 @@
\usage{
bivgamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
iscale = NULL, ishape1 = NULL, ishape2 = NULL,
- imethod=1, zero = 1)
+ imethod = 1, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,15 +19,18 @@ bivgamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
parameters \eqn{a}, \eqn{p} and \eqn{q} respectively.
See \code{\link{Links}} for more choices.
+
}
\item{iscale, ishape1, ishape2}{
Optional initial values for \eqn{a}, \eqn{p} and \eqn{q} respectively.
The default is to compute them internally.
+
}
\item{imethod, zero}{
See \code{\link{CommonVGAMffArguments}}.
+
}
}
\details{
@@ -115,16 +118,16 @@ New York: Springer.
}
\examples{
-shape1 = exp(1); shape2 = exp(2); scalepar = exp(3)
-mdata = data.frame(y1 = rgamma(nn <- 1000, shape=shape1, scale=scalepar))
-mdata = transform(mdata, zedd = rgamma(nn, shape=shape2, scale=scalepar))
-mdata = transform(mdata, y2 = y1 + zedd) # Z is defined as Y2-y1|Y1=y1
-fit = vglm(cbind(y1, y2) ~ 1, bivgamma.mckay, mdata, trace = TRUE)
+shape1 <- exp(1); shape2 <- exp(2); scalepar <- exp(3)
+mdata <- data.frame(y1 = rgamma(nn <- 1000, shape = shape1, scale = scalepar))
+mdata <- transform(mdata, zedd = rgamma(nn, shape = shape2, scale = scalepar))
+mdata <- transform(mdata, y2 = y1 + zedd) # Z is defined as Y2-y1|Y1=y1
+fit <- vglm(cbind(y1, y2) ~ 1, bivgamma.mckay, mdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
-colMeans(depvar(fit)) # Check moments
+colMeans(depvar(fit)) # Check moments
head(fitted(fit), 1)
}
\keyword{models}
diff --git a/man/bmi.nz.Rd b/man/bmi.nz.Rd
index 11e5464..8bba559 100644
--- a/man/bmi.nz.Rd
+++ b/man/bmi.nz.Rd
@@ -48,7 +48,7 @@ Health Study: design and baseline findings.
}
\examples{
\dontrun{ with(bmi.nz, plot(age, BMI, col = "blue"))
-fit = vgam(BMI ~ s(age, df = c(2, 4, 2)), fam = lms.yjn, bmi.nz, trace = TRUE)
+fit <- vgam(BMI ~ s(age, df = c(2, 4, 2)), lms.yjn, data = bmi.nz, trace = TRUE)
qtplot(fit, pcol = "blue", tcol = "brown", lcol = "brown") }
}
\keyword{datasets}
diff --git a/man/borel.tanner.Rd b/man/borel.tanner.Rd
index de04958..f37a55e 100644
--- a/man/borel.tanner.Rd
+++ b/man/borel.tanner.Rd
@@ -109,8 +109,8 @@ Boston: Birkhauser.
}
\examples{
-bdata = data.frame(y = rbort(n <- 200))
-fit = vglm(y ~ 1, borel.tanner, bdata, trace = TRUE, crit = "c")
+bdata <- data.frame(y = rbort(n <- 200))
+fit <- vglm(y ~ 1, borel.tanner, bdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/calibrate.Rd b/man/calibrate.Rd
index c67a111..6416a69 100644
--- a/man/calibrate.Rd
+++ b/man/calibrate.Rd
@@ -8,17 +8,25 @@
particular `methods' which depend on the `class' of the first
argument.
+
}
\usage{
calibrate(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{object}{ An object for which a calibration is desired. }
- \item{\dots}{ Additional arguments affecting the calibration produced.
+ \item{object}{ An object for which a calibration is desired.
+
+
+ }
+ \item{\dots}{
+ Additional arguments affecting the calibration produced.
Usually the most important argument in \code{\dots} is
\code{newdata} which, for \code{calibrate}, contains new
- \emph{response} data, \bold{Y}, say. }
+ \emph{response} data, \bold{Y}, say.
+
+
+ }
}
\details{
Given a regression model with explanatory variables \bold{X} and
@@ -27,6 +35,7 @@ calibrate(object, ...)
regression model.
It can be loosely thought of as the opposite of \code{\link{predict}}
(which takes an \bold{X} and returns a \bold{Y}.)
+
}
\value{
@@ -38,6 +47,8 @@ calibrate(object, ...)
linear combinations of the \bold{X}).
See the specific \code{calibrate} methods functions to see
what they return.
+
+
}
%\references{
%}
@@ -46,33 +57,37 @@ calibrate(object, ...)
This function was not called \code{predictx} because of the
inability of constrained ordination models to return \bold{X};
they can only return the latent variable values (site scores) instead.
+
+
}
\seealso{
\code{\link{predict}},
\code{\link{calibrate.qrrvglm}}.
+
+
}
\examples{
\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
-p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, Rank = 1,
- df1.nl = c(Zoraspin=2, 1.9),
- Bestof = 3, Crow1positive = TRUE)
+p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ df1.nl = c(Zoraspin = 2, 1.9),
+ Bestof = 3, Crow1positive = TRUE)
-siteNos = 1:2 # Calibrate these sites
-cp1 = calibrate(p1, new=data.frame(p1@y[siteNos,]), trace=TRUE)
+siteNos <- 1:2 # Calibrate these sites
+cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos,]), trace = TRUE)
# Graphically compare the actual site scores with their calibrated values
-persp(p1, main="Solid=actual, dashed=calibrated site scores",
- label=TRUE, col="blue", las=1)
-abline(v=lv(p1)[siteNos], lty=1, col=1:length(siteNos)) # actual site scores
-abline(v=cp1, lty=2, col=1:length(siteNos)) # calibrated values
+persp(p1, main = "Solid=actual, dashed=calibrated site scores",
+ label = TRUE, col = "blue", las = 1)
+abline(v = lv(p1)[siteNos], lty = 1, col = 1:length(siteNos)) # actual site scores
+abline(v = cp1, lty = 2, col = 1:length(siteNos)) # calibrated values
}
}
\keyword{models}
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index e90bc84..012dda2 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -16,12 +16,15 @@ calibrate.qrrvglm(object, newdata = NULL,
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ The fitted CQO/CAO model.
+
}
\item{newdata}{ A data frame with new response data
(usually new species data).
The default is to use the original data used to fit the model;
however, the calibration may take a long time to compute
because the computations are expensive.
+
+
}
\item{type}{ What type of result is to be returned.
The first are the calibrated latent variables or site scores.
@@ -39,6 +42,8 @@ calibrate.qrrvglm(object, newdata = NULL,
\code{"vcov"} is unavailable, so all 3 are returned.
For CQO models,
\code{"vcov"} is available, so all 4 are returned.
+
+
}
\item{initial.vals}{ Initial values for the search.
For rank-1 models, this should be a vector of length
@@ -47,6 +52,8 @@ calibrate.qrrvglm(object, newdata = NULL,
the number of rows in \code{newdata}.
The default is a grid defined by arguments in
\code{\link{calibrate.qrrvglm.control}}.
+
+
}
\item{\dots}{
Arguments that are fed into
@@ -72,24 +79,32 @@ calibrate.qrrvglm(object, newdata = NULL,
}
\value{
The argument \code{type} determines what is returned.
- If \code{type = "all3or4"} then all the \code{type} values are returned
- in a list, with the following components.
+ If \code{type = "all3or4"} then all the \code{type} values
+ are returned in a list, with the following components.
Each component has length \code{nrow(newdata)}.
-
-
- \item{lv}{Calibrated latent variables or site scores. }
+
+
+ \item{lv}{Calibrated latent variables or site scores.
+
+
+ }
\item{predictors }{linear/quadratic or additive predictors.
For example, for Poisson families, this will be on a log scale,
- and for binomial families, this will be on a logit scale.}
+ and for binomial families, this will be on a logit scale.
+
+ }
\item{response}{Fitted values of the response, evaluated at the
- calibrated latent variables or site scores.}
- \item{vcov}{Estimated variance-covariance matrix of the
calibrated latent variables or site scores.
- Actually, these are stored in an array whose last dimension
- is \code{nrow(newdata)}.
+
}
-}
-\references{
+ \item{vcov}{Estimated variance-covariance matrix of the
+ calibrated latent variables or site scores. Actually,
+ these are stored in an array whose last dimension is
+ \code{nrow(newdata)}.
+
+
+ }
+}
+\references{
Yee, T. W. (2012)
On constrained and unconstrained
@@ -132,16 +147,16 @@ Cambridge.
}
\examples{
\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardize the environmental variables
+hspider[,1:6] <- scale(hspider[, 1:6]) # Standardize the environmental variables
set.seed(123)
-p1 = cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+p1 <- cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
WaterCon + BareSand + FallTwig +
CoveMoss + CoveHerb + ReflLux,
family = poissonff, data = hspider, Rank = 1,
IToler = TRUE, Crow1positive = TRUE)
-siteNos = 1:2 # Calibrate these sites
-cp1 = calibrate(p1, new=data.frame(p1@y[siteNos,]), trace = TRUE)
+siteNos <- 1:2 # Calibrate these sites
+cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
}
\dontrun{
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index ca41392..4c253b2 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -85,17 +85,17 @@ On constrained and unconstrained quadratic ordination.
}
\examples{
-\dontrun{ hspider[,1:6] = scale(hspider[,1:6]) # Needed when ITol = TRUE
+\dontrun{ hspider[,1:6] <- scale(hspider[,1:6]) # Needed when ITol = TRUE
set.seed(123)
-p1 = cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
- Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, ITol = TRUE)
+p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
+ Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, ITol = TRUE)
sort(p1@misc$deviance.Bestof) # A history of all the iterations
-siteNos = 1:2 # Calibrate these sites
-cp1 = calibrate(p1, new = data.frame(p1@y[siteNos,]), trace = TRUE)
+siteNos <- 1:2 # Calibrate these sites
+cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
}
\dontrun{
diff --git a/man/cao.Rd b/man/cao.Rd
index 975e8fa..a665c1b 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -25,7 +25,7 @@ cao(formula, family, data = list(),
formula is used to construct the latent variables, upon which the
smooths are applied. All the variables in the formula are used
for the construction of latent variables except for those specified
- by the argument \code{Norrr}, which is itself a formula. The LHS
+ by the argument \code{noRRR}, which is itself a formula. The LHS
of the formula contains the response variables, which should be a
matrix with each column being a response (species).
@@ -178,14 +178,14 @@ cao(formula, family, data = list(),
Currently, only \code{Rank=1} is implemented, and only
- \code{Norrr = ~1} models are handled.
+ \code{noRRR = ~1} models are handled.
% Poisson and binary responses are implemented (viz.,
% \code{\link{poissonff}}, \code{\link{binomialff}}), and
% dispersion parameters for these must be assumed known. Hence using
% \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}} will
-% currently fail. Also, currently, only \code{Norrr = ~ 1} models are
+% currently fail. Also, currently, only \code{noRRR = ~ 1} models are
% handled.
@@ -197,7 +197,7 @@ cao(formula, family, data = list(),
The \eqn{\eta_s}{eta_s} is an additive predictor for species \eqn{s},
and it models the probabilities of presence as an additive model on
the logit scale. The matrix \eqn{C} is estimated from the data, as
- well as the smooth functions \eqn{f_s}. The argument \code{Norrr = ~
+ well as the smooth functions \eqn{f_s}. The argument \code{noRRR = ~
1} specifies that the vector \eqn{x_1}{x_1}, defined for RR-VGLMs
and QRR-VGLMs, is simply a 1 for an intercept.
Here, the intercept in the model is absorbed into the functions.
@@ -306,35 +306,35 @@ Constrained additive ordination.
\examples{
\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
set.seed(149) # For reproducible results
-ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, Rank = 1,
- df1.nl = c(Pardpull=2.7, 2.5),
- Bestof = 7, Crow1positive = FALSE)
+ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1,
+ df1.nl = c(Pardpull=2.7, 2.5),
+ Bestof = 7, Crow1positive = FALSE)
sort(ap1@misc$deviance.Bestof) # A history of all the iterations
Coef(ap1)
ccoef(ap1)
-par(mfrow=c(2,2))
+par(mfrow = c(2, 2))
plot(ap1) # All the curves are unimodal; some quite symmetric
-par(mfrow=c(1,1), las=1)
-index = 1:ncol(ap1@y)
-lvplot(ap1, lcol=index, pcol=index, y=TRUE)
+par(mfrow = c(1, 1), las = 1)
+index <- 1:ncol(depvar(ap1))
+lvplot(ap1, lcol = index, pcol = index, y = TRUE)
-trplot(ap1, label=TRUE, col=index)
-abline(a=0, b=1, lty=2)
+trplot(ap1, label = TRUE, col = index)
+abline(a = 0, b = 1, lty = 2)
-trplot(ap1, label=TRUE, col="blue", log="xy", whichSp=c(1,3))
-abline(a=0, b=1, lty=2)
+trplot(ap1, label = TRUE, col = "blue", log = "xy", whichSp = c(1,3))
+abline(a = 0, b = 1, lty = 2)
-persp(ap1, col=index, lwd=2, label=TRUE)
-abline(v=Opt(ap1), lty=2, col=index)
-abline(h=Max(ap1), lty=2, col=index)
+persp(ap1, col = index, lwd = 2, label = TRUE)
+abline(v = Opt(ap1), lty = 2, col = index)
+abline(h = Max(ap1), lty = 2, col = index)
}
}
\keyword{models}
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index 35389c7..9c06465 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -10,10 +10,11 @@
}
\usage{
-cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit=NULL,
- Crow1positive=TRUE, epsilon = 1.0e-05, Etamat.colmax = 10,
- GradientFunction=FALSE, iKvector = 0.1, iShape = 0.1,
- Norrr = ~ 1, SmallNo = 5.0e-13, Use.Init.Poisson.QO=TRUE,
+cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
+ Crow1positive = TRUE, epsilon = 1.0e-05, Etamat.colmax = 10,
+ GradientFunction = FALSE, iKvector = 0.1, iShape = 0.1,
+ noRRR = ~ 1, Norrr = NA,
+ SmallNo = 5.0e-13, Use.Init.Poisson.QO = TRUE,
Bestof = if (length(Cinit)) 1 else 10, maxitl = 10,
imethod = 1, bf.epsilon = 1.0e-7, bf.maxit = 10,
Maxit.optim = 250, optim.maxit = 20, SD.sitescores = 1.0,
@@ -25,7 +26,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit=NULL,
\item{Rank}{
The numerical rank \eqn{R} of the model, i.e., the number of latent
- variables. Currently only \code{Rank=1} is implemented.
+ variables. Currently only \code{Rank = 1} is implemented.
}
\item{all.knots}{
@@ -48,7 +49,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit=NULL,
\item{Crow1positive}{
Logical vector of length \code{Rank} (recycled if necessary): are
the elements of the first row of \bold{C} positive? For example,
- if \code{Rank} is 4, then specifying \code{Crow1positive=c(FALSE,
+ if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE,
TRUE)} will force \bold{C[1,1]} and \bold{C[1,3]} to be negative,
and \bold{C[1,2]} and \bold{C[1,4]} to be positive.
@@ -65,7 +66,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit=NULL,
of memory used by \code{.Init.Poisson.QO()}. It is the maximum
number of columns allowed for the pseudo-response and its weights.
In general, the larger the value, the better the initial value.
- Used only if \code{Use.Init.Poisson.QO=TRUE}.
+ Used only if \code{Use.Init.Poisson.QO = TRUE}.
}
@@ -103,18 +104,27 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit=NULL,
%}
- \item{Norrr}{
+ \item{noRRR}{
Formula giving terms that are \emph{not} to be included in the
reduced-rank regression (or formation of the latent variables).
The default is to omit the intercept term from the latent variables.
- Currently, only \code{Norrr = ~ 1} is implemented.
+ Currently, only \code{noRRR = ~ 1} is implemented.
+
+ }
+ \item{Norrr}{
+ Defunct. Please use \code{noRRR}.
+ Use of \code{Norrr} will become an error soon.
+
}
+
+
+
% \item{Parscale}{
% Numerical and positive-valued vector of length \bold{C}
% (recycled if necessary). Passed into \code{optim(...,
-% control=list(parscale=Parscale))}; the elements of \bold{C} become
-% \bold{C} / \code{Parscale}. Setting \code{ITolerances=TRUE} results
+% control = list(parscale = Parscale))}; the elements of \bold{C} become
+% \bold{C} / \code{Parscale}. Setting \code{ITolerances = TRUE} results
% in line searches that are very large, therefore \bold{C} has to be
% scaled accordingly to avoid large step sizes.
@@ -264,11 +274,11 @@ London: Chapman & Hall.
}
\author{T. W. Yee}
\note{
- The argument \code{df1.nl} can be inputted in the format \code{c(spp1=2,
- spp2=3, 2.5)}, say, meaning the default value is 2.5, but two species
+ The argument \code{df1.nl} can be inputted in the format \code{c(spp1 = 2,
+ spp2 = 3, 2.5)}, say, meaning the default value is 2.5, but two species
have alternative values.
- If \code{spar1=0} and \code{df1.nl=0} then this represents fitting
+ If \code{spar1 = 0} and \code{df1.nl = 0} then this represents fitting
linear functions (CLO). Currently, this is handled in the awkward
manner of setting \code{df1.nl} to be a small positive value, so that
the smooth is almost linear but not quite.
@@ -280,29 +290,29 @@ London: Chapman & Hall.
}
\examples{\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
-ap1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider,
- df1.nl = c(Zoraspin=2.3, 2.1),
- Bestof = 10, Crow1positive = FALSE)
+ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider,
+ df1.nl = c(Zoraspin = 2.3, 2.1),
+ Bestof = 10, Crow1positive = FALSE)
sort(ap1@misc$deviance.Bestof) # A history of all the iterations
Coef(ap1)
-par(mfrow=c(2,3)) # All or most of the curves are unimodal; some are
-plot(ap1, lcol = "blue") # quite symmetric. Hence a CQO model should be ok
+par(mfrow = c(2, 3)) # All or most of the curves are unimodal; some are
+plot(ap1, lcol = "blue") # quite symmetric. Hence a CQO model should be ok
-par(mfrow=c(1,1), las=1)
-index = 1:ncol(ap1@y) # lvplot is jagged because only 28 sites
-lvplot(ap1, lcol = index, pcol = index, y=TRUE)
+par(mfrow = c(1, 1), las = 1)
+index <- 1:ncol(depvar(ap1)) # lvplot is jagged because only 28 sites
+lvplot(ap1, lcol = index, pcol = index, y = TRUE)
-trplot(ap1, label=TRUE, col=index)
+trplot(ap1, label = TRUE, col = index)
abline(a = 0, b = 1, lty = 2)
-persp(ap1, label=TRUE, col=1:4)
+persp(ap1, label = TRUE, col = 1:4)
}
}
\keyword{models}
@@ -321,7 +331,7 @@ persp(ap1, label=TRUE, col=1:4)
%% GradientFunction = FALSE,
% iKvector = 0.1,
% iShape = 0.1,
-% Norrr = ~1,
+% noRRR = ~1,
%% Parscale = 1,
% SmallNo = 5e-13,
% Use.Init.Poisson.QO = TRUE,
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index 61193cd..2c55a7a 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -94,6 +94,7 @@ Singapore: World Scientific.
}
\examples{
+\dontrun{
cdata <- data.frame(y = rcard(n = 1000, mu = 4, rho = 0.45))
fit <- vglm(y ~ 1, cardioid, cdata, trace = TRUE)
coef(fit, matrix=TRUE)
@@ -101,6 +102,7 @@ Coef(fit)
c(with(cdata, mean(y)), head(fitted(fit), 1))
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index f945d0c..ed123db 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -80,62 +80,62 @@ cenpoisson(link = "loge", imu = NULL)
}
\examples{
# Example 1: right censored data
-set.seed(123); U = 20
-cdata = data.frame(y = rpois(N <- 100, exp(3)))
-cdata = transform(cdata, cy = pmin(U, y),
- rcensored = (y >= U))
-cdata = transform(cdata, status = ifelse(rcensored, 0, 1))
+set.seed(123); U <- 20
+cdata <- data.frame(y = rpois(N <- 100, exp(3)))
+cdata <- transform(cdata, cy = pmin(U, y),
+ rcensored = (y >= U))
+cdata <- transform(cdata, status = ifelse(rcensored, 0, 1))
with(cdata, table(cy))
with(cdata, table(rcensored))
with(cdata, table(ii <- print(SurvS4(cy, status)))) # Check; U+ means >= U
-fit = vglm(SurvS4(cy, status) ~ 1, cenpoisson, cdata, trace = TRUE)
+fit <- vglm(SurvS4(cy, status) ~ 1, cenpoisson, cdata, trace = TRUE)
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check; U+ means >= U
# Example 2: left censored data
-L = 15
-cdata = transform(cdata, cY = pmax(L, y),
- lcensored = y < L) # Note y < L, not cY == L or y <= L
-cdata = transform(cdata, status = ifelse(lcensored, 0, 1))
+L <- 15
+cdata <- transform(cdata, cY = pmax(L, y),
+ lcensored = y < L) # Note y < L, not cY == L or y <= L
+cdata <- transform(cdata, status = ifelse(lcensored, 0, 1))
with(cdata, table(cY))
with(cdata, table(lcensored))
with(cdata, table(ii <- print(SurvS4(cY, status, type = "left")))) # Check
-fit = vglm(SurvS4(cY, status, type = "left") ~ 1, cenpoisson, cdata, trace = TRUE)
+fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cenpoisson, cdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Example 3: interval censored data
-cdata = transform(cdata, Lvec = rep(L, len = N),
- Uvec = rep(U, len = N))
-cdata = transform(cdata, icensored = Lvec <= y & y < Uvec) # Not lcensored or rcensored
+cdata <- transform(cdata, Lvec = rep(L, len = N),
+ Uvec = rep(U, len = N))
+cdata <- transform(cdata, icensored = Lvec <= y & y < Uvec) # Not lcensored or rcensored
with(cdata, table(icensored))
-cdata = transform(cdata, status = rep(3, N)) # 3 means interval censored
-cdata = transform(cdata, status = ifelse(rcensored, 0, status)) # 0 means right censored
-cdata = transform(cdata, status = ifelse(lcensored, 2, status)) # 2 means left censored
+cdata <- transform(cdata, status = rep(3, N)) # 3 means interval censored
+cdata <- transform(cdata, status = ifelse(rcensored, 0, status)) # 0 means right censored
+cdata <- transform(cdata, status = ifelse(lcensored, 2, status)) # 2 means left censored
# Have to adjust Lvec and Uvec because of the (start, end] format:
-cdata$Lvec[with(cdata, icensored)] = cdata$Lvec[with(cdata, icensored)] - 1
-cdata$Uvec[with(cdata, icensored)] = cdata$Uvec[with(cdata, icensored)] - 1
-cdata$Lvec[with(cdata, lcensored)] = cdata$Lvec[with(cdata, lcensored)] # Unchanged
-cdata$Lvec[with(cdata, rcensored)] = cdata$Uvec[with(cdata, rcensored)] # Unchanged
+cdata$Lvec[with(cdata, icensored)] <- cdata$Lvec[with(cdata, icensored)] - 1
+cdata$Uvec[with(cdata, icensored)] <- cdata$Uvec[with(cdata, icensored)] - 1
+cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)] # Unchanged
+cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)] # Unchanged
with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check
-fit = vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
- cenpoisson, cdata, trace = TRUE)
+fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
+ cenpoisson, cdata, trace = TRUE)
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check
# Example 4: Add in some uncensored observations
-index = (1:N)[with(cdata, icensored)]
-index = head(index, 4)
-cdata$status[index] = 1 # actual or uncensored value
-cdata$Lvec[index] = cdata$y[index]
+index <- (1:N)[with(cdata, icensored)]
+index <- head(index, 4)
+cdata$status[index] <- 1 # actual or uncensored value
+cdata$Lvec[index] <- cdata$y[index]
with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status,
type = "interval")))) # Check
-fit = vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
- cenpoisson, cdata, trace = TRUE, crit = "c")
+fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
+ cenpoisson, cdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
table(print(depvar(fit))) # Another check
}
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
index 7aa49f0..2e2da41 100644
--- a/man/cgumbel.Rd
+++ b/man/cgumbel.Rd
@@ -99,30 +99,30 @@ London: Springer-Verlag.
\examples{
# Example 1
-ystar = venice[["r1"]] # Use the first order statistic as the response
-n = length(ystar)
-L = runif(n, 100, 104) # Lower censoring points
-U = runif(n, 130, 135) # Upper censoring points
-y = pmax(L, ystar) # Left censored
-y = pmin(U, y) # Right censored
-extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
-fit = vglm(y ~ scale(year), data=venice, trace=TRUE, extra=extra,
- cgumbel(mean=FALSE, perc=c(5,25,50,75,95)))
-coef(fit, matrix=TRUE)
+ystar <- venice[["r1"]] # Use the first order statistic as the response
+nn <- length(ystar)
+L <- runif(nn, 100, 104) # Lower censoring points
+U <- runif(nn, 130, 135) # Upper censoring points
+y <- pmax(L, ystar) # Left censored
+y <- pmin(U, y) # Right censored
+extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit <- vglm(y ~ scale(year), data = venice, trace = TRUE, extra = extra,
+ cgumbel(mean = FALSE, perc = c(5, 25, 50, 75, 95)))
+coef(fit, matrix = TRUE)
head(fitted(fit))
fit@extra
# Example 2: simulated data
-n = 1000
-ystar = rgumbel(n, loc=1, scale=exp(0.5)) # The uncensored data
-L = runif(n, -1, 1) # Lower censoring points
-U = runif(n, 2, 5) # Upper censoring points
-y = pmax(L, ystar) # Left censored
-y = pmin(U, y) # Right censored
-\dontrun{par(mfrow=c(1,2)); hist(ystar); hist(y);}
-extra = list(leftcensored = ystar < L, rightcensored = ystar > U)
-fit = vglm(y ~ 1, trace=TRUE, extra=extra, cgumbel)
-coef(fit, matrix=TRUE)
+nn <- 1000
+ystar <- rgumbel(nn, loc = 1, scale = exp(0.5)) # The uncensored data
+L <- runif(nn, -1, 1) # Lower censoring points
+U <- runif(nn, 2, 5) # Upper censoring points
+y <- pmax(L, ystar) # Left censored
+y <- pmin(U, y) # Right censored
+\dontrun{par(mfrow = c(1, 2)); hist(ystar); hist(y);}
+extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
+fit <- vglm(y ~ 1, trace = TRUE, extra = extra, cgumbel)
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/chest.nz.Rd b/man/chest.nz.Rd
index 3f0604a..c5e602a 100644
--- a/man/chest.nz.Rd
+++ b/man/chest.nz.Rd
@@ -35,9 +35,11 @@
}
\examples{
-fit = vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)),
- binom2.or(exchan = TRUE, zero = NULL), data = chest.nz)
+\dontrun{
+fit <- vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)),
+ binom2.or(exchan = TRUE, zero = NULL), data = chest.nz)
coef(fit, matrix = TRUE)
+}
\dontrun{ plot(fit, which.cf = 2, se = TRUE) }
}
\keyword{datasets}
diff --git a/man/chinese.nz.Rd b/man/chinese.nz.Rd
index 06f9b23..ffa1d79 100644
--- a/man/chinese.nz.Rd
+++ b/man/chinese.nz.Rd
@@ -9,7 +9,7 @@
}
\usage{data(chinese.nz)}
\format{
- A data frame with 26 observations on the following 4 variables.
+ A data frame with 27 observations on the following 4 variables.
\describe{
\item{\code{year}}{Year. }
\item{\code{male}}{Number of Chinese males. }
@@ -18,16 +18,12 @@
}
}
\details{
- The NZ total for the years 1867 and 1871 exclude the Maori population.
- The second value of 4583 looks erroneous, as seen by the plot below.
-
-
Historically, there was a large exodus of Chinese from the Guangdong
region starting in the mid-1800s to the gold fields of
South Island of New Zealand,
California,
and Southern Australia, etc.
- Racial discrimination then meant that only men were allowed
+ Discrimination then meant that only men were allowed
entry, to hinder permanent settlement.
In the case of New Zealand, the government relaxed its
immigration laws after WWII to allow wives of Chinese already in NZ to join them
@@ -37,38 +33,49 @@
normal over time.
+  The NZ totals for the years 1867 and 1871 exclude the Maori population.
+ Three modifications have been made to the female column to make
+ the data internally consistent with the original table.
+
+
+% The second value of 4583 looks erroneous, as seen by the plot below.
+
+
}
%\source{
%}
\references{
Page 6 of \emph{Aliens At My Table: Asians as New Zealanders See Them}
- by M. Ip and N. Murphy,
- (2005), Penguin.
+  by M. Ip and N. Murphy (2005).
+ Penguin Books.
+ Auckland, New Zealand.
}
\examples{
\dontrun{ par(mfrow = c(1, 2))
-plot(female/(male+female) ~ year, chinese.nz, type = "b",
+plot(female / (male + female) ~ year, chinese.nz, type = "b",
ylab = "Proportion", col = "blue", las = 1,
main = "Proportion of NZ Chinese that are female")
abline(h = 0.5, lty = "dashed", col = "gray")
-fit1.cnz = vglm(cbind(female, male) ~ year, binomialff, chinese.nz)
-fit2.cnz = vglm(cbind(female, male) ~ poly(year, 2), binomialff, chinese.nz)
-fit4.cnz = vglm(cbind(female, male) ~ bs(year, 4), binomialff, chinese.nz)
+fit1.cnz <- vglm(cbind(female, male) ~ year, binomialff, chinese.nz)
+fit2.cnz <- vglm(cbind(female, male) ~ poly(year, 2), binomialff, chinese.nz)
+fit4.cnz <- vglm(cbind(female, male) ~ bs(year, 5), binomialff, chinese.nz)
lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple")
lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green")
-lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange")
+lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange", lwd = 2)
legend("bottomright", col = c("purple", "green", "orange"),
lty = 1, leg = c("linear", "quadratic", "B-spline"))
-
plot(100*(male+female)/nz ~ year, chinese.nz, type = "b", ylab = "Percent",
ylim = c(0, max(100*(male+female)/nz)), col = "blue", las = 1,
main = "Percent of NZers that are Chinese")
abline(h = 0, lty = "dashed", col = "gray") }
}
\keyword{datasets}
+
+
+% Albany, Auckland, New Zealand.
diff --git a/man/constraints.Rd b/man/constraints.Rd
index f8ec53b..20a4f42 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -10,7 +10,8 @@
}
\usage{
constraints(object, ...)
-constraints.vlm(object, type = c("lm", "term"), all = TRUE, which, ...)
+constraints.vlm(object, type = c("lm", "term"), all = TRUE, which,
+ matrix.out = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -34,6 +35,14 @@ constraints.vlm(object, type = c("lm", "term"), all = TRUE, which, ...)
}
+ \item{matrix.out}{
+ Logical. If \code{TRUE} then the constraint matrices
+    are \code{\link[base]{cbind}()}ed together.
+ The result is usually more compact because the default
+ is a list of constraint matrices.
+
+
+ }
\item{\dots}{
Other possible arguments such as \code{type}.
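Illustrative sketch of the new matrix.out argument (an editorial addition, not part of
the upstream patch; the pneumo data and the cumulative() family are taken from other
help pages in this package, so details may differ slightly by version):

pneumo <- transform(pneumo, let = log(exposure.time))
cfit <- vglm(cbind(normal, mild, severe) ~ let,
             cumulative(parallel = TRUE), data = pneumo)
constraints(cfit)                     # Default: a named list of constraint matrices
constraints(cfit, matrix.out = TRUE)  # The same matrices cbind()ed into one matrix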
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 291b509..75bdacd 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -25,6 +25,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
variables in each linear predictor can be chosen by specifying
constraint matrices.
+
}
\item{family}{
a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
@@ -44,25 +45,30 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
Also, \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}}
may or may not work.
+
% \code{negbinomial(deviance = TRUE)},
% \code{gamma2(deviance = TRUE)}.
+
}
\item{data}{
an optional data frame containing the variables in the model.
By default the variables are taken from \code{environment(formula)},
typically the environment from which \code{cqo} is called.
+
}
\item{weights}{ an optional vector or matrix of (prior) weights
to be used in the fitting process.
Currently, this argument should not be used.
+
}
\item{subset}{
an optional logical vector specifying a subset of
observations to be used in the fitting process.
+
}
\item{na.action}{
a function which indicates what should happen when the data contain
@@ -70,6 +76,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
\code{\link[base]{options}}, and is \code{na.fail} if that is unset.
The ``factory-fresh'' default is \code{na.omit}.
+
}
\item{etastart}{
starting values for the linear predictors.
@@ -77,6 +84,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
If \eqn{M = 1} then it may be a vector.
Currently, this argument probably should not be used.
+
}
\item{mustart}{
starting values for the
@@ -84,17 +92,20 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
Some family functions do not make use of this argument.
Currently, this argument probably should not be used.
+
}
\item{coefstart}{
starting values for the
coefficient vector.
Currently, this argument probably should not be used.
+
}
\item{control}{
a list of parameters for controlling the fitting process.
See \code{\link{qrrvglm.control}} for details.
+
}
\item{offset}{
This argument must not be used.
@@ -110,11 +121,13 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
The default (and presently only) method \code{cqo.fit}
uses \emph{iteratively reweighted least squares} (IRLS).
+
}
\item{model}{
a logical value indicating whether the \emph{model frame}
should be assigned in the \code{model} slot.
+
}
\item{x.arg, y.arg}{
logical values indicating whether
@@ -122,15 +135,18 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
process should be assigned in the \code{x} and \code{y} slots.
Note the model matrix is the LM model matrix.
+
% ; to get the VGLM
% model matrix type \code{model.matrix(vglmfit)} where
% \code{vglmfit} is a \code{vglm} object.
+
}
\item{contrasts}{
an optional list. See the \code{contrasts.arg}
of \code{model.matrix.default}.
+
}
\item{constraints}{
an optional list of constraint matrices.
@@ -145,11 +161,13 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
Constraint matrices for \eqn{x_2}{x_2} variables are taken as the
identity matrix.
+
}
\item{extra}{
an optional list with any extra information that might be needed
by the family function.
+
}
% \item{qr.arg}{ logical value indicating whether
% the slot \code{qr}, which returns the QR decomposition of the
@@ -159,10 +177,12 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
logical value indicating whether smart prediction
(\code{\link{smartpred}}) will be used.
+
}
\item{\dots}{
further arguments passed into \code{\link{qrrvglm.control}}.
+
}
}
\details{
@@ -196,7 +216,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
contain the regression coefficients. The tolerance matrices
satisfy \eqn{T_s = -\frac12 D_s^{-1}}{T_s = -(0.5 D_s^(-1)}.
Many important CQO details are directly related to arguments
- in \code{\link{qrrvglm.control}}, e.g., the argument \code{Norrr}
+ in \code{\link{qrrvglm.control}}, e.g., the argument \code{noRRR}
specifies which variables comprise \eqn{x_1}{x_1}.
@@ -320,7 +340,7 @@ original FORTRAN code into C.
\code{\link{vglm}} and \code{\link{rrvglm.control}}.
The most important arguments are
\code{Rank},
- \code{Norrr},
+ \code{noRRR},
\code{Bestof},
\code{ITolerances},
\code{EqualTolerances},
@@ -438,11 +458,12 @@ Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\examples{
\dontrun{
# Example 1; Fit an unequal tolerances model to the hunting spiders data
-hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental variables
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental variables
set.seed(1234) # For reproducibility of the results
p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
@@ -560,5 +581,5 @@ for(ii in 1:S) {
\keyword{models}
\keyword{regression}
-%legend("topright", x=1, y=135, leg = colnames(p1ut@y), col = clr,
+%legend("topright", x=1, y=135, leg = colnames(depvar(p1ut)), col = clr,
% pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
diff --git a/man/crashes.Rd b/man/crashes.Rd
index b3594fa..174a860 100644
--- a/man/crashes.Rd
+++ b/man/crashes.Rd
@@ -88,9 +88,10 @@ data(alclevels)
}
\seealso{
- \code{\link{rrvglm}},
- \code{\link{rcim}},
- \code{\link{grc}}.
+ \code{\link[VGAM]{rrvglm}},
+ \code{\link[VGAM]{rcim}},
+ \code{\link[VGAM]{grc}}.
+
}
\examples{
@@ -106,9 +107,11 @@ abline(v = sort(1 + c((0:7) * 24, (0:6) * 24 + 12)), lty = "dashed",
col = c("purple", "orange")) }
# Goodmans RC models
+\dontrun{
fitgrc1 <- grc(alcoff) # Rank-1 model
fitgrc2 <- grc(alcoff, Rank = 2, Corner = FALSE, Uncor = TRUE)
-print(Coef(fitgrc2), dig = 2)
+Coef(fitgrc2)
+}
\dontrun{ biplot(fitgrc2, scaleA = 2.3, Ccol = "blue", Acol = "orange",
Clabels = as.character(1:23), xlim = c(-1.3, 2.3),
ylim = c(-1.2, 1)) }
@@ -126,5 +129,6 @@ print(Coef(fitgrc2), dig = 2)
%\alias{crashp} Table 45, p.84
%\alias{alcoff} Table 3, p.121
%\alias{alclevels} Table 2, p.132
+% print(Coef(fitgrc2), dig = 2)
diff --git a/man/cratio.Rd b/man/cratio.Rd
index e194610..eb2beb9 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -17,11 +17,13 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
Link function applied to the \eqn{M} continuation ratio probabilities.
See \code{\link{Links}} for more choices.
+
}
\item{parallel}{
A logical, or formula specifying which terms have
equal/unequal coefficients.
+
}
\item{reverse}{
Logical.
@@ -32,6 +34,7 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
\eqn{\eta_j = logit(P[Y<j+1|Y\leq j+1])}{eta_j=logit(P[Y<j+1|Y<=j+1])}
will be used.
+
}
\item{zero}{
An integer-valued vector specifying which
@@ -39,10 +42,12 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
The values must be from the set \{1,2,\ldots,\eqn{M}\}.
The default value means none are modelled as intercept-only terms.
+
}
\item{whitespace}{
See \code{\link{CommonVGAMffArguments}} for information.
+
}
}
\details{
@@ -70,14 +75,17 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
}
\references{
+
Agresti, A. (2002)
\emph{Categorical Data Analysis},
2nd ed. New York: Wiley.
+
Simonoff, J. S. (2003)
\emph{Analyzing Categorical Data},
New York: Springer-Verlag.
+
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
@@ -89,29 +97,28 @@ The \pkg{VGAM} package for categorical data analysis.
\url{http://www.jstatsoft.org/v32/i10/}.
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information and examples.
-
}
\author{ Thomas W. Yee }
\note{
- The response should be either a matrix of counts (with row sums that
- are all positive), or a factor. In both cases, the \code{y} slot
- returned by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
+ The response should be either a matrix of counts
+ (with row sums that are all positive), or a
+ factor. In both cases, the \code{y} slot returned by
+ \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix
of counts.
- For a nominal (unordered) factor response, the multinomial
- logit model (\code{\link{multinomial}}) is more appropriate.
+ For a nominal (unordered) factor response, the
+ multinomial logit model (\code{\link{multinomial}})
+ is more appropriate.
- Here is an example of the usage of the \code{parallel} argument.
- If there are covariates \code{x1}, \code{x2} and \code{x3}, then
- \code{parallel = TRUE ~ x1 + x2 -1} and
- \code{parallel = FALSE ~ x3} are equivalent. This would constrain
- the regression coefficients for \code{x1} and \code{x2} to be
- equal; those of the intercepts and \code{x3} would be different.
+ Here is an example of the usage of the \code{parallel}
+ argument. If there are covariates \code{x1}, \code{x2}
+ and \code{x3}, then \code{parallel = TRUE ~ x1 + x2 -1}
+ and \code{parallel = FALSE ~ x3} are equivalent. This
+ would constrain the regression coefficients for \code{x1}
+ and \code{x2} to be equal; those of the intercepts and
+ \code{x3} would be different.
}
@@ -120,25 +127,27 @@ contains further information and examples.
response is a matrix;
see \code{\link[base:factor]{ordered}}.
+
}
\seealso{
- \code{\link{sratio}},
- \code{\link{acat}},
- \code{\link{cumulative}},
- \code{\link{multinomial}},
- \code{\link{pneumo}},
+ \code{\link{sratio}},
+ \code{\link{acat}},
+ \code{\link{cumulative}},
+ \code{\link{multinomial}},
+ \code{\link{pneumo}},
\code{\link{logit}},
\code{\link{probit}},
\code{\link{cloglog}},
\code{\link{cauchit}}.
+
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let,
- cratio(parallel = TRUE), pneumo))
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let,
+ cratio(parallel = TRUE), pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
predict(fit)
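Hedged sketch of the parallel formula usage described in the Note above (an editorial
addition, not part of the upstream patch; the data frame odata and its covariates are
invented purely for illustration):

set.seed(1)
odata <- data.frame(x2 = runif(100), x3 = runif(100))
odata <- transform(odata, y = cut(2 * x2 + x3 + rnorm(100, sd = 0.5), breaks = 3,
                                  labels = 1:3, ordered_result = TRUE))
fit.a <- vglm(y ~ x2 + x3, cratio(parallel = TRUE ~ x2 - 1), data = odata)
fit.b <- vglm(y ~ x2 + x3, cratio(parallel = FALSE ~ x3), data = odata)
constraints(fit.a)  # x2 coefficients constrained equal across linear predictors
constraints(fit.b)  # Should specify the same constraints as fit.a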
diff --git a/man/crime.us.Rd b/man/crime.us.Rd
deleted file mode 100644
index c00972d..0000000
--- a/man/crime.us.Rd
+++ /dev/null
@@ -1,81 +0,0 @@
-\name{crime.us}
-\alias{crime.us}
-\docType{data}
-\title{
-Estimated Crime in 2009 in USA
-
-}
-\description{
-Crime totals and rates, cross-classified by US state, during 2009.
-
-}
-\usage{data(crime.us)}
-\format{
- A data frame with 50 observations on the following 22 variables.
- \describe{
- \item{\code{State}}{a character vector. White spaces have been
- replaced by underscores. }
- \item{\code{Population}}{a numeric vector}
- \item{\code{ViolentCrimeTotal}}{a numeric vector}
- \item{\code{Murder}}{a numeric vector}
- \item{\code{Rape}}{a numeric vector}
- \item{\code{Robbery}}{a numeric vector}
- \item{\code{Assault}}{a numeric vector}
- \item{\code{PropertyCrimeTotal}}{a numeric vector}
- \item{\code{Burglary}}{a numeric vector}
- \item{\code{LarcenyTheft}}{a numeric vector}
- \item{\code{MotorVehicleTheft}}{a numeric vector}
- \item{\code{ViolentCrimeRate}}{a numeric vector}
- \item{\code{MurderRate}}{a numeric vector}
- \item{\code{RapeRate}}{a numeric vector}
- \item{\code{RobberyRate}}{a numeric vector}
- \item{\code{AssaultRate}}{a numeric vector}
- \item{\code{PropertyCrimeRate}}{a numeric vector}
- \item{\code{BurglaryRate}}{a numeric vector}
- \item{\code{LarcenyTheftRate}}{a numeric vector}
- \item{\code{MotorVehicleTheftRate}}{a numeric vector}
- \item{\code{stateNumber}}{a numeric vector, running from 1 to 50.}
- \item{\code{abbrev}}{State name as a character vector}
- }
-}
-\details{
- Each row is a state of the United States of America.
- The first half of the columns tend to be totals,
- and the second half are crime rates per 100,000 population.
-
-
- The data frame was downloaded as a \code{.csv} file and edited.
- The full column names are:
- State, Population, Violent crime total, Murder and nonnegligent
- Manslaughter, Forcible rape, Robbery, Aggravated assault, Property
- crime total, Burglary, Larceny-theft, Motor vehicle theft, Violent
- Crime rate, Murder and nonnegligent manslaughter rate, Forcible
- rape rate, Robbery rate, Aggravated assault rate, Property crime
- rate, Burglary rate, Larceny-theft rate, Motor vehicle theft rate,
- state Number, abbreviation.
- Technical details governing the data set are given in the URL.
-
-
-
-%% ~~ If necessary, more details than the __description__ above ~~
-}
-\source{
- \url{http://www.ucrdatatool.gov},
- \url{http://www.ucrdatatool.gov/Search/Crime/State/RunCrimeOneYearofData.cfm}
-
-}
-%%\references{
-%% ~~ possibly secondary sources and usages ~~
-%%}
-\examples{
-\dontrun{ # Louisiana is the one outlier
-plot(MurderRate ~ stateNumber, crime.us,
- axes = FALSE, type = "h", col = 1:6,
- main = "USA murder rates in 2009 (per 100,000 population)")
-axis(1, with(crime.us, abbrev), at = with(crime.us, stateNumber),
- col = 1:6, col.tick = 1:6, cex.lab = 0.5)
-axis(2) }
-tail(crime.us[ sort.list(with(crime.us, MurderRate)), ])
-}
-\keyword{datasets}
-% data(crime.us)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index d9cb751..a851de8 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -10,10 +10,10 @@
}
\usage{
cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
- mv = FALSE, intercept.apply = FALSE, whitespace = FALSE)
+ mv = FALSE, apply.parint = FALSE, whitespace = FALSE)
}
-%scumulative(link="logit",
-% lscale="loge", escale = list(),
+%scumulative(link = "logit",
+% lscale = "loge", escale = list(),
% parallel = FALSE, sparallel = TRUE, reverse = FALSE, iscale = 1)
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -25,6 +25,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
\code{\link{probit}}/\code{\link{cloglog}}/\code{\link{cauchit}}/\ldots
models.
+
}
% \item{lscale}{
% Link function applied to the \eqn{J} scaling parameters.
@@ -81,23 +82,26 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
}
- \item{intercept.apply}{
+ \item{apply.parint}{
Logical.
Whether the \code{parallel} argument should be applied to the intercept term.
This should be set to \code{TRUE} for \code{link=}
\code{\link{golf}},
\code{\link{polf}},
\code{\link{nbolf}}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
% \item{iscale}{
% Numeric. Initial values for the scale parameters.
+
% }
\item{whitespace}{
See \code{\link{CommonVGAMffArguments}} for information.
+
}
}
diff --git a/man/dagum.Rd b/man/dagum.Rd
index 1f6714d..f93fd07 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -48,7 +48,7 @@ The Dagum distribution has a cumulative distribution function
which leads to a probability density function
\deqn{f(y) = ap y^{ap-1} / [b^{ap} \{1 + (y/b)^a\}^{p+1}]}{%
f(y) = ap y^(ap-1) / [b^(ap) (1 + (y/b)^a)^(p+1)]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and the others are shape parameters.
The mean is
@@ -100,12 +100,13 @@ while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
\code{\link{paralogistic}},
\code{\link{invparalogistic}}.
+
}
\examples{
-ddata = data.frame(y = rdagum(n = 3000, exp(1), exp(2), exp(1)))
-fit = vglm(y ~ 1, dagum, ddata, trace = TRUE)
-fit = vglm(y ~ 1, dagum(ishape1.a = exp(1)), ddata, trace = TRUE)
+ddata <- data.frame(y = rdagum(n = 3000, exp(1), exp(2), exp(1)))
+fit <- vglm(y ~ 1, dagum, ddata, trace = TRUE)
+fit <- vglm(y ~ 1, dagum(ishape1.a = exp(1)), ddata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/dagumUC.Rd b/man/dagumUC.Rd
index ce5aa57..17821c0 100644
--- a/man/dagumUC.Rd
+++ b/man/dagumUC.Rd
@@ -10,12 +10,13 @@
generation for the Dagum distribution with shape parameters \code{a}
and \code{p}, and scale parameter \code{scale}.
+
}
\usage{
-ddagum(x, shape1.a, scale, shape2.p, log = FALSE)
-pdagum(q, shape1.a, scale, shape2.p)
-qdagum(p, shape1.a, scale, shape2.p)
-rdagum(n, shape1.a, scale, shape2.p)
+ddagum(x, shape1.a, scale = 1, shape2.p, log = FALSE)
+pdagum(q, shape1.a, scale = 1, shape2.p)
+qdagum(p, shape1.a, scale = 1, shape2.p)
+rdagum(n, shape1.a, scale = 1, shape2.p)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -40,32 +41,57 @@ rdagum(n, shape1.a, scale, shape2.p)
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{dagum}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
}
\note{
The Dagum distribution is a special case of the 4-parameter
generalized beta II distribution.
+
}
\seealso{
\code{\link{dagum}},
\code{\link{genbetaII}}.
+
}
\examples{
-ddata = data.frame(y = rdagum(n = 3000, 4, 6, 2))
-fit = vglm(y ~ 1, dagum(ishape1.a = 2.1), ddata, trace = TRUE, crit = "coef")
-coef(fit, matrix = TRUE)
-Coef(fit)
+probs <- seq(0.1, 0.9, by = 0.1)
+shape1.a <- 1; shape2.p <- 2
+# Should be 0:
+max(abs(pdagum(qdagum(p = probs, shape1.a = shape1.a, shape2.p = shape2.p),
+ shape1.a = shape1.a, shape2.p = shape2.p) - probs))
+
+\dontrun{ par(mfrow = c(1, 2))
+x <- seq(-0.01, 5, len = 401)
+plot(x, dexp(x), type = "l", col = "black", ylab = "", las = 1, ylim = c(0, 1),
+ main = "Black is standard exponential, others are ddagum(x, ...)")
+lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 1), col = "orange")
+lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 2), col = "blue")
+lines(x, ddagum(x, shape1.a = shape1.a, shape2.p = 5), col = "green")
+legend("topright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+ legend = paste("shape1.a =", shape1.a, ", shape2.p =", c(1, 2, 5)))
+
+plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1,
+ main = "Black is standard exponential, others are pdagum(x, ...)")
+lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 1), col = "orange")
+lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 2), col = "blue")
+lines(x, pdagum(x, shape1.a = shape1.a, shape2.p = 5), col = "green")
+legend("bottomright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+ legend = paste("shape1.a =", shape1.a, ", shape2.p =", c(1, 2, 5)))
+}
}
\keyword{distribution}
diff --git a/man/dcennormal1.Rd b/man/dcennormal1.Rd
index f768a73..342b1d5 100644
--- a/man/dcennormal1.Rd
+++ b/man/dcennormal1.Rd
@@ -75,23 +75,23 @@ dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
}
\examples{\dontrun{# Repeat the simulations described in Harter and Moore (1966)
-SIMS = 100 # Number of simulations (change this to 1000)
-mu.save = sd.save = rep(NA, len = SIMS)
-r1 = 0; r2 = 4; nn = 20
+SIMS <- 100 # Number of simulations (change this to 1000)
+mu.save <- sd.save <- rep(NA, len = SIMS)
+r1 <- 0; r2 <- 4; nn <- 20
for(sim in 1:SIMS) {
- y = sort(rnorm(nn))
- y = y[(1+r1):(nn-r2)] # Delete r1 smallest and r2 largest
- fit = vglm(y ~ 1, dcennormal1(r1 = r1, r2 = r2))
- mu.save[sim] = predict(fit)[1,1]
- sd.save[sim] = exp(predict(fit)[1,2]) # Assumes a log link and ~ 1
+ y <- sort(rnorm(nn))
+ y <- y[(1+r1):(nn-r2)] # Delete r1 smallest and r2 largest
+ fit <- vglm(y ~ 1, dcennormal1(r1 = r1, r2 = r2))
+ mu.save[sim] <- predict(fit)[1,1]
+ sd.save[sim] <- exp(predict(fit)[1,2]) # Assumes a log link and ~ 1
}
c(mean(mu.save), mean(sd.save)) # Should be c(0,1)
c(sd(mu.save), sd(sd.save))
}
# Data from Sarhan and Greenberg (1962); MLEs are mu = 9.2606, sd = 1.3754
-strontium90 = data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
-fit = vglm(y ~ 1, dcennormal1(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
+strontium90 <- data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
+fit <- vglm(y ~ 1, dcennormal1(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
index eb3ed12..5d65a1d 100644
--- a/man/deplot.lmscreg.Rd
+++ b/man/deplot.lmscreg.Rd
@@ -15,26 +15,39 @@ deplot.lmscreg(object, newdata = NULL, x0, y.arg, plot.it = TRUE, ...)
an object produced by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}} with a family function beginning with
\code{"lms."}, e.g., \code{\link{lms.yjn}}.
+
+
}
\item{newdata}{ Optional data frame containing secondary variables such
as sex. It should have a maximum of one row.
The default is to use the original data.
+
+
}
\item{x0}{ Numeric. The value of the primary variable at which to
make the `slice'.
+
+
}
\item{y.arg}{ Numerical vector. The values of the response variable
at which to evaluate the density. This should be a grid that is fine
enough to ensure the plotted curves are smooth. }
\item{plot.it}{ Logical. Plot it? If \code{FALSE} no plot will
- be done. }
+ be done.
+
+
+ }
  \item{\dots}{ Graphical parameters that are passed into
\code{\link{plotdeplot.lmscreg}}.
+
+
}
}
\details{
This function calls, e.g., \code{deplot.lms.yjn} in order to compute
the density function.
+
+
}
\value{
The original \code{object} but with a list
@@ -44,21 +57,23 @@ deplot.lmscreg(object, newdata = NULL, x0, y.arg, plot.it = TRUE, ...)
data frame constructed out of the \code{x0} argument. }
\item{y}{ The argument \code{y.arg} above. }
\item{density}{ Vector of the density function values evaluated at \code{y.arg}. }
+
+
}
\references{
+
Yee, T. W. (2004)
Quantile regression via vector generalized additive models.
\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
\code{\link{plotdeplot.lmscreg}} actually does the plotting.
+
+
}
\seealso{
\code{\link{plotdeplot.lmscreg}},
@@ -66,15 +81,17 @@ contains further information and examples.
\code{\link{lms.bcn}},
\code{\link{lms.bcg}},
\code{\link{lms.yjn}}.
+
+
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
-ygrid = seq(15, 43, by=0.25)
-deplot(fit, x0=20, y=ygrid, xlab="BMI", col="green", llwd=2,
- main="BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
-deplot(fit, x0=40, y=ygrid, add=TRUE, col="blue", llwd=2)
-deplot(fit, x0=60, y=ygrid, add=TRUE, col="red", llwd=2) -> a
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), fam = lms.bcn(zero = 1), data = bmi.nz)
+ygrid <- seq(15, 43, by = 0.25)
+deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "green", llwd = 2,
+ main = "BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
+deplot(fit, x0 = 40, y = ygrid, add = TRUE, col = "blue", llwd = 2)
+deplot(fit, x0 = 60, y = ygrid, add = TRUE, col = "red", llwd = 2) -> a
names(a@post$deplot)
a@post$deplot$newdata
diff --git a/man/depvar.Rd b/man/depvar.Rd
index 2a7b2f3..5d0213e 100644
--- a/man/depvar.Rd
+++ b/man/depvar.Rd
@@ -56,10 +56,10 @@ depvar(object, ...)
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-fit@y # Sample proportions (not recommended)
-depvar(fit) # Better than using fit@y; dependent variable (response)
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+fit@y # Sample proportions (not recommended)
+depvar(fit) # Better than using fit@y; dependent variable (response)
weights(fit, type = "prior") # Number of observations
}
\keyword{models}
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index f25ac0f..cc00cdc 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -11,6 +11,7 @@
expectile function and random generation for the distribution
associated with the expectiles of an exponential distribution.
+
}
\usage{
deexp(x, rate = 1, log = FALSE)
@@ -22,14 +23,23 @@ reexp(n, rate = 1)
\arguments{
\item{x, p, q}{
See \code{\link{deunif}}.
+
+
+ }
+ \item{n, rate, log}{
+ See \code{\link[stats:Exponential]{rexp}}.
+
+
}
- \item{n, rate, log}{See \code{\link[stats:Exponential]{rexp}}.}
\item{Maxit_nr, Tol_nr}{
See \code{\link{deunif}}.
+
+
}
}
\details{
+
General details are given in \code{\link{deunif}}
including
a note regarding the terminology used.
@@ -50,6 +60,7 @@ For \code{qeexp} the Newton-Raphson algorithm is used to solve for
Numerical problems may occur when values of \code{p} are
very close to 0 or 1.
+
}
\value{
\code{deexp(x)} gives the density function \eqn{g(x)}.
@@ -58,6 +69,7 @@ very close to 0 or 1.
the value \eqn{y} such that \eqn{G(y)=p}.
\code{reexp(n)} gives \eqn{n} random variates from \eqn{G}.
+
}
%\references{
@@ -83,18 +95,19 @@ very close to 0 or 1.
\code{\link{denorm}},
\code{\link{dexp}}.
+
}
\examples{
-my_p = 0.25; y = rexp(nn <- 1000)
-(myexp = qeexp(my_p))
+my_p <- 0.25; y <- rexp(nn <- 1000)
+(myexp <- qeexp(my_p))
sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
-\dontrun{ par(mfrow=c(2,1))
-yy = seq(-0, 4, len = nn)
+\dontrun{ par(mfrow = c(2,1))
+yy <- seq(-0, 4, len = nn)
plot(yy, deexp(yy), col = "blue", ylim = 0:1, xlab = "y", ylab = "g(y)",
type = "l", main = "g(y) for Exp(1); dotted green is f(y) = dexp(y)")
-lines(yy, dexp(yy), col="darkgreen", lty="dotted", lwd=2) # 'original'
+lines(yy, dexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
plot(yy, peexp(yy), type = "l", col = "blue", ylim = 0:1,
xlab = "y", ylab = "G(y)", main = "G(y) for Exp(1)")
@@ -104,25 +117,25 @@ lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
\keyword{distribution}
%# Equivalently:
-%I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp])
-%I2 = mean(y > myexp) * mean(-myexp + y[y > myexp])
+%I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
+%I2 <- mean(y > myexp) * mean(-myexp + y[y > myexp])
%I1 / (I1 + I2) # Should be my_p
%# Or:
-%I1 = sum( myexp - y[y <= myexp])
-%I2 = sum(-myexp + y[y > myexp])
+%I1 <- sum( myexp - y[y <= myexp])
+%I2 <- sum(-myexp + y[y > myexp])
%# Non-standard exponential
-%myrate = 8
-%yy = rexp(nn, rate=myrate)
-%(myexp = qeexp(my_p, rate=myrate))
+%myrate <- 8
+%yy <- rexp(nn, rate = myrate)
+%(myexp <- qeexp(my_p, rate = myrate))
%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
%peexp(-Inf, rate = myrate) # Should be 0
%peexp( Inf, rate = myrate) # Should be 1
-%peexp(mean(yy), rate=myrate) # Should be 0.5
-%abs(qeexp(0.5, rate=myrate) - mean(yy)) # Should be 0
-%abs(peexp(myexp, rate=myrate) - my_p) # Should be 0
-%integrate(f = deexp, lower=-1, upper = Inf, rate=myrate) # Should be 1
+%peexp(mean(yy), rate = myrate) # Should be 0.5
+%abs(qeexp(0.5, rate = myrate) - mean(yy)) # Should be 0
+%abs(peexp(myexp, rate = myrate) - my_p) # Should be 0
+%integrate(f = deexp, lower = -1, upper = Inf, rate = myrate) # Should be 1
diff --git a/man/enormUC.Rd b/man/enormUC.Rd
index 3283d27..23dcbfb 100644
--- a/man/enormUC.Rd
+++ b/man/enormUC.Rd
@@ -11,6 +11,7 @@
expectile function and random generation for the distribution
associated with the expectiles of a normal distribution.
+
}
\usage{
denorm(x, mean = 0, sd = 1, log = FALSE)
@@ -22,10 +23,18 @@ renorm(n, mean = 0, sd = 1)
\arguments{
\item{x, p, q}{
See \code{\link{deunif}}.
+
+
+ }
+ \item{n, mean, sd, log}{
+ See \code{\link[stats:Normal]{rnorm}}.
+
+
}
- \item{n, mean, sd, log}{See \code{\link[stats:Normal]{rnorm}}.}
\item{Maxit_nr, Tol_nr}{
See \code{\link{deunif}}.
+
+
}
}
\details{
@@ -50,6 +59,7 @@ For \code{qenorm} the Newton-Raphson algorithm is used to solve for
Numerical problems may occur when values of \code{p} are
very close to 0 or 1.
+
}
\value{
\code{denorm(x)} gives the density function \eqn{g(x)}.
@@ -58,6 +68,7 @@ very close to 0 or 1.
the value \eqn{y} such that \eqn{G(y)=p}.
\code{renorm(n)} gives \eqn{n} random variates from \eqn{G}.
+
}
%\references{
@@ -85,32 +96,33 @@ very close to 0 or 1.
\code{\link{amlnormal}},
\code{\link{lms.bcn}}.
+
}
\examples{
-my_p = 0.25; y = rnorm(nn <- 1000)
-(myexp = qenorm(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+my_p <- 0.25; y <- rnorm(nn <- 1000)
+(myexp <- qenorm(my_p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
# Non-standard normal
-mymean = 1; mysd = 2
-yy = rnorm(nn, mymean, mysd)
-(myexp = qenorm(my_p, mymean, mysd))
+mymean <- 1; mysd <- 2
+yy <- rnorm(nn, mymean, mysd)
+(myexp <- qenorm(my_p, mymean, mysd))
sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
penorm(-Inf, mymean, mysd) # Should be 0
penorm( Inf, mymean, mysd) # Should be 1
penorm(mean(yy), mymean, mysd) # Should be 0.5
abs(qenorm(0.5, mymean, mysd) - mean(yy)) # Should be 0
-abs(penorm(myexp, mymean, mysd) - my_p) # Should be 0
-integrate(f = denorm, lower=-Inf, upper = Inf,
+abs(penorm(myexp, mymean, mysd) - my_p) # Should be 0
+integrate(f = denorm, lower = -Inf, upper = Inf,
mymean, mysd) # Should be 1
\dontrun{
par(mfrow = c(2, 1))
-yy = seq(-3, 3, len = nn)
+yy <- seq(-3, 3, len = nn)
plot(yy, denorm(yy), type = "l", col="blue", xlab = "y", ylab = "g(y)",
main = "g(y) for N(0,1); dotted green is f(y) = dnorm(y)")
-lines(yy, dnorm(yy), col="darkgreen", lty="dotted", lwd=2) # 'original'
+lines(yy, dnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
plot(yy, penorm(yy), type = "l", col = "blue", ylim = 0:1,
xlab = "y", ylab = "G(y)", main = "G(y) for N(0,1)")
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
index 7991237..c38ba56 100644
--- a/man/enzyme.Rd
+++ b/man/enzyme.Rd
@@ -34,11 +34,13 @@ Watts, D. G. (1981)
}
\seealso{
-\code{\link{micmen}}.
+\code{\link[VGAM]{micmen}}.
}
\examples{
+\dontrun{
fit <- vglm(velocity ~ 1, micmen, data = enzyme, trace = TRUE,
- form2 = ~ conc - 1, crit = "crit")
+ form2 = ~ conc - 1, crit = "crit")
summary(fit)
}
+}
\keyword{datasets}
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index 8a88160..d9bfe28 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -23,22 +23,30 @@ reunif(n, min = 0, max = 1)
\item{x, q}{
Vector of expectiles.
See the terminology note below.
+
}
\item{p}{
Vector of probabilities. % (tau or \eqn{\tau}).
These should lie in \eqn{(0,1)}.
+
+ }
+ \item{n, min, max, log}{
+ See \code{\link[stats:Uniform]{runif}}.
+
+
}
- \item{n, min, max, log}{See \code{\link[stats:Uniform]{runif}}.}
\item{Maxit_nr}{
Numeric.
Maximum number of Newton-Raphson iterations allowed.
A warning is issued if convergence is not obtained for all \code{p}
values.
+
}
\item{Tol_nr}{
Numeric.
Small positive value specifying the tolerance or precision to which
the expectiles are computed.
+
}
}
\details{
@@ -91,6 +99,7 @@ For \code{qeunif} the Newton-Raphson algorithm is used to solve for
Numerical problems may occur when values of \code{p} are
very close to 0 or 1.
+
}
\value{
\code{deunif(x)} gives the density function \eqn{g(x)}.
@@ -133,24 +142,25 @@ quantile and expectile regression.
\code{\link{dunif}},
\code{\link{dkoenker}}.
+
}
\examples{
-my_p = 0.25; y = runif(nn <- 1000)
-(myexp = qeunif(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+my_p <- 0.25; y <- runif(nn <- 1000)
+(myexp <- qeunif(my_p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
# Equivalently:
-I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp])
-I2 = mean(y > myexp) * mean(-myexp + y[y > myexp])
-I1 / (I1 + I2) # Should be my_p
+I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
+I2 <- mean(y > myexp) * mean(-myexp + y[y > myexp])
+I1 / (I1 + I2) # Should be my_p
# Or:
-I1 = sum( myexp - y[y <= myexp])
-I2 = sum(-myexp + y[y > myexp])
+I1 <- sum( myexp - y[y <= myexp])
+I2 <- sum(-myexp + y[y > myexp])
# Non-standard uniform
-mymin = 1; mymax = 8
-yy = runif(nn, mymin, mymax)
-(myexp = qeunif(my_p, mymin, mymax))
+mymin <- 1; mymax <- 8
+yy <- runif(nn, mymin, mymax)
+(myexp <- qeunif(my_p, mymin, mymax))
sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
peunif(mymin, mymin, mymax) # Should be 0
peunif(mymax, mymin, mymax) # Should be 1
@@ -159,11 +169,11 @@ abs(qeunif(0.5, mymin, mymax) - mean(yy)) # Should be 0
abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2) # Should be 0
abs(peunif(myexp, mymin, mymax) - my_p) # Should be 0
integrate(f = deunif, lower = mymin - 3, upper = mymax + 3,
- min = mymin, max = mymax) # Should be 1
+ min = mymin, max = mymax) # Should be 1
\dontrun{
par(mfrow = c(2,1))
-yy = seq(0.0, 1.0, len = nn)
+yy <- seq(0.0, 1.0, len = nn)
plot(yy, deunif(yy), type = "l", col = "blue", ylim = c(0, 2),
xlab = "y", ylab = "g(y)", main = "g(y) for Uniform(0,1)")
lines(yy, dunif(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
diff --git a/man/expexp.Rd b/man/expexp.Rd
index 3cb8ddb..d95396d 100644
--- a/man/expexp.Rd
+++ b/man/expexp.Rd
@@ -136,13 +136,13 @@ expexp(lshape = "loge", lscale = "loge",
\examples{
# A special case: exponential data
edata <- data.frame(y = rexp(n <- 1000))
-fit = vglm(y ~ 1, fam = expexp, edata, trace = TRUE, maxit = 99)
-coef(fit, matrix=TRUE)
+fit <- vglm(y ~ 1, fam = expexp, edata, trace = TRUE, maxit = 99)
+coef(fit, matrix = TRUE)
Coef(fit)
# Ball bearings data (number of million revolutions before failure)
-bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+bbearings <- c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
128.04, 173.40)
@@ -154,11 +154,11 @@ logLik(fit) # Authors get -112.9763
# Failure times of the airconditioning system of an airplane
-acplane = c(23, 261, 87, 7, 120, 14, 62, 47,
+acplane <- c(23, 261, 87, 7, 120, 14, 62, 47,
225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
71, 11, 14, 11, 16, 90, 1, 16, 52, 95)
-fit = vglm(acplane ~ 1, fam = expexp(ishape = 0.8, isc = 0.15),
- trace = TRUE, maxit = 99)
+fit <- vglm(acplane ~ 1, fam = expexp(ishape = 0.8, isc = 0.15),
+ trace = TRUE, maxit = 99)
coef(fit, matrix = TRUE)
Coef(fit) # Authors get c(shape=0.8130, scale=0.0145)
logLik(fit) # Authors get log-lik -152.264
diff --git a/man/fgm.Rd b/man/fgm.Rd
index b5ba0d4..8b9c483 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -13,27 +13,10 @@ fgm(lapar="rhobit", iapar = NULL, imethod = 1, nsimEIM = 200)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lapar}{
- Link function applied to the association parameter
- \eqn{\alpha}{alpha}, which is real.
- See \code{\link{Links}} for more choices.
+ \item{lapar, iapar, imethod, nsimEIM}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+ See \code{\link{Links}} for more link function choices.
- }
- \item{iapar}{
- Numeric. Optional initial value for \eqn{\alpha}{alpha}.
- By default, an initial value is chosen internally.
- If a convergence failure occurs try assigning a different value.
- Assigning a value will override the argument \code{imethod}.
-
- }
- \item{imethod}{
- An integer with value \code{1} or \code{2} which
- specifies the initialization method. If failure to converge occurs
- try the other value, or else specify a value for \code{ia}.
-
- }
- \item{nsimEIM}{
- See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -49,17 +32,20 @@ fgm(lapar="rhobit", iapar = NULL, imethod = 1, nsimEIM = 200)
When \eqn{\alpha = 0}{alpha=0} the random variables are
independent.
+
% A variant of Newton-Raphson is used, which only seems to work for an
% intercept model.
% It is a very good idea to set \code{trace=TRUE}.
% This \pkg{VGAM} family function is prone to numerical difficulties.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
@@ -68,6 +54,7 @@ Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in Engineering and Science},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\note{
@@ -76,8 +63,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
This is because each marginal distribution corresponds to a standard
uniform distribution.
+
% This \pkg{VGAM} family function should be used with caution.
+
}
\seealso{
@@ -85,11 +74,12 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{frank}},
\code{\link{morgenstern}}.
+
}
\examples{
-ymat = rfgm(n = 1000, alpha = rhobit(3, inverse = TRUE))
+ymat <- rfgm(n = 1000, alpha = rhobit(3, inverse = TRUE))
\dontrun{plot(ymat, col = "blue")}
-fit = vglm(ymat ~ 1, fam = fgm, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = fgm, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/fill.Rd b/man/fill.Rd
index 9cd086f..05eec65 100644
--- a/man/fill.Rd
+++ b/man/fill.Rd
@@ -146,40 +146,39 @@ fill(x, values = 0, ncolx = ncol(x))
}
\examples{
fill(runif(5))
-fill(runif(5), ncol=3)
-fill(runif(5), val=1, ncol=3)
+fill(runif(5), ncol = 3)
+fill(runif(5), val = 1, ncol = 3)
# Generate eyes data for the examples below. Eyes are independent (OR=1).
-nn = 1000 # Number of people
+nn <- 1000 # Number of people
eyesdat = data.frame(lop = round(runif(nn), 2),
rop = round(runif(nn), 2),
age = round(rnorm(nn, 40, 10)))
-eyesdat = transform(eyesdat,
+eyesdat <- transform(eyesdat,
mop = (lop + rop) / 2, # Mean ocular pressure
op = (lop + rop) / 2, # Value unimportant unless plotting
# op = lop, # Choose this if plotting
eta1 = 0 - 2*lop + 0.04*age, # Linear predictor for left eye
eta2 = 0 - 2*rop + 0.04*age) # Linear predictor for right eye
-eyesdat = transform(eyesdat,
- leye = rbinom(nn, size=1, prob=logit(eta1, inverse=TRUE)),
- reye = rbinom(nn, size=1, prob=logit(eta2, inverse=TRUE)))
+eyesdat <- transform(eyesdat,
+ leye = rbinom(nn, size = 1, prob = logit(eta1, inverse = TRUE)),
+ reye = rbinom(nn, size = 1, prob = logit(eta2, inverse = TRUE)))
# Example 1
# All effects are linear
-fit1 = vglm(cbind(leye,reye) ~ op + age,
- family = binom2.or(exchangeable=TRUE, zero=3),
- data=eyesdat, trace=TRUE,
- xij = list(op ~ lop + rop + fill(lop)),
- form2 = ~ op + lop + rop + fill(lop) + age)
+fit1 <- vglm(cbind(leye,reye) ~ op + age,
+ family = binom2.or(exchangeable=TRUE, zero=3),
+ data=eyesdat, trace=TRUE,
+ xij = list(op ~ lop + rop + fill(lop)),
+ form2 = ~ op + lop + rop + fill(lop) + age)
head(model.matrix(fit1, type="lm")) # LM model matrix
head(model.matrix(fit1, type="vlm")) # Big VLM model matrix
coef(fit1)
-coef(fit1, matrix=TRUE) # Unchanged with 'xij'
+coef(fit1, matrix = TRUE) # Unchanged with 'xij'
constraints(fit1)
-max(abs(predict(fit1)-predict(fit1, new=eyesdat))) # Predicts correctly
+max(abs(predict(fit1)-predict(fit1, new = eyesdat))) # Predicts correctly
summary(fit1)
-\dontrun{
-plotvgam(fit1, se=TRUE) # Wrong, e.g., because it plots against op, not lop.
+\dontrun{ plotvgam(fit1, se = TRUE) # Wrong, e.g., because it plots against op, not lop.
# So set op=lop in the above for a correct plot.
}
@@ -187,45 +186,44 @@ plotvgam(fit1, se=TRUE) # Wrong, e.g., because it plots against op, not lop.
# Example 2
# Model OR as a linear function of mop
-fit2 = vglm(cbind(leye,reye) ~ op + age,
- binom2.or(exchangeable=TRUE, zero=NULL),
- data=eyesdat, trace=TRUE,
- xij = list(op ~ lop + rop + mop),
- form2 = ~ op + lop + rop + mop + age)
-head(model.matrix(fit2, type="lm")) # LM model matrix
-head(model.matrix(fit2, type="vlm")) # Big VLM model matrix
+fit2 <- vglm(cbind(leye,reye) ~ op + age, data = eyesdat, trace = TRUE,
+ binom2.or(exchangeable = TRUE, zero = NULL),
+ xij = list(op ~ lop + rop + mop),
+ form2 = ~ op + lop + rop + mop + age)
+head(model.matrix(fit2, type = "lm")) # LM model matrix
+head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
coef(fit2)
-coef(fit2, matrix=TRUE) # Unchanged with 'xij'
-max(abs(predict(fit2)-predict(fit2, new=eyesdat))) # Predicts correctly
+coef(fit2, matrix = TRUE) # Unchanged with 'xij'
+max(abs(predict(fit2) - predict(fit2, new = eyesdat))) # Predicts correctly
summary(fit2)
-\dontrun{
-plotvgam(fit2, se=TRUE) # Wrong because it plots against op, not lop.
+\dontrun{ plotvgam(fit2, se = TRUE) # Wrong because it plots against op, not lop.
}
# Example 3. This model uses regression splines on ocular pressure.
# It uses a trick to ensure common basis functions.
-BS = function(x, ...) bs(c(x,...), df=3)[1:length(x),,drop=FALSE] # trick
-
-fit3 = vglm(cbind(leye,reye) ~ BS(lop,rop) + age,
- family = binom2.or(exchangeable=TRUE, zero=3),
- data=eyesdat, trace=TRUE,
- xij = list(BS(lop,rop) ~ BS(lop,rop) +
- BS(rop,lop) +
- fill(BS(lop,rop))),
- form2 = ~ BS(lop,rop) + BS(rop,lop) + fill(BS(lop,rop)) +
- lop + rop + age)
-head(model.matrix(fit3, type="lm")) # LM model matrix
-head(model.matrix(fit3, type="vlm")) # Big VLM model matrix
+BS <- function(x, ...)
+ bs(c(x,...), df = 3)[1:length(x), , drop = FALSE] # trick
+
+fit3 <- vglm(cbind(leye,reye) ~ BS(lop,rop) + age,
+ family = binom2.or(exchangeable = TRUE, zero = 3),
+ data = eyesdat, trace = TRUE,
+ xij = list(BS(lop,rop) ~ BS(lop,rop) +
+ BS(rop,lop) +
+ fill(BS(lop,rop))),
+ form2 = ~ BS(lop,rop) + BS(rop,lop) + fill(BS(lop,rop)) +
+ lop + rop + age)
+head(model.matrix(fit3, type = "lm")) # LM model matrix
+head(model.matrix(fit3, type = "vlm")) # Big VLM model matrix
coef(fit3)
-coef(fit3, matrix=TRUE)
+coef(fit3, matrix = TRUE)
summary(fit3)
fit3@smart.prediction
-max(abs(predict(fit3)-predict(fit3, new=eyesdat))) # Predicts correctly
-predict(fit3, new=head(eyesdat)) # Note the 'scalar' OR, i.e., zero=3
-max(abs(head(predict(fit3))-predict(fit3, new=head(eyesdat)))) # Should be 0
+max(abs(predict(fit3) - predict(fit3, new = eyesdat))) # Predicts correctly
+predict(fit3, new = head(eyesdat)) # Note the 'scalar' OR, i.e., zero=3
+max(abs(head(predict(fit3)) - predict(fit3, new = head(eyesdat)))) # Should be 0
\dontrun{
-plotvgam(fit3, se=TRUE, xlab="lop") # Correct
+plotvgam(fit3, se = TRUE, xlab = "lop") # Correct
}
}
\keyword{models}
@@ -237,8 +235,8 @@ plotvgam(fit3, se=TRUE, xlab="lop") # Correct
%\code{fill1(x, value=0, ncolx=ncol(x))} and create .Rd file for
%\code{zero} argument.]
-%eyesdat$leye = ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
-%eyesdat$reye = ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
+%eyesdat$leye <- ifelse(runif(n) < exp(eta1)/(1+exp(eta1)), 1, 0)
+%eyesdat$reye <- ifelse(runif(n) < exp(eta2)/(1+exp(eta2)), 1, 0)
% \deqn{logit P(Y_k=1) = f_k(x_{ijk}) }{%
% logit P(Y_k=1) = f_k(x_{ijk}) }
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index 6dfd0e3..2035bb3 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -50,9 +50,12 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
}
\value{
For \code{deriv = 0},
- \code{0.5 * log((1+theta)/(1-theta))} when \code{inverse = FALSE},
+ \code{0.5 * log((1+theta)/(1-theta))}
+ (same as \code{atanh(theta)})
+ when \code{inverse = FALSE},
and if \code{inverse = TRUE} then
- \code{(exp(2*theta)-1)/(exp(2*theta)+1)}.
+ \code{(exp(2*theta)-1)/(exp(2*theta)+1)}
+ (same as \code{tanh(theta)}).
For \code{deriv = 1}, then the function returns
@@ -81,6 +84,7 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
The link function \code{\link{rhobit}} is very similar to \code{fisherz},
i.e., just twice the value of \code{fisherz}.
+ This link function may be renamed to \code{atanhlink} in the near future.
}
@@ -88,6 +92,7 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
\seealso{
\code{\link{Links}},
\code{\link{rhobit}},
+ \code{\link{atanh}},
\code{\link{logit}}.
@@ -96,8 +101,8 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
theta <- seq(-0.99, 0.99, by = 0.01)
y <- fisherz(theta)
\dontrun{ plot(theta, y, type = "l", las = 1, ylab = "",
- main = "fisherz(theta)")
-abline(v = 0, h = 0, lty = 2) }
+ main = "fisherz(theta)", col = "blue")
+abline(v = (-1):1, h = 0, lty = 2, col = "gray") }
x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
fisherz(x) # Has NAs
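A quick numerical sketch of the identities stated in the value section above
(it assumes only that the fisherz() and rhobit() links accept the inverse
argument used elsewhere in these pages):

theta <- seq(-0.9, 0.9, by = 0.3)
max(abs(fisherz(theta) - atanh(theta)))                 # Should be ~0
max(abs(fisherz(theta, inverse = TRUE) - tanh(theta)))  # Should be ~0
max(abs(rhobit(theta) - 2 * fisherz(theta)))            # Should be ~0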
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 3c33863..332654d 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -43,7 +43,7 @@ fisk(lshape1.a = "loge", lscale = "loge",
The Fisk distribution has density
\deqn{f(y) = a y^{a-1} / [b^a \{1 + (y/b)^a\}^2]}{%
f(y) = a y^(a-1) / [b^a (1 + (y/b)^a)^2]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \code{a} is a shape parameter.
The cumulative distribution function is
diff --git a/man/fiskUC.Rd b/man/fiskUC.Rd
index 2f045ae..f5a44b1 100644
--- a/man/fiskUC.Rd
+++ b/man/fiskUC.Rd
@@ -66,8 +66,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-fdata = data.frame(y = rfisk(n = 1000, 4, 6))
-fit = vglm(y ~ 1, fisk, data = fdata, trace = TRUE, crit = "coef")
+fdata <- data.frame(y = rfisk(n = 1000, 4, 6))
+fit <- vglm(y ~ 1, fisk, data = fdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index c9a16f8..2d29ac7 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -89,14 +89,14 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
}
\examples{
# Categorical regression example 1
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
fitted(fit)
# LMS quantile regression example 2
-fit = vgam(BMI ~ s(age, df = c(4, 2)),
- fam = lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
-head(predict(fit, type = "response")) # The following three are equal
+fit <- vgam(BMI ~ s(age, df = c(4, 2)),
+ lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
+head(predict(fit, type = "response")) # Equal to the following two:
head(fitted(fit))
predict(fit, type = "response", newdata = head(bmi.nz))
}
diff --git a/man/fnormal1.Rd b/man/fnormal1.Rd
index 8755ca2..52d135c 100644
--- a/man/fnormal1.Rd
+++ b/man/fnormal1.Rd
@@ -4,6 +4,7 @@
\title{ Folded Normal Distribution Family Function }
\description{
Fits a (generalized) folded (univariate) normal distribution.
+
}
\usage{
fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
@@ -33,15 +34,18 @@ fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
A \code{NULL} means a value is computed internally.
See \code{\link{CommonVGAMffArguments}}.
+
}
\item{a1, a2}{
Positive weights, called \eqn{a_1}{a1} and \eqn{a_2}{a2} below.
Each must be of length 1.
+
}
\item{nsimEIM, imethod, zero}{
See \code{\link{CommonVGAMffArguments}}.
+
}
}
\details{
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 3bebcbc..77f5625 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -155,9 +155,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
+\dontrun{
set.seed(123)
fdata <- data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1)))
-\dontrun{ with(fdata, hist(y1)) }
+with(fdata, hist(y1))
fit2 <- vglm(y1 ~ 1, frechet2, fdata, trace = TRUE)
coef(fit2, matrix = TRUE)
Coef(fit2)
@@ -166,6 +167,7 @@ with(fdata, mean(y1))
head(weights(fit2, type = "working"))
vcov(fit2)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index cb6ce5b..775bcdd 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -9,6 +9,7 @@
Density, distribution function, quantile function and random
generation for the three parameter Frechet distribution.
+
}
\usage{
dfrechet(x, location = 0, scale = 1, shape, log = FALSE)
@@ -22,6 +23,7 @@ rfrechet(n, location = 0, scale = 1, shape)
\item{n}{number of observations.
Passed into \code{\link[stats:Uniform]{runif}}.
+
}
\item{location, scale, shape}{the location parameter \eqn{a},
scale parameter \eqn{b}, and shape parameter \eqn{s}.}
@@ -29,6 +31,7 @@ rfrechet(n, location = 0, scale = 1, shape)
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
+
}
}
@@ -38,13 +41,17 @@ rfrechet(n, location = 0, scale = 1, shape)
\code{qfrechet} gives the quantile function, and
\code{rfrechet} generates random deviates.
+
}
\references{
+
Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
Hoboken, NJ, USA: Wiley-Interscience.
+
+
}
\author{ T. W. Yee }
\details{
@@ -54,6 +61,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
of the probability density function and range restrictions on
the parameters.
+
}
%\note{
%}
@@ -61,16 +69,18 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{frechet2}}.
% \code{\link{frechet3}}.
+
}
\examples{
-\dontrun{ shape = 5
-x = seq(-0.1, 3.5, len = 401)
+\dontrun{ shape <- 5
+x <- seq(-0.1, 3.5, len = 401)
plot(x, dfrechet(x, shape = shape), type = "l", ylab = "", las = 1,
main = "Frechet density divided into 10 equal areas; orange = cdf")
abline(h = 0, col = "blue", lty = 2)
-qq = qfrechet(seq(0.1, 0.9,by = 0.1), shape = shape)
+qq <- qfrechet(seq(0.1, 0.9, by = 0.1), shape = shape)
lines(qq, dfrechet(qq, shape = shape), col = "purple", lty = 3, type = "h")
-lines(x, pfrechet(q = x, shape = shape), col = "orange") }
+lines(x, pfrechet(q = x, shape = shape), col = "orange")
+}
}
\keyword{distribution}
diff --git a/man/freund61.Rd b/man/freund61.Rd
index b6548fa..d99d498 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -172,9 +172,9 @@ A bivariate extension of the exponential distribution.
}
\examples{
-fdata = data.frame(y1 = rexp(nn <- 200, rate = 4))
-fdata = transform(fdata, y2 = rexp(nn, rate = 8))
-fit = vglm(cbind(y1, y2) ~ 1, fam = freund61, fdata, trace = TRUE)
+fdata <- data.frame(y1 = rexp(nn <- 200, rate = 4))
+fdata <- transform(fdata, y2 = rexp(nn, rate = 8))
+fit <- vglm(cbind(y1, y2) ~ 1, fam = freund61, fdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
@@ -182,11 +182,11 @@ head(fitted(fit))
summary(fit)
# y1 and y2 are independent, so fit an independence model
-fit2 = vglm(cbind(y1, y2) ~ 1, fam = freund61(indep = TRUE),
- fdata, trace = TRUE)
+fit2 <- vglm(cbind(y1, y2) ~ 1, fam = freund61(indep = TRUE),
+ fdata, trace = TRUE)
coef(fit2, matrix = TRUE)
constraints(fit2)
-pchisq(2 * (logLik(fit)-logLik(fit2)), # p-value
+pchisq(2 * (logLik(fit)-logLik(fit2)), # p-value
df = df.residual(fit2) - df.residual(fit), lower.tail = FALSE)
}
\keyword{models}
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 6f1ce29..ce7c9f3 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -10,7 +10,7 @@
\usage{
gamma2(lmu = "loge", lshape = "loge",
imethod = 1, ishape = NULL,
- parallel = FALSE, intercept.apply = FALSE,
+ parallel = FALSE, apply.parint = FALSE,
deviance.arg = FALSE, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
@@ -69,8 +69,8 @@ gamma2(lmu = "loge", lshape = "loge",
}
- \item{parallel, intercept.apply}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ \item{parallel, apply.parint}{
+ Details at \code{\link{CommonVGAMffArguments}}.
}
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index fb471d6..2991dfa 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -75,6 +75,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
If in doubt, type something like \code{weights(object, type="working")}
after the model has been fitted.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -82,16 +83,19 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
}
\author{ Thomas W. Yee }
@@ -101,6 +105,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
\code{\link[stats]{glm}}.
The \code{"ff"} in the name is added to avoid any masking problems.
+
}
% \section{Warning }{
@@ -108,17 +113,20 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
% be cautious.
%
%
+% }
\seealso{
\code{\link{normal1}},
- \code{\link{huber}},
+ \code{\link{huber2}},
\code{\link{lqnorm}},
\code{\link{binormal}},
+  \code{\link{SUR}},
\code{vlm},
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{rrvglm}}.
+
}
\examples{
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index 2fd8549..c36fb7f 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -60,7 +60,7 @@ genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "
The 4-parameter generalized beta II distribution has density
\deqn{f(y) = a y^{ap-1} / [b^{ap} B(p,q) \{1 + (y/b)^a\}^{p+q}]}{%
f(y) = a y^(ap-1) / [b^(ap) B(p,q) (1 + (y/b)^a)^(p+q)]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
Here \eqn{B} is the beta function, and
\eqn{b} is the scale parameter \code{scale},
while the others are shape parameters.
@@ -132,6 +132,7 @@ More improvements could be made here.
}
\examples{
+\dontrun{
gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1))) # A special case!
fit <- vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
fit <- vglm(y ~ 1, data = gdata, trace = TRUE,
@@ -140,5 +141,6 @@ coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/geometric.Rd b/man/geometric.Rd
index 21a5865..0f11db8 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -1,19 +1,26 @@
\name{geometric}
\alias{geometric}
+\alias{truncgeometric}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Geometric Distribution }
+\title{ Geometric (Truncated and Untruncated) Distributions }
\description{
- Maximum likelihood estimation for the geometric distribution.
+ Maximum likelihood estimation for the geometric
+ and truncated geometric distributions.
+
+
}
\usage{
geometric(link = "logit", expected = TRUE, imethod = 1,
iprob = NULL, zero = NULL)
+truncgeometric(upper.limit = Inf,
+ link = "logit", expected = TRUE, imethod = 1,
+ iprob = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link}{
Parameter link function applied to the
- parameter \eqn{p}{prob}, which lies in the unit interval.
+ probability parameter \eqn{p}{prob}, which lies in the unit interval.
See \code{\link{Links}} for more choices.
@@ -23,17 +30,22 @@ geometric(link = "logit", expected = TRUE, imethod = 1,
Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
}
- \item{imethod}{
- An integer with value \code{1} or \code{2} or \code{3} which
- specifies the initialization method for the probability.
- If failure to converge occurs try another value.
+ \item{iprob, imethod, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more details.
}
- \item{iprob, zero}{
- See \code{\link{CommonVGAMffArguments}} for more details.
+
+ \item{upper.limit}{
+ Numeric.
+  Upper limit values for the response.
+ As a vector, it is recycled across responses first.
+ The default value means both family functions should give the same result.
+
+
}
+
}
\details{
A random variable \eqn{Y} has a 1-parameter geometric distribution
@@ -49,9 +61,19 @@ geometric(link = "logit", expected = TRUE, imethod = 1,
negative binomial distribution (see \code{\link{negbinomial}}).
If \eqn{Y} has a geometric distribution with parameter \eqn{p}{prob} then
\eqn{Y+1} has a positive-geometric distribution with the same parameter.
+ Multiple responses are permitted.
- Multiple responses are permitted.
+ For \code{truncgeometric()},
+ the (upper) truncated geometric distribution can have response integer
+ values from 0 to \code{upper.limit}.
+ It has density \code{prob * (1 - prob)^y / [1-(1-prob)^(1+upper.limit)]}.
+
+
+ For a generalized truncated geometric distribution with
+ integer values \eqn{L} to \eqn{U}, say, subtract \eqn{L}
+ from the response and feed in \eqn{U-L} as the upper limit.
+
}
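A small base-R sketch of the truncated density quoted above (the values
prob = 0.4 and upper.limit = 5 are illustrative only, not defaults):

prob <- 0.4; upper.limit <- 5
y <- 0:upper.limit
dtrunc <- prob * (1 - prob)^y / (1 - (1 - prob)^(1 + upper.limit))
sum(dtrunc)  # Should be 1
set.seed(1)
ysim <- rgeom(1e5, prob)
ysim <- ysim[ysim <= upper.limit]  # Truncate, as in the subset() calls below
round(rbind(theoretical = dtrunc,
            empirical   = as.vector(table(ysim)) / length(ysim)), 3)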
@@ -70,7 +92,13 @@ geometric(link = "logit", expected = TRUE, imethod = 1,
}
-\author{ T. W. Yee }
+\author{
+T. W. Yee.
+Help from Viet Hoang Quoc is gratefully acknowledged.
+
+
+
+}
%\note{
%
%}
@@ -90,13 +118,32 @@ geometric(link = "logit", expected = TRUE, imethod = 1,
gdata <- data.frame(x2 = runif(nn <- 1000) - 0.5)
gdata <- transform(gdata, x3 = runif(nn) - 0.5,
x4 = runif(nn) - 0.5)
-gdata <- transform(gdata, eta = 1.0 - 1.0 * x2 + 2.0 * x3)
+gdata <- transform(gdata, eta = -1.0 - 1.0 * x2 + 2.0 * x3)
gdata <- transform(gdata, prob = logit(eta, inverse = TRUE))
gdata <- transform(gdata, y1 = rgeom(nn, prob))
with(gdata, table(y1))
-fit <- vglm(y1 ~ x2 + x3 + x4, geometric, gdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-summary(fit)
+fit1 <- vglm(y1 ~ x2 + x3 + x4, geometric, gdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+summary(fit1)
+
+# Truncated geometric (between 0 and upper.limit)
+upper.limit <- 5
+tdata <- subset(gdata, y1 <= upper.limit)
+nrow(tdata) # Less than nn
+fit2 <- vglm(y1 ~ x2 + x3 + x4, truncgeometric(upper.limit),
+ data = tdata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+
+# Generalized truncated geometric (between lower.limit and upper.limit)
+lower.limit <- 1
+upper.limit <- 8
+gtdata <- subset(gdata, lower.limit <= y1 & y1 <= upper.limit)
+with(gtdata, table(y1))
+nrow(gtdata) # Less than nn
+fit3 <- vglm(y1 - lower.limit ~ x2 + x3 + x4,
+ truncgeometric(upper.limit - lower.limit),
+ data = gtdata, trace = TRUE)
+coef(fit3, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/gev.Rd b/man/gev.Rd
index f16d75f..a549d86 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -265,19 +265,19 @@ egev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
}
\examples{
+\dontrun{
# Multivariate example
fit1 <- vgam(cbind(r1, r2) ~ s(year, df = 3), gev(zero = 2:3),
- venice, trace = TRUE)
+ data = venice, trace = TRUE)
coef(fit1, matrix = TRUE)
head(fitted(fit1))
-\dontrun{ par(mfrow = c(1, 2), las = 1)
+par(mfrow = c(1, 2), las = 1)
plot(fit1, se = TRUE, lcol = "blue", scol = "forestgreen",
main = "Fitted mu(year) function (centered)", cex.main = 0.8)
with(venice, matplot(year, depvar(fit1)[, 1:2], ylab = "Sea level (cm)",
col = 1:2, main = "Highest 2 annual sea levels", cex.main = 0.8))
with(venice, lines(year, fitted(fit1)[,1], lty = "dashed", col = "blue"))
-legend("topleft", lty = "dashed", col = "blue", "Fitted 95 percentile") }
-
+legend("topleft", lty = "dashed", col = "blue", "Fitted 95 percentile")
# Univariate example
(fit <- vglm(maxtemp ~ 1, egev, oxtemp, trace = TRUE))
@@ -286,8 +286,9 @@ coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
vcov(fit, untransform = TRUE)
-sqrt(diag(vcov(fit))) # Approximate standard errors
-\dontrun{ rlplot(fit) }
+sqrt(diag(vcov(fit))) # Approximate standard errors
+rlplot(fit)
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index 3475388..f055252 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -101,9 +101,8 @@ London: Springer-Verlag.
}
\examples{
-\dontrun{
-x <- seq(-3, 3, by = 0.01)
-loc <- 0; sigma <- 1; xi <- -0.4
+\dontrun{ loc <- 2; sigma <- 1; xi <- -0.4
+x <- seq(loc - 3, loc + 3, by = 0.01)
plot(x, dgev(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0,1),
main = "Blue is density, red is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
diff --git a/man/gew.Rd b/man/gew.Rd
index 0fc9d09..35f4f61 100644
--- a/man/gew.Rd
+++ b/man/gew.Rd
@@ -4,33 +4,93 @@
\title{ General Electric and Westinghouse Data }
\description{
General Electric and Westinghouse capital data.
+
}
\usage{data(gew)}
\format{
- A data frame with 20 observations on the following 6 variables.
+ A data frame with 20 observations on the following 7 variables.
+ All variables are numeric vectors.
+ Variables ending in \code{.g} correspond to General Electric and
+  those ending in \code{.w} to Westinghouse.
\describe{
- \item{y1}{a numeric vector which may be regarded as investment
- figures for the two companies}
- \item{x1}{market values}
- \item{x2}{capital stocks}
- \item{y2}{a numeric vector which may be regarded as investment
- figures for the two companies}
- \item{x3}{market values}
- \item{x4}{capital stocks}
+ \item{year}{The observations are the years from 1934 to 1953}
+ \item{invest.g, invest.w}{investment figures.
+These are \eqn{I=} Gross investment =
+additions to plant and equipment plus maintenance and repairs
+in millions of dollars deflated by \eqn{P_1}.
+
+
+ }
+ \item{capital.g, capital.w}{capital stocks.
+These are \eqn{C=} The stock of plant and equipment =
+accumulated sum of net additions to plant and equipment deflated
+by \eqn{P_1} minus depreciation allowance deflated by \eqn{P_3}.
+
+
+ }
+ \item{value.g, value.w}{market values.
+These are \eqn{F=} Value of the firm =
+price of common and preferred shares at December 31
+(or average price of December 31 and January 31 of the following year)
+times number of common and preferred shares outstanding plus
+total book value of debt at December 31 in millions of
+dollars deflated by \eqn{P_2}.
+
+
+}
}
}
\details{
- The period is 1934 to 1953.
+ These data are a subset of a table in Boot and de Wit (1960),
+ also known as the Grunfeld data.
+  They are widely used in econometrics,
+ e.g., for seemingly unrelated regressions (see \code{SUR}).
+
+
+ Here,
+ \eqn{P_1 =} Implicit price deflator of producers durable
+ equipment (base 1947),
+ \eqn{P_2 =} Implicit price deflator of G.N.P.
+ (base 1947),
+ \eqn{P_3 =} Depreciation expense deflator = ten years
+ moving average of wholesale price index of metals and metal
+ products (base 1947).
+
+
+
+
+
}
\source{
- Unknown.
+
+ Table 10 of:
+ Boot, J. C. G. and de Wit, G. M. (1960)
+ Investment Demand: An Empirical Contribution to the Aggregation Problem.
+ \emph{International Economic Review},
+ \bold{1}, 3--30.
+
+
+ Grunfeld, Y. (1958)
+ The Determinants of Corporate Investment.
+ Unpublished PhD Thesis (Chicago).
+
+
+}
+\seealso{
+ \code{\link[VGAM:SUR]{SUR}},
+ \url{http://statmath.wu.ac.at/~zeileis/grunfeld}.
+
+
}
+
\references{
-Zellner, A. (1962)
-An efficient method of estimating seemingly unrelated regressions
-and tests for aggregation bias.
-\emph{Journal of the American Statistical Association},
-\bold{57}, 348--368.
+
+ Zellner, A. (1962)
+ An efficient method of estimating seemingly unrelated regressions
+ and tests for aggregation bias.
+ \emph{Journal of the American Statistical Association},
+ \bold{57}, 348--368.
+
}
\examples{
diff --git a/man/golf.Rd b/man/golf.Rd
index b8c56ac..72ac5be 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -28,9 +28,9 @@ golf(theta, lambda = 1, cutpoint = NULL,
If \code{golf()} is used as the link function in
  \code{\link{cumulative}} then, if the cutpoints are known,
one should choose
- \code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
+ \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
If the cutpoints are unknown, then choose
- \code{reverse = TRUE, parallel = TRUE, intercept.apply = FALSE}.
+ \code{reverse = TRUE, parallel = TRUE, apply.parint = FALSE}.
}
@@ -133,7 +133,7 @@ gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
with(gdata, table(cuty) / sum(table(cuty)))
fit <- vglm(cuty ~ x2 + x3, cumulative(mv = TRUE,
- reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
+ reverse = TRUE, parallel = TRUE, apply.parint = TRUE,
link = golf(cutpoint = cutpoints[2:3], lambda = lambda)),
data = gdata, trace = TRUE)
head(depvar(fit))
fit@misc
\keyword{regression}
% # Another example
-% nn = 1000
-% x2 = sort(runif(nn))
-% x3 = runif(nn)
-% shape = exp(0.0)
-% mymu = exp( 3 + 1 * x2 - 2 * x3)
-% y1 = rnbinom(nn, mu=mymu, size=shape)
-% cuty = Cut(y1)
-% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf", rev = TRUE,
+% nn <- 1000
+% x2 <- sort(runif(nn))
+% x3 <- runif(nn)
+% shape <- exp(0.0)
+% mymu <- exp( 3 + 1 * x2 - 2 * x3)
+% y1 <- rnbinom(nn, mu=mymu, size=shape)
+% cuty <- Cut(y1)
+% fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf", rev = TRUE,
% mv = TRUE, parallel = TRUE, earg = list(lambda=shape)))
% coef(fit)
-% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "probit", rev = TRUE,
+% fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "probit", rev = TRUE,
% mv = TRUE, parallel = TRUE))
% coef(fit, matrix = TRUE)
% coef(fit)
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index 8815781..ddf2657 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -102,8 +102,8 @@ London: Springer-Verlag.
}
\examples{
-\dontrun{ x <- seq(-0.2, 3, by = 0.01)
-loc <- 0; sigma <- 1; xi <- -0.4
+\dontrun{ loc <- 2; sigma <- 1; xi <- -0.4
+x <- seq(loc - 0.2, loc + 3, by = 0.01)
plot(x, dgpd(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0, 1),
main = "Blue is density, red is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
@@ -113,6 +113,8 @@ lines(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi),
col = "purple", lty = 3, type = "h")
lines(x, pgpd(x, loc, sigma, xi), type = "l", col = "red")
abline(h = 0, lty = 2)
+
+pgpd(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi)
}
}
\keyword{distribution}
diff --git a/man/grain.us.Rd b/man/grain.us.Rd
index a0a2e0c..35d2202 100644
--- a/man/grain.us.Rd
+++ b/man/grain.us.Rd
@@ -39,10 +39,12 @@ Nested reduced-rank autoregressive models for multiple time series.
}
\examples{
-cgrain = scale(grain.us, scale = FALSE) # Center the time series only
-fit = vglm(cgrain ~ 1, rrar(Rank = c(4, 1)),
- eps = 1e-3, step = 0.5, trace = TRUE, maxit = 40)
+\dontrun{
+cgrain <- scale(grain.us, scale = FALSE) # Center the time series only
+fit <- vglm(cgrain ~ 1, rrar(Rank = c(4, 1)),
+ epsilon = 1e-3, stepsize = 0.5, trace = TRUE, maxit = 50)
summary(fit)
}
+}
\keyword{datasets}
diff --git a/man/grc.Rd b/man/grc.Rd
index 0f91dda..4f3010b 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -283,14 +283,15 @@ assistance from Alfian F. Hadi.
\code{\link{alcoff}},
\code{\link{crashi}},
\code{\link{auuc}},
- \code{\link{olympic}},
+ \code{\link[VGAM:olym08]{olym08}},
+ \code{\link[VGAM:olym12]{olym12}},
\code{\link{poissonff}}.
}
\examples{
-grc1 <- grc(auuc) # Undergraduate enrolments at Auckland University in 1990
+grc1 <- grc(auuc) # Undergraduate enrolments at Auckland University in 1990
fitted(grc1)
summary(grc1)
@@ -299,33 +300,34 @@ fitted(grc2)
summary(grc2)
-# 2008 Summer Olympic Games in Beijing
-top10 <- head(olympic, n = 10)
-oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
-round(fitted(oly1))
-round(resid(oly1, type = "response"), dig = 1) # Response residuals
-summary(oly1)
-Coef(oly1)
+# 2012 Summer Olympic Games in London
+\dontrun{ top10 <- head(oly12, n = 10)
+grc.oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
+round(fitted(grc.oly1))
+round(resid(grc.oly1, type = "response"), dig = 1) # Response residuals
+summary(grc.oly1)
+Coef(grc.oly1)
+}
# Roughly median polish
rcim0 <- rcim(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
round(fitted(rcim0), dig = 0)
-round(100 * (fitted(rcim0) - auuc) / auuc, dig = 0) # Discrepancy
+round(100 * (fitted(rcim0) - auuc) / auuc, dig = 0) # Discrepancy
rcim0@y
round(coef(rcim0, matrix = TRUE), dig = 2)
-print(Coef(rcim0, matrix = TRUE), dig = 3)
+Coef(rcim0, matrix = TRUE)
# constraints(rcim0)
names(constraints(rcim0))
# Compare with medpolish():
(med.a <- medpolish(auuc))
fv <- med.a$overall + outer(med.a$row, med.a$col, "+")
-round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
+round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
}
\keyword{models}
\keyword{regression}
-% plot(oly1)
+% plot(grc.oly1)
% oly2 <- with(top10, grc(cbind(gold,silver,bronze), Rank = 2)) # Saturated model
% round(fitted(oly2))
% round(fitted(oly2)) - with(top10, cbind(gold,silver,bronze))
@@ -337,5 +339,6 @@ round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
% Then \code{.grc.df} is deleted before exiting the function.
+% print(Coef(rcim0, matrix = TRUE), dig = 3)
diff --git a/man/hormone.Rd b/man/hormone.Rd
index 5af3fcc..6b39d06 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -2,12 +2,12 @@
\alias{hormone}
\docType{data}
\title{
- Hormone Data
+ Hormone Assay Data
}
\description{
- A data set described in Carroll and Ruppert (1988)
- concerning hormone assay.
+ A hormone assay data set from Carroll and Ruppert (1988).
+
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
@@ -18,10 +18,12 @@
\describe{
\item{\code{X}}{a numeric vector, suitable as the x-axis in
a scatter plot.
+ The reference method.
}
\item{\code{Y}}{a numeric vector, suitable as the y-axis in
a scatter plot.
+ The test method.
}
}
@@ -29,8 +31,22 @@
\details{
%% ~~ If necessary, more details than the __description__ above ~~
-The data is described in
-Carroll and Ruppert (1988).
+The data are given in Table 2.4 of
+Carroll and Ruppert (1988), and were downloaded
+from \url{http://www.stat.tamu.edu/~carroll}.
+The book describes the data as follows.
+The data are the results of two assay methods for hormone
+data; the scale of the data as presented is not
+particularly meaningful, and the original source
+of the data refused permission to divulge further
+information. As in a similar example of
+Leurgans (1980), the old or reference method is
+being used to predict the new or test method.
+The overall goal is to see whether we can reproduce
+the test-method measurements with the reference-method
+measurements.
+Thus calibration might be of interest for the data.
+
}
@@ -39,6 +55,8 @@ Carroll and Ruppert (1988).
% Originally,
%}
+
+
\references{
Carroll, R. J. and Ruppert, D. (1988)
@@ -46,15 +64,24 @@ Carroll and Ruppert (1988).
New York, USA: Chapman & Hall.
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Leurgans, S. (1980)
+ Evaluating laboratory measurement techniques.
+ \emph{Biostatistics Casebook}.
+ Eds.: Miller, R. G. Jr., and Efron, B. and
+ Brown, B. W. Jr., and Moses, L.
+ New York, USA: Wiley.
+
+
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
+
}
\seealso{
- \code{\link{normal1}},
- \code{\link{rrvglm}}.
+ \code{\link[VGAM]{normal1}},
+ \code{\link[VGAM]{rrvglm}}.
}
@@ -62,6 +89,7 @@ Carroll and Ruppert (1988).
\examples{
+\dontrun{
data(hormone)
summary(hormone)
@@ -80,7 +108,8 @@ logLik(modelI)
logLik(modelII) # Less than logLik(modelI)
-# Reproduce Equations (1)--(3) on p.65 of Carroll and Ruppert (1988)
+# Reproduce the top 3 equations on p.65 of Carroll and Ruppert (1988).
+# They are called Equations (1)--(3) here.
# Equation (1)
hormone <- transform(hormone, rX = 1 / X)
@@ -89,31 +118,37 @@ fit1 <- vglm(Y ~ 1 + X + rX, family = normal1(zero = NULL),
constraints = clist, data = hormone, trace = TRUE)
coef(fit1, matrix = TRUE)
summary(fit1) # Actually, the intercepts do not seem significant
-\dontrun{ plot(Y ~ X, hormone, col = "blue")
-lines(fitted(fit1) ~ X, hormone, col = "orange") }
+plot(Y ~ X, hormone, col = "blue")
+lines(fitted(fit1) ~ X, hormone, col = "orange")
# Equation (2)
fit2 <- rrvglm(Y ~ 1 + X, normal1(zero = NULL), hormone, trace = TRUE)
coef(fit2, matrix = TRUE)
-\dontrun{ plot(Y ~ X, hormone, col = "blue")
+plot(Y ~ X, hormone, col = "blue")
lines(fitted(fit2) ~ X, hormone, col = "red")
# Add +- 2 SEs
lines(fitted(fit2) + 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
hormone, col = "orange")
lines(fitted(fit2) - 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
- hormone, col = "orange") }
+ hormone, col = "orange")
# Equation (3)
# Does not fit well because the loge link for the mean is not good.
fit3 <- rrvglm(Y ~ 1 + X, maxit = 300, data = hormone, trace = TRUE,
normal1(lmean = "loge", zero = NULL))
coef(fit3, matrix = TRUE)
-\dontrun{ plot(Y ~ X, hormone, col = "blue") # Does not look okay.
+plot(Y ~ X, hormone, col = "blue") # Does not look okay.
lines(exp(predict(fit3)[, 1]) ~ X, hormone, col = "red")
# Add +- 2 SEs
lines(fitted(fit3) + 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
hormone, col = "orange")
lines(fitted(fit3) - 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
- hormone, col = "orange") }
+ hormone, col = "orange")
+}
}
\keyword{datasets}
+
+
+% from \url{http://www.stat.tamu.edu/~carroll/data/hormone_data.txt}.
+
+
diff --git a/man/huber.Rd b/man/huber.Rd
index 8ae879b..11d3254 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -1,5 +1,5 @@
-\name{huber}
-\alias{huber}
+\name{huber2}
+\alias{huber2}
\alias{huber1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Huber's least favourable distribution family function }
@@ -11,8 +11,8 @@
}
\usage{
huber1(llocation = "identity", k = 0.862, imethod = 1)
-huber(llocation = "identity", lscale = "loge",
- k = 0.862, imethod = 1, zero = 2)
+huber2(llocation = "identity", lscale = "loge",
+ k = 0.862, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -74,7 +74,7 @@ huber(llocation = "identity", lscale = "loge",
}
\note{
- Warning: actually, \code{huber()} may be erroneous since the
+ Warning: actually, \code{huber2()} may be erroneous since the
first derivative is not continuous when there are two parameters
to estimate. \code{huber1()} is fine in this respect.
@@ -100,16 +100,16 @@ hdata <- transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2))
hdata$x2[1] <- 0.0 # Add an outlier
hdata$y[1] <- 10
-fit.huber <- vglm(y ~ x2, huber (imethod = 3), hdata, trace = TRUE)
+fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), hdata, trace = TRUE)
fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), hdata, trace = TRUE)
-coef(fit.huber, matrix = TRUE)
-summary(fit.huber)
+coef(fit.huber2, matrix = TRUE)
+summary(fit.huber2)
\dontrun{ # Plot the results
plot(y ~ x2, hdata, col = "blue", las = 1)
-lines(fitted(fit.huber) ~ x2, hdata, col = "darkgreen", lwd = 2)
+lines(fitted(fit.huber2) ~ x2, hdata, col = "darkgreen", lwd = 2)
fit.lm <- lm(y ~ x2, hdata) # Compare to a LM:
lines(fitted(fit.lm) ~ x2, hdata, col = "lavender", lwd = 3)
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index bfb94be..d17a0ef 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -43,7 +43,7 @@
}
\details{
- Details are given in \code{\link{huber}}, the
+ Details are given in \code{\link{huber2}}, the
\pkg{VGAM} family function for estimating the
parameters \code{mu} and \code{sigma}.
@@ -67,9 +67,11 @@
% Huber, P. J. and Ronchetti, E. (2009)
% \emph{Robust Statistics}, 2nd ed. New York: Wiley.
%
+%
% Huber, P. J. and Ronchetti, E. (2009) Robust Statistics
% (2nd ed.). Wiley, New York.
%
+%
%}
\author{
@@ -82,7 +84,7 @@
}
\seealso{
- \code{\link{huber}}.
+ \code{\link{huber2}}.
}
@@ -91,8 +93,7 @@ set.seed(123456)
edhuber(1:5, k = 1.5)
rhuber(5)
-# Plot cdf and pdf
-\dontrun{ mu <- 3; xx <- seq(-2, 7, len = 100)
+\dontrun{ mu <- 3; xx <- seq(-2, 7, len = 100) # Plot CDF and PDF
plot(xx, dhuber(xx, mu = mu), type = "l", col = "blue", las = 1, ylab = "",
main = "blue is density, red is cumulative distribution function",
sub = "Purple lines are the 10,20,...,90 percentiles",
@@ -104,7 +105,7 @@ Q <- qhuber(probs, mu = mu)
lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
lines(Q, phuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
-phuber(Q, mu = mu) - probs # Should be all 0s
+phuber(Q, mu = mu) - probs # Should be all 0s
}
}
\keyword{distribution}
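A further quick check in the same spirit (a sketch only; it assumes dhuber() is
vectorised and takes a location argument mu, as the example above already uses):

# The Huber least favourable density should integrate to 1
integrate(dhuber, lower = -Inf, upper = Inf, mu = 3)$value  # Should be close to 1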
diff --git a/man/hued.Rd b/man/hued.Rd
deleted file mode 100644
index 80a71f3..0000000
--- a/man/hued.Rd
+++ /dev/null
@@ -1,55 +0,0 @@
-\name{hued}
-\alias{hued}
-\docType{data}
-\title{
-Harvard University Degrees Conferred by Student Ethnicity
-
-%% ~~ data name/kind ... ~~
-}
-\description{
- A two-way table of counts; there are 7 ethnic groups by 12
- degrees.
-
-}
-\usage{data(hued)}
-\format{
- The format is:
- chr "hued"
-
-}
-\details{
- The rownames and colnames have been edited.
- The full names are:
-Asian/Pacific Islander,
-Black/Non-Hispanic,
-Hispanic,
-International Students,
-Native American,
-White/Non-Hispanic,
-Unknown/Other.
-The academic year was 2009--2010.
-GSAS stands for Graduate School of Arts and Sciences.
-The Other group includes students reported as Two or More Races.
-See the URL below for more technical details supporting the data.
-
-
-
-%% ~~ If necessary, more details than the __description__ above ~~
-}
-\source{
- \url{http://www.provost.harvard.edu/institutional_research/factbook.php}
-
-}
-\seealso{
- \code{\link{huie}},
- \code{\link{huse}}.
-
-}
-
-%%\references{
-%% ~~ possibly secondary sources and usages ~~
-%%}
-\examples{
-print(hued)
-}
-\keyword{datasets}
diff --git a/man/huggins91.Rd b/man/huggins91.Rd
deleted file mode 100644
index ce5fab8..0000000
--- a/man/huggins91.Rd
+++ /dev/null
@@ -1,182 +0,0 @@
-\name{huggins91}
-\alias{huggins91}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Huggins (1991) Capture-recapture Model Family Function
- (approximation only) }
-\description{
- Fits a Huggins (1991) capture-recapture model to a matrix of 0s
- and 1s: animals sampled on several occasions and individual
- animals caught at least once.
-
-}
-\usage{
-huggins91(link = "logit", parallel = TRUE,
- iprob = NULL, eim.not.oim = TRUE)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
- \item{link, parallel, iprob}{
- See \code{\link{CommonVGAMffArguments}} for information.
- The \code{parallel} argument should generally be left alone since
- parallelism is assumed by Huggins (1991).
-
- }
- \item{eim.not.oim}{
- Logical. If \code{TRUE} use the EIM, else the OIM.
-
- }
-}
-\details{
- This model operates on a response matrix of 0s and 1s. Each of
- at least two columns is an occasion where animals are potentially
- captured (e.g., a field trip), and each row is an individual
- animal. Capture is a 1, else a 0. Each row has at least one
- capture. It is well-known that animals are affected by capture,
- e.g., trap-shy or trap-happy. This \pkg{VGAM} family function
- attempts to allow the capture history to be modelled. This
- involves the use of the \code{xij} argument. Ignoring capture
- history effects would mean \code{\link{posbinomial}} could
- be used by aggregating over the sampling occasions.
-
-
- Huggins (1991) suggests a model involving maximizing a
- conditional likelihood.
- The form of this is a numerator divided by a denominator,
- where the true model has part of the linear/additive predictor
- modelling capture history applying to the numerator only,
- so that part is set to zero in the denominator.
- The numerator of the conditional likelihood corresponds
- to a sequence of Bernoulli trials,
- with at least one success,
- for each animal.
-
-
- Unfortunately the Huggins model is too difficult to fit in this
- package, and one can only use the \emph{same} linear/additive
- predictor in the numerator as the denominator. Hence this
- \pkg{VGAM} family function does \emph{not} implement the model
- properly.
-
-
- The number of linear/additive predictors is twice the number
- of sampling occasions, i.e., \eqn{M = 2T}, say.
- The first two correspond to the first sampling occasion,
- the next two correspond to the second sampling occasion, etc.
- Even-numbered linear/additive predictors should correspond to what
- would happen if no capture had occurred (they belong to
- the denominator.)
- Odd-numbered linear/additive predictors correspond to
- what actually happened (they belong to the numerator.)
-
-
- The fitted value for column \eqn{t} is the \eqn{t}th
- numerator probability divided by the denominator.
-
-
-}
-\value{
- An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}},
- and \code{\link{vgam}}.
-
-
-}
-\references{
-
-Huggins, R. M. (1991)
-Some practical aspects of a conditional likelihood
-approach to capture experiments.
-\emph{Biometrics},
-\bold{47}, 725--732.
-
-
-}
-\author{ Thomas W. Yee }
-
-\note{
- The \code{weights} argument of \code{\link{vglm}} need not be
- assigned, and the default is just a matrix of ones.
-
-
- This \pkg{VGAM} family function is currently more complicated
- than it needs to be, e.g., it is possible to
- simplify \eqn{M = T}, say.
-
-
-}
-
-\section{Warning }{
- This \pkg{VGAM} family function is experimental and does not
- work properly because the linear/additive predictor in the
- numerator and denominator must be the same.
- The parameter estimates of the Huggins (1991) model ought to
- be similar (probably in between, in some sense) to two models:
- Model 1 is where the capture history variable is included,
- Model 2 is where the capture history variable is not included.
- See the example below.
- A third model, called Model 3, allows for 'half' the capture
- history to be put in both numerator and denominator. This
- might be thought of as a compromise between Models 1 and 2,
- and may be useful as a crude approximation.
-
-
- Under- or over-flow may occur if the data is ill-conditioned.
-
-
-}
-\seealso{
- \code{\link{vglm.control}} for \code{xij},
- \code{\link{dhuggins91}},
- \code{\link{rhuggins91}}.
- \code{\link{posbinomial}}.
-
-
-}
-
-\examples{
-set.seed(123); nTimePts <- 5
-hdata <- rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2)
-
-# The truth: xcoeffs are c(-2, 1, 2) and capeffect = -1
-
-# Model 1 is where capture history information is used
-model1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
- huggins91, data = hdata, trace = TRUE,
- xij = list(Chistory ~ ch0 + zch0 +
- ch1 + zch1 + ch2 + zch2 +
- ch3 + zch3 + ch4 + zch4 - 1),
- form2 = ~ 1 + x2 + Chistory +
- ch0 + ch1 + ch2 + ch3 + ch4 +
- zch0 + zch1 + zch2 + zch3 + zch4)
-
-coef(model1, matrix = TRUE) # Biased!!
-summary(model1)
-head(fitted(model1))
-head(model.matrix(model1, type = "vlm"), 21)
-head(hdata)
-
-# Model 2 is where no capture history information is used
-model2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
- huggins91, data = hdata, trace = TRUE)
-coef(model2, matrix = TRUE) # Biased!!
-summary(model2)
-
-# Model 3 is where half the capture history is used in both
-# the numerator and denominator
-set.seed(123); nTimePts = 5
-hdata2 <- rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2,
- double.ch = TRUE)
-head(hdata2) # 2s have replaced the 1s in hdata
-model3 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
- huggins91, data = hdata2, trace = TRUE,
- xij = list(Chistory ~ ch0 + zch0 +
- ch1 + zch1 + ch2 + zch2 +
- ch3 + zch3 + ch4 + zch4 - 1),
- form2 = ~ 1 + x2 + Chistory +
- ch0 + ch1 + ch2 + ch3 + ch4 +
- zch0 + zch1 + zch2 + zch3 + zch4)
-coef(model3, matrix = TRUE) # Biased!!
-}
-\keyword{models}
-\keyword{regression}
-
diff --git a/man/huggins91UC.Rd b/man/huggins91UC.Rd
deleted file mode 100644
index 462a179..0000000
--- a/man/huggins91UC.Rd
+++ /dev/null
@@ -1,141 +0,0 @@
-\name{huggins91UC}
-\alias{huggins91UC}
-\alias{dhuggins91}
-\alias{rhuggins91}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Huggins (1991) Capture-recapture Model }
-\description{
- Density, and random generation
- for the Huggins (1991) capture-recapture model.
-
-}
-\usage{
-rhuggins91(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2),
- capeffect = -1, double.ch = FALSE,
- link = "logit", earg.link = FALSE)
-dhuggins91(x, prob, prob0 = prob, log = FALSE)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
- \item{x}{
- response vector or matrix.
- Should have values of 0s or 1s.
-
- }
-
- \item{nTimePts}{Number of sampling occasions.
- Called \eqn{T} in \code{\link{huggins91}}.
-
-
- }
- \item{n}{number of observations.
- Usually a single positive integer, else the length of the vector
- is used.
-
-
- }
-
- \item{capeffect}{
- Numeric, the capture effect.
- Added to the linear predictor if captured previously.
- A positive or negative value corresponds to
- a trap-happy and trap-shy effect respectively.
-
-
- }
-
- \item{double.ch}{
- Logical.
- If \code{TRUE} then the values of \code{ch0}, \code{ch1}, \ldots are
- 2 or 0, else 1 or 0.
- Setting this argument \code{TRUE} means that a model can be fitted
- with half the capture history in both denominator and numerator
- (this is a compromise of the Huggins (1991) model where the full
- capture history only appears in the numerator).
-
-
- }
-
- \item{pvars}{ Number of other numeric covariates that make up
- the linear predictor.
- Labelled \code{x1}, \code{x2}, \ldots,
- where the first is an intercept, and the others are
- independent standard \code{\link[stats:Uniform]{runif}} random variates.
- The first \code{pvars} elements of \code{xcoeff} are used.
-
-
- }
-
- \item{xcoeff}{
- The regression coefficients of the linear predictor.
- These correspond to \code{x1}, \code{x2}, \ldots,
- and the first is for the intercept.
- The length of \code{xcoeff} must be at least \code{pvars}.
-
-
- }
-
- \item{link, earg.link}{
- The former is used to generate the probabilities for capture
- at each occasion.
- Other details at \code{\link{CommonVGAMffArguments}}.
-
-
- }
- \item{prob, prob0}{
- Matrix of probabilities for the numerator and denominators
- respectively.
- The default does \emph{not} correspond to the
- Huggins (1991) model since the denominator should
- be free of any capture history, i.e., as if it had
- never been caught before.
-
-
- }
- \item{log}{
- Logical. Return the logarithm of the answer?
-
-
- }
-
-}
-\details{
- The form of the conditional likelihood is
- described in \code{\link{huggins91}}.
-
-
-}
-\value{
- \code{dhuggins91} gives the density,
- \code{rhuggins91} returns a data frame with some attributes.
- The function generates random deviates
- (\eqn{T} columns labelled \code{y1}, \code{y2}, \ldots)
- for the response.
- Some indicator columns are also included
- (those starting with \code{ch} are for previous capture history,
- and those starting with \code{z} are zero),
- and these are useful for the \code{xij} argument.
-
-
-
-}
-%\references{ }
-\author{ Thomas W. Yee }
-\note{
- These functions are experimental and do not follow the
- usual conventions of \code{d}- and \code{r}-type R functions.
-
-
-}
-
-\seealso{
- \code{\link{huggins91}}.
-
-
-}
-\examples{
-set.seed(123); rhuggins91(n = 10)
-set.seed(123); rhuggins91(n = 10, double.ch = TRUE)
-attributes(rhuggins91(n = 10))
-}
-\keyword{distribution}
diff --git a/man/huie.Rd b/man/huie.Rd
deleted file mode 100644
index e64087d..0000000
--- a/man/huie.Rd
+++ /dev/null
@@ -1,59 +0,0 @@
-\name{huie}
-\alias{huie}
-\docType{data}
-\title{
-Harvard University International Enrollments
-
-%% ~~ data name/kind ... ~~
-}
-\description{
- A two-way table of counts; there are 12
- degrees and 8 areas of the world.
-
-%% ~~ A concise (1-5 lines) description of the dataset. ~~
-}
-\usage{data(huie)}
-\format{
- The format is:
- chr "huie"
-}
-\details{
- The rownames and colnames have been edited.
- The full colnames are:
-Africa,
-Asia,
-Europe,
-Caribbean and Central and South America,
-Middle East,
-North America,
-Oceania,
-Stateless.
-
-The data was for the autumn (Fall) of 2010.
-GSAS stands for Graduate School of Arts and Sciences.
-See the URL below for more technical details supporting the data.
-
-
-
-%% ~~ If necessary, more details than the __description__ above ~~
-}
-\source{
- \url{http://www.provost.harvard.edu/institutional_research/factbook.php}
-
-
-%% ~~ reference to a publication or URL from which the data were obtained ~~
-}
-\seealso{
- \code{\link{hued}},
- \code{\link{huse}}.
-
-}
-
-%%\references{
-%% ~~ possibly secondary sources and usages ~~
-%%}
-\examples{
-print(huie)
-## maybe str(huie) ; plot(huie) ...
-}
-\keyword{datasets}
diff --git a/man/hunua.Rd b/man/hunua.Rd
index 89cbe8d..3824729 100644
--- a/man/hunua.Rd
+++ b/man/hunua.Rd
@@ -7,6 +7,7 @@
Altitude is explanatory, and there are binary responses
(presence/absence = 1/0 respectively) for 17 plant species.
+
}
\usage{data(hunua)}
\format{
@@ -38,27 +39,32 @@
of 17 plant species was recorded, as well as the altitude.
Each site was of area size 200\eqn{m^2}{m^2}.
+
}
\source{
Dr Neil Mitchell, University of Auckland.
+
+
}
%\references{
% None.
%}
\seealso{
- \code{\link{waitakere}}.
+ \code{\link{waitakere}}.
+
+
}
\examples{
# Fit a GAM using vgam() and compare it with the Waitakere Ranges one
-fit.h = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
+fit.h <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
\dontrun{
-plot(fit.h, se=TRUE, lcol="red", scol="red",
- main="Red is Hunua, Blue is Waitakere") }
-head(predict(fit.h, hunua, type="response"))
+plot(fit.h, se = TRUE, lcol = "red", scol = "red",
+ main = "Red is Hunua, Blue is Waitakere") }
+head(predict(fit.h, hunua, type = "response"))
-fit.w = vgam(agaaus ~ s(altitude, df=2), binomialff, waitakere)
+fit.w <- vgam(agaaus ~ s(altitude, df = 2), binomialff, waitakere)
\dontrun{
-plot(fit.w, se=TRUE, lcol="blue", scol="blue", add=TRUE) }
-head(predict(fit.w, hunua, type="response")) # Same as above?
+plot(fit.w, se = TRUE, lcol = "blue", scol = "blue", add = TRUE) }
+head(predict(fit.w, hunua, type = "response")) # Same as above?
}
\keyword{datasets}
diff --git a/man/huse.Rd b/man/huse.Rd
deleted file mode 100644
index 8879d6d..0000000
--- a/man/huse.Rd
+++ /dev/null
@@ -1,73 +0,0 @@
-\name{huse}
-\alias{huse}
-\docType{data}
-\title{
-Harvard University Numbers of Ladder Faculty by School and Ethnicity
-
-%% ~~ data name/kind ... ~~
-}
-\description{
- A two-way table of counts; there are 14
- schools and 5 race/ethnicities.
-
-%% ~~ A concise (1-5 lines) description of the dataset. ~~
-}
-\usage{data(huse)}
-\format{
- The format is:
- chr "huse"
-}
-\details{
- Ladder faculty members of Harvard University are cross-classified by
- their school and their race/ethnicity.
- This was for the period 2010--1.
- Ladder Faculty are defined as Assistant Professors or Convertible
- Instructors, Associate Professors, and Professors that have
- been appointed in certain Schools.
-
-
- Abbreviations:
- FAS = Faculty of Arts and Sciences = Humanities + Social Sciences +
- Natural Sciences + SEAS,
- Natural Sciences = Life Sciences + Physical Sciences,
- SEAS = School of Engineering and Applied Sciences,
- HBS = Harvard Business School,
- HMS = Harvard Medical School,
- HSPH = Harvard School of Public Health,
- HLS = Harvard Law School,
- HKS = Harvard Kennedy School,
- HGSE = Harvard Graduate School of Education,
- GSD = Graduate School of Design ,
- HDS = Harvard Divinity School,
- HSDM = Harvard School of Dental Medicine.
-
-
- See the URL below for many technical details supporting the data.
- The table was constructed from pp.31--2 from the source.
-
-
-%% ~~ If necessary, more details than the __description__ above ~~
-}
-\source{
- \url{http://www.provost.harvard.edu/institutional_research/factbook.php}
-
-%% ~~ reference to a publication or URL from which the data were obtained ~~
-}
-\references{
- \emph{Harvard University Office of the Senior Vice Provost Faculty
- Development \& Diversity: 2010 Annual Report}.
-
-
-%% ~~ possibly secondary sources and usages ~~
-}
-\seealso{
- \code{\link{hued}},
- \code{\link{huie}}.
-
-}
-
-\examples{
-print(huse)
-## maybe str(huse) ; plot(huse) ...
-}
-\keyword{datasets}
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 2138a0f..cd63fdc 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -10,7 +10,7 @@
\usage{
inv.gaussianff(lmu = "loge", llambda = "loge",
imethod = 1, ilambda = NULL,
- parallel = FALSE, intercept.apply = FALSE,
+ parallel = FALSE, apply.parint = FALSE,
shrinkage.init = 0.99, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -21,7 +21,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
See \code{\link{Links}} for more choices.
}
- \item{ilambda, parallel, intercept.apply}{
+ \item{ilambda, parallel, apply.parint}{
See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/invbinomial.Rd b/man/invbinomial.Rd
index c574ed0..2431f80 100644
--- a/man/invbinomial.Rd
+++ b/man/invbinomial.Rd
@@ -114,4 +114,4 @@ sum(weights(fit, type = "work")) # sum of the working weights
\keyword{models}
\keyword{regression}
-%fit = vglm(y ~ 1, invbinomial(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
+%fit <- vglm(y ~ 1, invbinomial(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
diff --git a/man/invlomax.Rd b/man/invlomax.Rd
index 22ba763..27aa2c2 100644
--- a/man/invlomax.Rd
+++ b/man/invlomax.Rd
@@ -43,7 +43,7 @@ invlomax(lscale = "loge", lshape2.p = "loge",
The inverse Lomax distribution has density
\deqn{f(y) = p y^{p-1} / [b^p \{1 + y/b\}^{p+1}]}{%
f(y) = p y^(p-1) / [b^p (1 + y/b)^(p+1)]}
- for \eqn{b > 0}, \eqn{p > 0}, \eqn{y > 0}.
+ for \eqn{b > 0}, \eqn{p > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \code{p} is a shape parameter.
The mean does not exist; \code{NA}s are returned as the fitted values.
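A quick numerical check of the density and the corrected support statement above
(a sketch only; the argument names scale and shape2.p are assumed from the
lscale/lshape2.p links, and dinvlomax() is assumed vectorised):

y <- c(0.5, 1, 2); b <- exp(2); p <- exp(1)
max(abs(dinvlomax(y, scale = b, shape2.p = p) -
        p * y^(p - 1) / (b^p * (1 + y / b)^(p + 1))))  # Should be close to 0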
diff --git a/man/invlomaxUC.Rd b/man/invlomaxUC.Rd
index d60bb88..3dc40bc 100644
--- a/man/invlomaxUC.Rd
+++ b/man/invlomaxUC.Rd
@@ -66,8 +66,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-idata = data.frame(y = rinvlomax(n = 1000, 6, 2))
-fit = vglm(y ~ 1, invlomax, idata, trace = TRUE, crit = "coef")
+idata <- data.frame(y = rinvlomax(n = 1000, exp(2), exp(1)))
+fit <- vglm(y ~ 1, invlomax, idata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/invparalogistic.Rd b/man/invparalogistic.Rd
index 86587ab..c0fec2e 100644
--- a/man/invparalogistic.Rd
+++ b/man/invparalogistic.Rd
@@ -42,7 +42,7 @@ It is the 3-parameter Dagum distribution with \eqn{a=p}.
The inverse paralogistic distribution has density
\deqn{f(y) = a^2 y^{a^2-1} / [b^{a^2} \{1 + (y/b)^a\}^{a+1}]}{%
f(y) = a^2 y^(a^2-1) / [b^(a^2) (1 + (y/b)^a)^(a+1)]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \eqn{a} is the shape parameter.
The mean is
diff --git a/man/invparalogisticUC.Rd b/man/invparalogisticUC.Rd
index b74c21b..8ea3906 100644
--- a/man/invparalogisticUC.Rd
+++ b/man/invparalogisticUC.Rd
@@ -10,6 +10,7 @@
generation for the inverse paralogistic distribution with
shape parameters \code{a} and \code{p}, and scale parameter \code{scale}.
+
}
\usage{
dinvparalogistic(x, shape1.a, scale = 1, log = FALSE)
@@ -37,6 +38,7 @@ rinvparalogistic(n, shape1.a, scale = 1)
\code{qinvparalogistic} gives the quantile function, and
\code{rinvparalogistic} generates random deviates.
+
}
\references{
@@ -66,7 +68,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-idata <- data.frame(y = rinvparalogistic(n = 3000, 4, 6))
+idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
idata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
diff --git a/man/is.parallel.Rd b/man/is.parallel.Rd
index cbccf3d..1362279 100644
--- a/man/is.parallel.Rd
+++ b/man/is.parallel.Rd
@@ -60,10 +60,13 @@ is.parallel.vglm(object, type = c("term", "lm"), \dots)
\examples{
-fit <- vglm(educ ~ bs(age) * sex + ethnic, cumulative(parallel = TRUE), xs.nz)
+\dontrun{ require(VGAMdata)
+fit <- vglm(educ ~ bs(age) * sex + ethnic,
+ cumulative(parallel = TRUE), xs.nz[1:200, ])
is.parallel(fit)
is.parallel(fit, type = "lm") # For each column of the LM matrix
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/is.zero.Rd b/man/is.zero.Rd
index 685affa..cf368b4 100644
--- a/man/is.zero.Rd
+++ b/man/is.zero.Rd
@@ -54,7 +54,8 @@ is.zero.vglm(object, \dots)
}
\examples{
-fit <- vglm(cbind(cat, dog) ~ bs(age) * sex + ethnic, binom2.or, xs.nz)
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
+fit <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers)
is.zero(fit)
is.zero(coef(fit, matrix = TRUE))
}
diff --git a/man/lambertW.Rd b/man/lambertW.Rd
index 5d58ae0..9443a65 100644
--- a/man/lambertW.Rd
+++ b/man/lambertW.Rd
@@ -80,9 +80,10 @@ this would give \eqn{W(z) \leq -1}{W(z) <= -1}.
}
\examples{ \dontrun{
-curve(lambertW, -exp(-1), 3, xlim = c(-1, 3), ylim = c(-2, 1), col = "red")
-abline(v = -exp(-1), h = -1, lty = "dotted")
-abline(h = 0, v = 0, lty = "dashed", lwd = 2) }
+curve(lambertW, -exp(-1), 3, xlim = c(-1, 3), ylim = c(-2, 1),
+ las = 1, col = "orange")
+abline(v = -exp(-1), h = -1, lwd = 2, lty = "dotted", col = "gray")
+abline(h = 0, v = 0, lty = "dashed", col = "blue") }
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
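For readers unfamiliar with the function plotted above, a one-line sanity check of
its defining property (a sketch; it only assumes lambertW() is vectorised, which
the curve() call above already requires):

z <- c(-exp(-1), 0, 0.5, 1, 3)
w <- lambertW(z)
max(abs(w * exp(w) - z))  # Should be close to 0, since W(z) * exp(W(z)) = z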
diff --git a/man/lv.Rd b/man/latvar.Rd
similarity index 69%
rename from man/lv.Rd
rename to man/latvar.Rd
index 58ce9f2..cdec921 100644
--- a/man/lv.Rd
+++ b/man/latvar.Rd
@@ -1,21 +1,31 @@
\name{lv}
\alias{lv}
+\alias{latvar}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Latent Variables }
\description{
Generic function for the \emph{latent variables} of a model.
+
+
}
\usage{
lv(object, ...)
+latvar(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{object}{ An object for which the extraction of latent
- variables is meaningful.
+ \item{object}{
+ An object for which the extraction of latent
+ variables is meaningful.
+
+
}
- \item{\dots}{ Other arguments fed into the specific
- methods function of the model. Sometimes they are fed
- into the methods function for \code{\link{Coef}}.
+ \item{\dots}{
+ Other arguments fed into the specific
+ methods function of the model. Sometimes they are fed
+ into the methods function for \code{\link{Coef}}.
+
+
}
}
\details{
@@ -58,6 +68,11 @@ Constrained additive ordination.
\author{ Thomas W. Yee }
\note{
+ \code{\link{latvar}} and \code{\link{lv}} are identical and will remain
+ available for a short while.
+ But soon \code{\link{lv}} will be withdrawn.
+
+
Latent variables are not really applicable to
\code{\link{vglm}}/\code{\link{vgam}} models.
@@ -66,9 +81,9 @@ Constrained additive ordination.
\seealso{
- \code{lv.qrrvglm},
- \code{lv.rrvglm},
- \code{lv.cao},
+ \code{latvar.qrrvglm},
+ \code{latvar.rrvglm},
+ \code{latvar.cao},
\code{\link{lvplot}}.
@@ -76,15 +91,15 @@ Constrained additive ordination.
\examples{
\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[, 1:6] <- scale(hspider[, 1:6]) # Standardized environmental vars
set.seed(123)
p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
family = poissonff, data = hspider, Rank = 1, df1.nl =
c(Zoraspin = 2.5, 3), Bestof = 3, Crow1positive = TRUE)
-var(lv(p1)) # Scaled to unit variance
-c(lv(p1)) # Estimated site scores
+var(latvar(p1)) # Scaled to unit variance
+c(latvar(p1)) # Estimated site scores
}
}
\keyword{models}
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index 5eba793..570ced0 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -95,7 +95,7 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
}
\examples{
-ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1)) # Not proper data
+ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1)) # Not proper data
fit <- vglm(y ~ 1, leipnik(ilambda = 1), ldata, trace = TRUE, checkwz = FALSE)
fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logoff(offset = 1)),
ldata, trace = TRUE, crit = "coef")
@@ -111,4 +111,4 @@ sum(weights(fit, type = "w")) # sum of the working weights
\keyword{models}
\keyword{regression}
-%fit = vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
+%fit <- vglm(y ~ 1, leipnik(ilambda=1), tr=TRUE, cri="c", checkwz=FALSE)
diff --git a/man/lirat.Rd b/man/lirat.Rd
index 27a41b6..555a676 100644
--- a/man/lirat.Rd
+++ b/man/lirat.Rd
@@ -66,7 +66,7 @@ are accounted for.
\examples{
\dontrun{
# cf. Figure 3 of Moore and Tsiatis (1991)
-plot(R/N ~ hb, data = lirat, pch = as.character(grp), col = grp,
+plot(R / N ~ hb, data = lirat, pch = as.character(grp), col = grp,
las = 1, xlab = "Hemoglobin level", ylab = "Proportion Dead") }
}
\keyword{datasets}
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index ee7a196..16a55f9 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -11,7 +11,7 @@
lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
llambda = "identity", lmu = "identity", lsigma = "loge",
dfmu.init = 4, dfsigma.init = 2, ilambda = 1,
- isigma = NULL, expectiles = FALSE)
+ isigma = NULL, tol0 = 0.001, expectiles = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -75,6 +75,12 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
}
+ \item{tol0}{
+ Small positive number,
+ the tolerance for testing if lambda is equal to zero.
+
+
+ }
\item{expectiles}{
Experimental; please do not use.
@@ -233,6 +239,7 @@ contains further information and examples.
}
\examples{
+\dontrun{ require(VGAMdata)
mysubset <- subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
mysubset <- transform(mysubset, BMI = weight / height^2)
BMIdata <- mysubset[, c("age", "BMI")]
@@ -252,6 +259,7 @@ head(cdf(fit)) # Person 56 is probably overweight, given his age
fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), BMIdata)
fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata,
etastart = predict(fit0))
+}
\dontrun{
# Quantile plot
diff --git a/man/loge.Rd b/man/loge.Rd
index cf09998..e14ca76 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -38,12 +38,12 @@ nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
\details{
The log link function is very commonly used for parameters that
are positive.
+ Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
Numerical values of \code{theta} close to 0 or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
-
The function \code{loge} computes
\eqn{\log(\theta)}{log(theta)} whereas \code{nloge} computes
\eqn{-\log(\theta)=\log(1/\theta)}{-log(theta)=log(1/theta)}.
@@ -61,9 +61,6 @@ nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
else if \code{inverse = TRUE} then it returns the reciprocal.
- Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
-
-
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
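The edit above only moves the natural-logarithm remark; the link itself is
unchanged. A sketch of the forward/inverse behaviour described there (assuming
the usual VGAM link convention that inverse = TRUE applies the inverse link):

theta <- c(0.5, 1, 2)
loge(theta) - log(theta)                   # Should be all 0s
nloge(theta) + log(theta)                  # Should be all 0s
loge(loge(theta), inverse = TRUE) - theta  # Should be all 0s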
diff --git a/man/logit.Rd b/man/logit.Rd
index 94d5198..4dcb6f4 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -20,6 +20,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
Numeric or character.
See below for further details.
+
}
\item{bvalue, bminvalue, bmaxvalue}{
See \code{\link{Links}}.
@@ -120,7 +121,8 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
\code{\link{cloglog}},
\code{\link{cauchit}},
\code{\link{logistic1}},
- \code{\link{loge}}.
+ \code{\link{loge}},
+ \code{\link{mlogit}}.
}
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index 89aec32..38811a8 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -15,9 +15,11 @@ loglinb2(exchangeable = FALSE, zero = NULL)
\item{exchangeable}{ Logical.
If \code{TRUE}, the two marginal probabilities are constrained to
be equal. Should be set \code{TRUE} for ears, eyes, etc. data.
+
}
\item{zero}{ Which linear/additive predictor is modelled as an
intercept only? A \code{NULL} means none of them.
+
}
}
@@ -36,6 +38,7 @@ loglinb2(exchangeable = FALSE, zero = NULL)
\eqn{(\eta_1,\eta_2,\eta_3)^T = (u_1,u_2,u_{12})^T}{(eta1,eta2,eta3) =
(u1,u2,u12)}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -82,13 +85,14 @@ McCullagh, P. and Nelder, J. A. (1989)
\code{\link{binom2.rho}},
\code{\link{loglinb3}}.
+
}
\examples{
coalminers <- transform(coalminers, Age = (age - 42) / 5)
# Get the n x 4 matrix of counts
-temp <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, coalminers)
-counts <- round(c(weights(temp, type = "prior")) * temp@y)
+fit.temp <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, coalminers)
+counts <- round(c(weights(fit.temp, type = "prior")) * depvar(fit.temp))
# Create a n x 2 matrix response for loglinb2()
# bwmat <- matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow = TRUE)
@@ -101,7 +105,7 @@ newminers <- data.frame(bln = kronecker(matof1, bwmat[,1]),
newminers <- newminers[with(newminers, wt) > 0,]
fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2, weight = wt, data = newminers)
-coef(fit, matrix = TRUE) # Same! (at least for the log odds-ratio)
+coef(fit, matrix = TRUE) # Same! (at least for the log odds-ratio)
summary(fit)
# Try reconcile this with McCullagh and Nelder (1989), p.234
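As background to the loglinb2() example above: the predictors (u1, u2, u12)
determine the four joint cell probabilities up to normalisation. A small by-hand
sketch with made-up coefficient values (not taken from any fit above):

u1 <- 0.5; u2 <- -0.2; u12 <- 1.1
cells <- expand.grid(y1 = 0:1, y2 = 0:1)
unnorm <- with(cells, exp(u1 * y1 + u2 * y2 + u12 * y1 * y2))
cbind(cells, prob = unnorm / sum(unnorm))  # The four joint probabilities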
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
index d5af096..ef6eb08 100644
--- a/man/loglinb3.Rd
+++ b/man/loglinb3.Rd
@@ -14,9 +14,12 @@ loglinb3(exchangeable = FALSE, zero = NULL)
\item{exchangeable}{ Logical.
If \code{TRUE}, the three marginal probabilities are constrained to
be equal.
+
}
\item{zero}{ Which linear/additive predictor is modelled as an
intercept only? A \code{NULL} means none.
+
+
}
}
@@ -85,12 +88,14 @@ contains further information and examples.
response need to appear in the data set, therefore data sets
will need to be large in order for this family function to work.
+
}
\seealso{
\code{\link{loglinb2}},
\code{\link{hunua}}.
+
}
\examples{
fit <- vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3, hunua)
diff --git a/man/lomax.Rd b/man/lomax.Rd
index f711945..290c55e 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -5,10 +5,12 @@
\description{
Maximum likelihood estimation of the 2-parameter
Lomax distribution.
+
}
\usage{
lomax(lscale = "loge", lshape3.q = "loge",
- iscale = NULL, ishape3.q = 2, zero = NULL)
+ iscale = NULL, ishape3.q = NULL,
+ gshape3.q = exp(-5:5), zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,18 +19,31 @@ lomax(lscale = "loge", lshape3.q = "loge",
(positive) parameters \code{scale} and \code{q}.
See \code{\link{Links}} for more choices.
+
}
\item{iscale, ishape3.q}{
Optional initial values for \code{scale} and \code{q}.
+
}
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- Here, the values must be from the set \{1,2\} which correspond to
- \code{scale}, \code{q}, respectively.
+
+ \item{gshape3.q, zero}{
+ See
+ \code{\link{CommonVGAMffArguments}}.
+
}
+
+% \item{zero}{
+% An integer-valued vector specifying which
+% linear/additive predictors are modelled as intercepts only.
+% Here, the values must be from the set \{1,2\} which correspond to
+% \code{scale}, \code{q}, respectively.
+% }
+
+
+
+
}
\details{
The 2-parameter Lomax distribution is the 4-parameter
@@ -43,7 +58,7 @@ It is probably more widely known as the Pareto (II) distribution.
The Lomax distribution has density
\deqn{f(y) = q / [b \{1 + y/b\}^{1+q}]}{%
f(y) = q / [b (1 + y/b)^(1+q)]}
- for \eqn{b > 0}, \eqn{q > 0}, \eqn{y > 0}.
+ for \eqn{b > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \code{q} is a shape parameter.
The cumulative distribution function is
@@ -94,9 +109,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-ldata <- data.frame(y = rlomax(n = 1000, exp(1), exp(2)))
+ldata <- data.frame(y = rlomax(n = 1000, scale = exp(1), exp(2)))
fit <- vglm(y ~ 1, lomax, ldata, trace = TRUE)
-fit <- vglm(y ~ 1, lomax(iscale = exp(1), ishape3.q = exp(2)), ldata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
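A quick check of the Lomax cdf mentioned in the details section (a sketch; the
argument names follow the usage shown in man/lomaxUC.Rd below):

y <- c(0.5, 1, 2, 5); b <- exp(1); q <- exp(2)
max(abs(plomax(y, scale = b, shape3.q = q) -
        (1 - (1 + y / b)^(-q))))  # Should be close to 0; F(y) = 1 - (1 + y/b)^(-q)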
diff --git a/man/lomaxUC.Rd b/man/lomaxUC.Rd
index 93df89f..61d1fba 100644
--- a/man/lomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -35,35 +35,59 @@ rlomax(n, scale = 1, shape3.q)
\code{plomax} gives the distribution function,
\code{qlomax} gives the quantile function, and
\code{rlomax} generates random deviates.
+
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{lomax}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
}
\note{
The Lomax distribution is a special case of the 4-parameter
generalized beta II distribution.
+
}
\seealso{
\code{\link{lomax}},
\code{\link{genbetaII}}.
+
}
\examples{
-ldata <- data.frame(y = rlomax(n = 2000, 6, 2))
-fit <- vglm(y ~ 1, lomax(ishape3.q = 2.1), ldata, trace = TRUE, crit = "coef")
-coef(fit, matrix = TRUE)
-Coef(fit)
+probs <- seq(0.1, 0.9, by = 0.1)
+max(abs(plomax(qlomax(p = probs, shape3.q = 1), shape3.q = 1) - probs)) # Should be 0
+
+\dontrun{ par(mfrow = c(1, 2))
+x <- seq(-0.01, 5, len = 401)
+plot(x, dexp(x), type = "l", col = "black", ylab = "", las = 1, ylim = c(0, 3),
+ main = "Black is standard exponential, others are dlomax(x, shape3.q)")
+lines(x, dlomax(x, shape3.q = 1), col = "orange")
+lines(x, dlomax(x, shape3.q = 2), col = "blue")
+lines(x, dlomax(x, shape3.q = 5), col = "green")
+legend("topright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+ legend = paste("shape3.q =", c(1, 2, 5)))
+
+plot(x, pexp(x), type = "l", col = "black", ylab = "", las = 1,
+ main = "Black is standard exponential, others are plomax(x, shape3.q)")
+lines(x, plomax(x, shape3.q = 1), col = "orange")
+lines(x, plomax(x, shape3.q = 2), col = "blue")
+lines(x, plomax(x, shape3.q = 5), col = "green")
+legend("bottomright", col = c("orange","blue","green"), lty = rep(1, len = 3),
+ legend = paste("shape3.q =", c(1, 2, 5)))
+}
}
\keyword{distribution}
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index 981aee2..e397120 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -34,32 +34,54 @@ lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
\item{object}{ A CQO or UQO object. }
\item{varlvI}{
Logical that is fed into \code{\link{Coef.qrrvglm}}.
+
}
\item{reference}{
Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
+
}
\item{add}{ Logical. Add to an existing plot? If \code{FALSE}, a new
- plot is made. }
- \item{plot.it}{ Logical. Plot it? }
+ plot is made.
+
+ }
+ \item{plot.it}{ Logical. Plot it?
+
+ }
\item{rug}{ Logical. If \code{TRUE}, a rug plot is plotted at the
foot of the plot (applies to rank-1 models only).
These values are jittered to expose ties.
+
+
}
\item{y}{ Logical. If \code{TRUE}, the responses will be plotted
(applies only to rank-1 models and if \code{type = "fitted.values"}.)
+
+
}
\item{type}{ Either \code{"fitted.values"} or \code{"predictors"},
specifies whether the y-axis is on the response or eta-scales
respectively.
+
+
}
- \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}. }
- \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}. }
+ \item{xlab}{ Caption for the x-axis. See \code{\link[graphics]{par}}.
+
+
+ }
+ \item{ylab}{ Caption for the y-axis. See \code{\link[graphics]{par}}.
+
+
+ }
\item{pcex}{ Character expansion of the points.
Here, for rank-1 models, points are the response \emph{y} data.
For rank-2 models, points are the optima.
- See the \code{cex} argument in \code{\link[graphics]{par}}. }
+ See the \code{cex} argument in \code{\link[graphics]{par}}.
+
+ }
\item{pcol}{ Color of the points.
- See the \code{col} argument in \code{\link[graphics]{par}}. }
+ See the \code{col} argument in \code{\link[graphics]{par}}.
+
+ }
\item{pch}{ Either an integer specifying a symbol or a single character
to be used as the default in plotting points.
See \code{\link[graphics]{par}}.
@@ -67,18 +89,28 @@ For rank-2 models, points are the optima.
}
\item{llty}{ Line type.
Rank-1 models only.
- See the \code{lty} argument of \code{\link[graphics]{par}}. }
+ See the \code{lty} argument of \code{\link[graphics]{par}}.
+
+ }
\item{lcol}{ Line color.
Rank-1 models only.
- See the \code{col} argument of \code{\link[graphics]{par}}. }
+ See the \code{col} argument of \code{\link[graphics]{par}}.
+
+ }
\item{llwd}{ Line width.
Rank-1 models only.
- See the \code{lwd} argument of \code{\link[graphics]{par}}. }
+ See the \code{lwd} argument of \code{\link[graphics]{par}}.
+
+ }
\item{label.arg}{ Logical. Label the optima and \bold{C}?
- (applies only to rank-2 models only). }
+ (applies only to rank-2 models only).
+
+ }
\item{adj.arg}{ Justification of text strings for labelling the optima
(applies only to rank-2 models only).
- See the \code{adj} argument of \code{\link[graphics]{par}}. }
+ See the \code{adj} argument of \code{\link[graphics]{par}}.
+
+ }
\item{ellipse}{
Numerical, of length 0 or 1 (applies only to rank-2 models only).
@@ -195,33 +227,39 @@ For rank-2 models, points are the optima.
% }
\item{check.ok}{ Logical. Whether a check is performed to see
- that \code{Norrr = ~ 1} was used.
+ that \code{noRRR = ~ 1} was used.
It doesn't make sense to have a latent variable plot unless this is so.
}
+
\item{\dots}{ Arguments passed into the \code{plot} function
when setting up the entire plot. Useful arguments here include
\code{xlim} and \code{ylim}.
}
+
}
\details{
This function only works for rank-1 and rank-2 QRR-VGLMs with argument
- \code{Norrr = ~ 1}.
+ \code{noRRR = ~ 1}.
+
For unequal-tolerances models, the latent variable axes can be
rotated so that at least one of the tolerance matrices is diagonal;
see \code{\link{Coef.qrrvglm}} for details.
+
Arguments beginning with ``\code{p}'' correspond to the points e.g.,
\code{pcex} and \code{pcol} correspond to the size and color of the
points. Such ``\code{p}'' arguments should be vectors of length 1,
or \eqn{n}, the number of sites. For the rank-2 model, arguments
beginning with ``\code{p}'' correspond to the optima.
+
}
\value{
Returns a matrix of latent variables (site scores)
regardless of whether a plot was produced or not.
+
}
\references{
@@ -240,16 +278,20 @@ canonical Gaussian ordination.
drawn even if requested, i.e., if its tolerance matrix is not
positive-definite.
+
% Stationary points which are not bell-shaped will not be plotted
% at all.
+
Plotting \bold{C} gives a visual display of the weights (loadings)
of each of the variables used in the linear combination defining each
latent variable.
+
The arguments \code{elty}, \code{ecol} and \code{elwd}, may be replaced
in the future by \code{llty}, \code{lcol} and \code{llwd}, respectively.
+
For rank-1 models, a similar function to this one is
\code{\link{perspqrrvglm}}. It plots the fitted values on a more
fine grid rather than at the actual site scores here. The result is a
@@ -258,9 +300,11 @@ canonical Gaussian ordination.
the truth without an appreciation of the statistical variability in
the estimates.
+
% Yet to do: allow for the contour line to correspond to the tolerance
% matrix itself. zz ??
+
In the example below, the data comes from an equal-tolerances model.
The species' tolerance matrices are all the identity matrix,
and the optimums are at (0,0), (1,1) and (-2,0) for species 1, 2,
@@ -274,6 +318,7 @@ canonical Gaussian ordination.
of the species are unequal and (ii) the contours of these tolerance
matrices are not included in the ordination diagram.
+
}
\seealso{
@@ -282,6 +327,8 @@ canonical Gaussian ordination.
\code{\link{Coef.qrrvglm}},
\code{\link[graphics]{par}},
\code{\link{cqo}}.
+
+
}
\examples{
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 0c7ab13..1c1d7b5 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -9,7 +9,8 @@
}
\usage{
makeham(lshape = "loge", lscale = "loge", lepsilon = "loge",
- ishape = NULL, iscale = NULL, iepsilon = 0.3,
+ ishape = NULL, iscale = NULL, iepsilon = NULL,
+ gshape = exp(-5:5), gscale = exp(-5:5), gepsilon = exp(-4:1),
nsimEIM = 500, oim.mean = TRUE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -39,6 +40,12 @@ makeham(lshape = "loge", lscale = "loge", lepsilon = "loge",
}
+ \item{gshape, gscale, gepsilon}{
+ See \code{\link{CommonVGAMffArguments}}.
+ }
+
+
+
\item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}}.
Argument \code{probs.y} is used only when \code{imethod = 2}.
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index 18c8f7b..73a3c3a 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -19,10 +19,9 @@ qmaxwell(p, a)
rmaxwell(n, a)
}
\arguments{
- \item{x, q}{vector of quantiles.}
- \item{p}{vector of probabilities.}
- \item{n}{number of observations.
- A single positive integer.
+ \item{x, q, p, n}{
+ Same as \code{\link[stats:Uniform]{Uniform}}.
+
}
\item{a}{the parameter.}
@@ -39,8 +38,10 @@ rmaxwell(n, a)
\code{qmaxwell} gives the quantile function, and
\code{rmaxwell} generates random deviates.
+
}
\references{
+
Balakrishnan, N. and Nevzorov, V. B. (2003)
\emph{A Primer on Statistical Distributions}.
Hoboken, New Jersey: Wiley.
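A companion consistency check for the maxwellUC functions (a sketch; qmaxwell(p, a)
is as in the usage above, and pmaxwell(q, a) is assumed to be the matching cdf):

probs <- seq(0.1, 0.9, by = 0.2)
max(abs(pmaxwell(qmaxwell(probs, a = 2), a = 2) - probs))  # Should be essentially 0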
diff --git a/man/mlogit.Rd b/man/mlogit.Rd
index 4eeb437..b5a1b32 100644
--- a/man/mlogit.Rd
+++ b/man/mlogit.Rd
@@ -73,6 +73,8 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
Numerical instability may occur when \code{theta} is
close to 1 or 0 (for \code{mlogit}).
One way of overcoming this is to use, e.g., \code{bvalue}.
+ Currently \code{care.exp()} is used to avoid \code{NA}s being
+ returned if the probability is too close to 1.
}
@@ -87,16 +89,16 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let,
- multinomial, trace = TRUE, pneumo) # For illustration only
+ multinomial, trace = TRUE, pneumo) # For illustration only!
fitted(fit)
predict(fit)
-mlogit(predict(fit))
-mlogit(predict(fit), refLevel = 1) # For illustration only
-mlogit(predict(fit)) - fitted(fit) # Should be all 0s
+mlogit(fitted(fit))
+mlogit(fitted(fit)) - predict(fit) # Should be all 0s
-mlogit(fitted(fit), inverse = TRUE)
-mlogit(fitted(fit), inverse = TRUE) - predict(fit) # Should be all 0s
+mlogit(predict(fit), inverse = TRUE)
+mlogit(predict(fit), inverse = TRUE, refLevel = 1) # For illustration only
+mlogit(predict(fit), inverse = TRUE) - fitted(fit) # Should be all 0s
mlogit(fitted(fit), deriv = 1)
mlogit(fitted(fit), deriv = 2)
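The corrected example above hinges on the forward transform eta_j = log(p_j / p_M),
with the last column as the reference level by default. A tiny by-hand sketch (it
assumes mlogit() accepts a matrix of probabilities, as the example does):

p <- matrix(c(0.2, 0.3, 0.5,
              0.1, 0.6, 0.3), nrow = 2, byrow = TRUE)
eta.byhand <- log(p[, 1:2] / p[, 3])  # log(p_j / p_M) for j = 1, 2
max(abs(mlogit(p) - eta.byhand))      # Should be close to 0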
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 09c79d1..1112362 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -19,16 +19,20 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
linear/additive predictors are modelled as intercepts only.
Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
The default value means none are modelled as intercept-only terms.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{parallel}{
A logical, or formula specifying which terms have
equal/unequal coefficients.
+
}
\item{nointercept, whitespace}{
See \code{\link{CommonVGAMffArguments}} for more details.
+
}
\item{refLevel}{
Either a single positive integer or a value of the factor.
@@ -42,6 +46,7 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\code{drop.unused.levels = FALSE}).
See the example below.
+
}
}
\details{
@@ -282,21 +287,21 @@ nn <- 100 # Number of people who travel to work
M <- 3 # There are M+1 models of transport to go to work
ycounts <- matrix(0, nn, M+1)
ycounts[cbind(1:nn, sample(x = M+1, size = nn, replace = TRUE))] = 1
-dimnames(ycounts) = list(NULL, c("bus","train","car","walk"))
-gotowork = data.frame(cost.bus = runif(nn), time.bus = runif(nn),
- cost.train= runif(nn), time.train= runif(nn),
- cost.car = runif(nn), time.car = runif(nn),
- cost.walk = runif(nn), time.walk = runif(nn))
-gotowork = round(gotowork, dig = 2) # For convenience
-gotowork = transform(gotowork,
- Cost.bus = cost.bus - cost.walk,
- Cost.car = cost.car - cost.walk,
- Cost.train = cost.train - cost.walk,
- Cost = cost.train - cost.walk, # for labelling
- Time.bus = time.bus - time.walk,
- Time.car = time.car - time.walk,
- Time.train = time.train - time.walk,
- Time = time.train - time.walk) # for labelling
+dimnames(ycounts) <- list(NULL, c("bus","train","car","walk"))
+gotowork <- data.frame(cost.bus = runif(nn), time.bus = runif(nn),
+ cost.train= runif(nn), time.train= runif(nn),
+ cost.car = runif(nn), time.car = runif(nn),
+ cost.walk = runif(nn), time.walk = runif(nn))
+gotowork <- round(gotowork, dig = 2) # For convenience
+gotowork <- transform(gotowork,
+ Cost.bus = cost.bus - cost.walk,
+ Cost.car = cost.car - cost.walk,
+ Cost.train = cost.train - cost.walk,
+ Cost = cost.train - cost.walk, # for labelling
+ Time.bus = time.bus - time.walk,
+ Time.car = time.car - time.walk,
+ Time.train = time.train - time.walk,
+ Time = time.train - time.walk) # for labelling
fit <- vglm(ycounts ~ Cost + Time,
multinomial(parall = TRUE ~ Cost + Time - 1),
xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 5602550..8d2f45f 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -74,9 +74,9 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
}
\references{
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
Hilbe, J. M. (2011)
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 0570542..5be7abe 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -25,7 +25,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
The cutpoints should be non-negative integers.
If \code{nbolf()} is used as the link function in
\code{\link{cumulative}} then one should choose
- \code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
+ \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
}
@@ -93,7 +93,8 @@ nbolf(theta, cutpoint = NULL, k = NULL,
\code{\link{polf}},
\code{\link{golf}},
\code{nbolf2},
- \code{\link{cumulative}}.
+ \code{\link{cumulative}},
+ \code{\link{CommonVGAMffArguments}}.
}
@@ -124,10 +125,10 @@ cutpoints <- c(-Inf, 10, 20, Inf)
cuty <- Cut(y1, breaks = cutpoints)
\dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) }
table(cuty) / sum(table(cuty))
-fit <- vglm(cuty ~ x2 + x3, cumulative(reverse = TRUE,
- parallel = TRUE, intercept.apply = TRUE,
- link = nbolf(cutpoint = cutpoints[2:3], k = k),
- mv = TRUE), trace = TRUE)
+fit <- vglm(cuty ~ x2 + x3, trace = TRUE,
+ cumulative(reverse = TRUE, mv = TRUE,
+ parallel = TRUE, apply.parint = TRUE,
+ link = nbolf(cutpoint = cutpoints[2:3], k = k)))
head(depvar(fit))
head(fitted(fit))
head(predict(fit))
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index 8b5c33f..d239712 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -262,9 +262,9 @@ Fitting the negative binomial distribution to biological data.
\bold{9}, 174--200.
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
@@ -440,7 +440,7 @@ ci.mydiff <- mydiff + c(-1.96, 1.96) * se.mydiff
ci.delta0 <- ci.exp.mydiff <- exp(ci.mydiff)
(ci.phi0 <- 1 + 1 / rev(ci.delta0)) # The 95 percent conf. interval for phi0
-confint_nb1(nb1) # Quick way to get it
+Confint.nb1(nb1) # Quick way to get it
summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper # cf. moment estimator
}
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index 5631d52..30d7b87 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -76,9 +76,9 @@ Hilbe, J. M. (2011)
Cambridge: Cambridge University Press.
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
diff --git a/man/normal1.Rd b/man/normal1.Rd
index af9ca19..26b6513 100644
--- a/man/normal1.Rd
+++ b/man/normal1.Rd
@@ -10,7 +10,7 @@
\usage{
normal1(lmean = "identity", lsd = "loge", lvar = "loge",
var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
- intercept.apply = FALSE, zero = -2)
+ apply.parint = FALSE, smallno = 1e-05, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -40,10 +40,19 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
}
- \item{imethod, parallel, isd, intercept.apply, zero}{
+ \item{smallno}{
+ Numeric, positive but close to 0.
+ Used specifically for quasi-variances; if the link for the
+ mean is \code{\link{explink}} then any non-positive value
+ of \code{eta} is replaced by this quantity (hopefully,
+ temporarily and only during early iterations).
+
+
+ }
+ \item{imethod, parallel, isd, apply.parint, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
If \code{lmean = loge} then try \code{imethod = 2}.
- Argument \code{intercept.apply} refers to whether the parallelism
+ Argument \code{apply.parint} refers to whether the parallelism
constraint is applied to the intercept too.
@@ -94,7 +103,8 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
\code{\link{fnormal1}},
\code{\link{skewnormal1}},
\code{\link{dcennormal1}},
- \code{\link{huber}},
+ \code{\link{SUR}},
+ \code{\link{huber2}},
\code{\link{studentt}},
\code{\link{binormal}},
\code{\link[stats:Normal]{dnorm}}.
@@ -111,7 +121,7 @@ fit1 <- vglm(y1 ~ x2, normal1(zero = NULL), ndata, trace = TRUE)
coef(fit1, matrix = TRUE)
fit2 <- vglm(cbind(y2, y3) ~ x2, data = ndata, trace = TRUE,
normal1(var = TRUE, parallel = TRUE,
- intercept.apply = TRUE, zero = NULL))
+ apply.parint = TRUE, zero = NULL))
coef(fit2, matrix = TRUE)
# Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
@@ -120,7 +130,7 @@ ndata <- data.frame(y = rnorm(100, m = theta, sd = theta))
fit3 <- vglm(y ~ 1, normal1(lsd = "identity"), ndata,
constraints = list("(Intercept)" = rbind(1, 1)))
fit4 <- vglm(y ~ 1, normal1(lsd = "identity", parallel = TRUE,
- intercept.apply = TRUE, zero = NULL), ndata)
+ apply.parint = TRUE, zero = NULL), ndata)
coef(fit3, matrix = TRUE)
coef(fit4, matrix = TRUE) # Same as fit3
}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 882a327..8d4e074 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,7 +2,27 @@
\alias{notdocumentedyet}
%
%
-%
+% 201302;
+% \alias{pgamma.deriv.unscaled}
+% \alias{pgamma.deriv}
+% \alias{digami}
+%
+% 201212;
+\alias{binom2.rho.ss}
+%
+% 20121105;
+\alias{N.hat.posbernoulli}
+\alias{aux.posbernoulli}
+\alias{posbern.aux}
+\alias{Rank}
+\alias{Rank.rrvglm}
+\alias{Rank.qrrvglm}
+\alias{Rank.cao}
+% 20121015; delete this later
+%\alias{huggins91.old}
+%
+% 20120912
+\alias{arwz2wz}
%
% 20120813 New links (no earg)
\alias{Dtheta.deta}
@@ -116,8 +136,8 @@
%
%
% 20110321; misc. datasets.
-\alias{fibre1.5}
-\alias{fibre15}
+%\alias{fibre1.5}
+%\alias{fibre15}
%
%
% 20120206; for RR-NB, or rrn.tex.
@@ -126,9 +146,9 @@
%
% 20110202; for Melbourne; these include datasets.
\alias{azprocedure}
-\alias{confint_rrnb}
-\alias{confint_nb1}
-\alias{gala}
+\alias{Confint.rrnb}
+\alias{Confint.nb1}
+%\alias{gala}
\alias{mmt}
%
%
@@ -232,7 +252,7 @@
% \alias{canonical.Blist}
% \alias{cao.fit}
\alias{car.all}
-% \alias{care.exp}
+\alias{care.exp}
\alias{ccoef.Coef.cao}
\alias{ccoef.Coef.qrrvglm}
\alias{ccoef.cao}
@@ -282,8 +302,8 @@
\alias{effects}
% \alias{effects.vgam}
% \alias{effects.vlm}
-% \alias{ei}
-% \alias{eij}
+% \alias{eifun}
+% \alias{eijfun}
\alias{erfc}
\alias{eta2theta}
%\alias{explink}
@@ -340,15 +360,16 @@
\alias{lm}
\alias{lm2qrrvlm.model.matrix}
\alias{lm2vlm.model.matrix}
+\alias{vlm2lm.model.matrix}
\alias{lms.bcg.control}
\alias{lms.bcn.control}
\alias{lms.yjn.control}
\alias{lmscreg.control}
\alias{logLik.vlm}
-% \alias{lv.Coef.cao} 20090505
-\alias{lv.Coef.qrrvglm}
+% \alias{lv.Coef.cao} 20090505
+\alias{latvar.Coef.qrrvglm}
\alias{lv.cao}
-\alias{lv.qrrvglm}
+\alias{latvar.qrrvglm}
\alias{lvplot.cao}
\alias{m2adefault}
\alias{m2avglm}
@@ -395,7 +416,7 @@
\alias{plotvglm}
\alias{plotvlm}
\alias{plotvsmooth.spline}
-\alias{pnorm2}
+% \alias{pnorm2} done 20120910
% \alias{poissonqn}
\alias{poly}
\alias{predict}
@@ -435,7 +456,7 @@
\alias{qtplot.vextremes}
\alias{qtplot.vglm}
\alias{quasiff}
-\alias{rainfall}
+% \alias{rainfall}
% \alias{remove.arg}
% \alias{replace.constraints}
\alias{resid}
@@ -468,7 +489,7 @@
% \alias{rrr.normalize}
\alias{rrvglm.control.Gaussian}
% \alias{rrvglm.fit}
-\alias{rss.vgam}
+\alias{ResSS.vgam}
\alias{s.vam}
\alias{scale.default}
\alias{simple.exponential}
diff --git a/man/olym.Rd b/man/olym.Rd
new file mode 100644
index 0000000..d457c5a
--- /dev/null
+++ b/man/olym.Rd
@@ -0,0 +1,79 @@
+\name{olympics}
+\alias{olym08}
+\alias{olym12}
+\docType{data}
+\title{ 2008 and 2012 Summer Olympic Final Medal Count Data}
+\description{
+ Final medal count, by country, for the Summer
+ 2008 and 2012 Olympic Games.
+
+
+}
+\usage{
+data(olym08)
+data(olym12)
+}
+\format{
+ A data frame with 87 or 85 observations on the following 6 variables.
+ \describe{
+ \item{\code{rank}}{a numeric vector, overall ranking of the countries. }
+ \item{\code{country}}{a factor. }
+ \item{\code{gold}}{a numeric vector, number of gold medals. }
+ \item{\code{silver}}{a numeric vector, number of silver medals. }
+ \item{\code{bronze}}{a numeric vector, number of bronze medals. }
+ \item{\code{totalmedal}}{a numeric vector, total number of medals. }
+
+% \item{\code{country}}{a factor. character vector. }
+
+ }
+}
+\details{
+ The events were held during
+ (i) August 8--24, 2008, in Beijing; and
+ (ii) July 27--August 12, 2012, in London.
+
+
+% This is a simple two-way contingency table of counts.
+
+}
+\source{
+\url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html},
+\url{http://www.london2012.com/medals/medal-count/}.
+
+
+}
+\references{
+ The official English website was/is \url{http://en.beijing2008.cn}
+ and \url{http://www.london2012.com}.
+ Help from Viet Hoang Quoc is gratefully acknowledged.
+
+
+}
+\seealso{
+ \code{\link[VGAM]{grc}}.
+
+
+}
+
+\examples{
+summary(olym08)
+summary(olym12)
+## maybe str(olym08) ; plot(olym08) ...
+\dontrun{ par(mfrow = c(1, 2))
+myylim <- c(0, 55)
+with(head(olym08, n = 8),
+barplot(rbind(gold, silver, bronze),
+ col = c("gold", "grey", "brown"), # No "silver" or "bronze"!
+ names.arg = country, cex.names = 0.5, ylim = myylim,
+ beside = TRUE, main = "2008 Summer Olympic Final Medal Count",
+ ylab = "Medal count", las = 1,
+ sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze"))
+with(head(olym12, n = 8),
+barplot(rbind(gold, silver, bronze),
+ col = c("gold", "grey", "brown"), # No "silver" or "bronze"!
+ names.arg = country, cex.names = 0.5, ylim = myylim,
+ beside = TRUE, main = "2012 Summer Olympic Final Medal Count",
+ ylab = "Medal count", las = 1,
+ sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze")) }
+}
+\keyword{datasets}
diff --git a/man/olympic.Rd b/man/olympic.Rd
deleted file mode 100644
index a4cfe41..0000000
--- a/man/olympic.Rd
+++ /dev/null
@@ -1,56 +0,0 @@
-\name{olympic}
-\alias{olympic}
-\docType{data}
-\title{ 2008 Summer Olympic Final Medal Count Data}
-\description{
- Final count of medal winners by country for the 2008 Summer Olympic
- games in Beijing.
-
-
-}
-\usage{data(olympic)}
-\format{
- A data frame with 87 observations on the following 6 variables.
- \describe{
- \item{\code{rank}}{a numeric vector, overall ranking of the countries. }
- \item{\code{country}}{a character vector. }
- \item{\code{gold}}{a numeric vector, number of gold medals. }
- \item{\code{silver}}{a numeric vector, number of silver medals. }
- \item{\code{bronze}}{a numeric vector, number of bronze medals. }
- \item{\code{totalmedal}}{a numeric vector, total number of medals. }
- }
-}
-\details{
- The event was held during August 8--24, 2008, in Beijing.
-
-
-% This is a simple two-way contingency table of counts.
-
-}
-\source{
-\url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html}.
-
-}
-\references{
- The official English webite was
- \url{http://en.beijing2008.cn}.
-
-}
-\seealso{
- \code{\link{grc}}.
-
-
-}
-
-\examples{
-summary(olympic)
-## maybe str(olympic) ; plot(olympic) ...
-\dontrun{ with(head(olympic, n = 8),
-barplot(rbind(gold,silver,bronze),
- col = c("gold","grey","brown"), # No "silver" or "bronze"!
- names.arg = country, cex.names = 0.5,
- beside = TRUE, main = "2008 Summer Olympic Final Medal Count",
- ylab = "Medal count", las = 1,
- sub = "Top 8 countries; 'gold'=gold, 'grey'=silver, 'brown'=bronze")) }
-}
-\keyword{datasets}
diff --git a/man/oxtemp.Rd b/man/oxtemp.Rd
index 74439d4..eb1c43e 100644
--- a/man/oxtemp.Rd
+++ b/man/oxtemp.Rd
@@ -27,7 +27,7 @@
% \references{
% }
\examples{
-fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE)
+\dontrun{ fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE) }
}
\keyword{datasets}
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index c9681f6..8cd7b88 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -42,7 +42,7 @@ It is the 3-parameter Singh-Maddala distribution with \eqn{a=q}.
The 2-parameter paralogistic has density
\deqn{f(y) = a^2 y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+a}]}{%
f(y) = a^2 y^(a-1) / [b^a (1 + (y/b)^a)^(1+a)]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \eqn{a} is the shape parameter.
The mean is
@@ -61,6 +61,7 @@ provided \eqn{a > 1}; these are returned as the fitted values.
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
diff --git a/man/paralogisticUC.Rd b/man/paralogisticUC.Rd
index 593741c..e68aaa7 100644
--- a/man/paralogisticUC.Rd
+++ b/man/paralogisticUC.Rd
@@ -67,7 +67,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-pdata <- data.frame(y = rparalogistic(n = 3000, 4, 6))
+pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.1), pdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
diff --git a/man/pareto1.Rd b/man/pareto1.Rd
index 2d16db3..9fc28b4 100644
--- a/man/pareto1.Rd
+++ b/man/pareto1.Rd
@@ -8,6 +8,7 @@
by maximum likelihood estimation.
Also includes the upper truncated Pareto(I) distribution.
+
}
\usage{
pareto1(lshape = "loge", location = NULL)
@@ -176,7 +177,7 @@ coef(fit, matrix = TRUE)
summary(fit) # Standard errors are incorrect!!
# Here, alpha is assumed known
-fit2 <- vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE, crit = "coef")
+fit2 <- vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE)
fit2@extra # alpha stored here
head(fitted(fit2))
coef(fit2, matrix = TRUE)
@@ -185,8 +186,8 @@ summary(fit2) # Standard errors are okay
# Upper truncated Pareto distribution
lower <- 2; upper <- 8; kay <- exp(2)
pdat3 <- data.frame(y = rtpareto(n = 100, lower = lower,
- upper = upper, shape = kay))
-fit3 <- vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE, cri = "coef")
+ upper = upper, shape = kay))
+fit3 <- vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE)
coef(fit3, matrix = TRUE)
c(fit3@misc$lower, fit3@misc$upper)
}
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index 05d37f7..9fc6282 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -122,7 +122,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
Brazauskas, V. (2003)
Information matrix for Pareto(IV), Burr, and related distributions.
-\emph{Comm.\ Statist.\ Theory and Methods}
+\emph{Comm. Statist. Theory and Methods}
\bold{32}, 315--325.
diff --git a/man/paretoIVUC.Rd b/man/paretoIVUC.Rd
index 141f05e..dca45c7 100644
--- a/man/paretoIVUC.Rd
+++ b/man/paretoIVUC.Rd
@@ -64,16 +64,19 @@ rparetoI(n, scale = 1, shape = 1)
functions beginning with the letter \code{r} generate random deviates.
}
\references{
+
Brazauskas, V. (2003)
Information matrix for Pareto(IV), Burr, and related
distributions.
-\emph{Comm.\ Statist.\ Theory and Methods}
+\emph{Comm. Statist. Theory and Methods}
\bold{32}, 315--325.
+
Arnold, B. C. (1983)
\emph{Pareto Distributions}.
Fairland, Maryland: International Cooperative Publishing House.
+
}
\author{ T. W. Yee }
\details{
diff --git a/man/perks.Rd b/man/perks.Rd
index 8e77348..b600a85 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -10,6 +10,7 @@
\usage{
perks(lshape = "loge", lscale = "loge",
ishape = NULL, iscale = NULL,
+ gshape = exp(-5:5), gscale = exp(-5:5),
nsimEIM = 500, oim.mean = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -36,6 +37,11 @@ perks(lshape = "loge", lscale = "loge",
}
+ \item{gshape, gscale}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+
+ }
\item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}}.
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
index 5deee4a..01ecbbc 100644
--- a/man/perksUC.Rd
+++ b/man/perksUC.Rd
@@ -43,6 +43,7 @@ rperks(n, shape, scale = 1)
\details{
See \code{\link{perks}} for details.
+
}
%\note{
%
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index 9da7d8f..57fd302 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -4,7 +4,7 @@
\title{ Perspective plot for QRR-VGLMs }
\description{
Produces a perspective plot for a CQO model (QRR-VGLM). It is only
-applicable for rank-1 or rank-2 models with argument \code{Norrr = ~ 1}.
+applicable for rank-1 or rank-2 models with argument \code{noRRR = ~ 1}.
}
\usage{
@@ -133,7 +133,7 @@ perspqrrvglm(x, varlvI = FALSE, reference = NULL, plot.it = TRUE,
To view rare species, use the \code{whichSpecies} argument to select
a subset of the species.
- A perspective plot will be performed if \code{Norrr = ~ 1}, and
+ A perspective plot will be performed if \code{noRRR = ~ 1}, and
\code{Rank = 1} or \code{2}. Also, all the tolerance matrices of
those species to be plotted must be positive-definite.
diff --git a/man/pgamma.deriv.Rd b/man/pgamma.deriv.Rd
new file mode 100644
index 0000000..acb7f38
--- /dev/null
+++ b/man/pgamma.deriv.Rd
@@ -0,0 +1,128 @@
+\name{pgamma.deriv}
+\alias{pgamma.deriv}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+Derivatives of the Incomplete Gamma Integral
+}
+\description{
+The first two derivatives of the incomplete gamma integral.
+
+}
+\usage{
+pgamma.deriv(q, shape, tmax = 100)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{q, shape}{
+ As in \code{\link[stats]{pgamma}} but
+ these must be vectors of positive values only and finite.
+
+
+}
+% \item{shape}{
+% A vector of positive values.
+%
+%}
+ \item{tmax}{
+ Maximum number of iterations allowed in the computation
+ (per \code{q} value).
+
+}
+}
+\details{
+ Write \eqn{x = q} and \code{shape =} \eqn{a}.
+ The first and second derivatives with respect to \eqn{q} and \eqn{a}
+ are returned. This function is similar in spirit to
+ \code{\link[stats]{pgamma}};
+ define
+ \deqn{P(a,x) = \frac{1}{\Gamma(a)} \int_0^x t^{a-1} e^{-t} dt}{P(a,x) =
+ 1/Gamma(a) integral_0^x t^(a-1) exp(-t) dt}
+ so that
+ \eqn{P(a, x)} is \code{pgamma(x, a)}.
+ Currently a 6-column matrix is returned (in the future this
+ may change and an argument may be supplied so that only what
+  is required by the user is computed).
+
+
+ The computations use a series expansion
+  for \eqn{a \leq x \leq 1}{a <= x <= 1}
+  or \eqn{x < a},
+  otherwise a continued fraction expansion.
+ Machine overflow can occur for large values of \eqn{x}
+ when \eqn{x} is much greater than \eqn{a}.
+
+
+
+}
+\value{
+ The first 5 columns, running from left to right, are the derivatives
+ with respect to:
+ \eqn{x},
+ \eqn{x^2},
+ \eqn{a},
+ \eqn{a^2},
+ \eqn{xa}.
+ The 6th column is \eqn{P(a, x)} (but it is not as accurate
+ as calling \code{\link[stats]{pgamma}} directly).
+
+
+}
+\references{
+
+ Moore, R. J. (1982)
+ Algorithm AS 187: Derivatives of the Incomplete Gamma Integral.
+ \emph{Journal of the Royal Statistical Society, Series C}
+ \emph{(Applied Statistics)},
+ \bold{31}(3), 330--335.
+
+
+}
+\author{
+ T. W. Yee wrote the wrapper function to the Fortran subroutine
+ written by R. J. Moore. The subroutine was modified to run using
+ double precision.
+ The original code came from \url{http://lib.stat.cmu.edu/apstat/187}.
+
+
+}
+\note{
+ If convergence does not occur then try increasing the value of
+ \code{tmax}.
+
+
+ Yet to do: add more arguments to give greater flexibility in
+ the accuracy desired and to compute only quantities that are
+ required by the user.
+
+
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link{pgamma.deriv.unscaled}},
+ \code{\link[stats]{pgamma}}.
+
+
+}
+\examples{
+x <- seq(2, 10, length = 501)
+head(ans <- pgamma.deriv(x, 2))
+\dontrun{ par(mfrow = c(2, 3))
+for (jay in 1:6)
+ plot(x, ans[, jay], type = "l", col = "blue", cex.lab = 1.5,
+ cex.axis = 1.5, las = 1, log = "x",
+ main = colnames(ans)[jay], xlab = "q", ylab = "") }
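+# Sanity check (a sketch): the Value section says the 6th column is P(a, x),
+# so it should be close to (though less accurate than) calling pgamma() directly:
+max(abs(ans[, 6] - pgamma(x, shape = 2)))  # Should be small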
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
+
+
+
+% Some part of R-2.15.2/src/library/stats/man/GammaDist.Rd used
+
+
+% An error in the article?
+% I believe comments in the code (C in fortran).
+% for \eqn{a \leq x \leq 1}{a <= x <= 1}, and
diff --git a/man/pgamma.deriv.unscaled.Rd b/man/pgamma.deriv.unscaled.Rd
new file mode 100644
index 0000000..66df99f
--- /dev/null
+++ b/man/pgamma.deriv.unscaled.Rd
@@ -0,0 +1,102 @@
+\name{pgamma.deriv.unscaled}
+\alias{pgamma.deriv.unscaled}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{
+Derivatives of the Incomplete Gamma Integral (Unscaled Version)
+}
+\description{
+The first two derivatives of the incomplete gamma integral
+without the gamma function scaling.
+
+}
+\usage{
+pgamma.deriv.unscaled(q, shape)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{q, shape}{
+ As in \code{\link[stats]{pgamma}}
+ and \code{\link{pgamma.deriv}} but
+ these must be vectors of positive values only and finite.
+
+
+}
+}
+\details{
+ Define
+  \deqn{G(x, a) = \int_0^x t^{a-1} e^{-t} dt}{G(x, a) =
+ integral_0^x t^(a-1) exp(-t) dt}
+ so that
+ \eqn{G(x, a)} is \code{pgamma(x, a) * gamma(a)}.
+ Write \eqn{x = q} and \code{shape =} \eqn{a}.
+ The 0th and first and second derivatives with respect to \eqn{a}
+ of \eqn{G} are returned. This function is similar in spirit to
+ \code{\link{pgamma.deriv}}
+ but here there is no gamma function to scale things.
+ Currently a 3-column matrix is returned (in the future this
+ may change and an argument may be supplied so that only what
+  is required by the user is computed).
+ This function is based on Wingo (1989).
+
+
+}
+\value{
+ The 3 columns, running from left to right, are the \code{0:2}th derivatives
+ with respect to \eqn{a}.
+
+
+}
+\references{
+
+ See \code{\link{truncweibull}}.
+
+
+}
+\author{
+ T. W. Yee.
+
+
+}
+
+
+%\note{
+% If convergence does not occur then try increasing the value of
+% \code{tmax}.
+%
+%}
+
+\section{Warning }{
+  This function seems inaccurate for \code{q = 1} and \code{q = 2};
+ see the plot below.
+
+
+}
+
+
+
+
+\seealso{
+ \code{\link{pgamma.deriv}},
+ \code{\link[stats]{pgamma}}.
+
+
+}
+\examples{
+x <- 3; aa <- seq(0.3, 4, by = 0.01)
+ans.u <- pgamma.deriv.unscaled(x, aa)
+head(ans.u)
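+# A hedged check, assuming column 1 is the 0th derivative, i.e., G(x, a) itself,
+# which the Details section equates to pgamma(x, a) * gamma(a):
+max(abs(ans.u[, 1] - pgamma(x, aa) * gamma(aa)))  # Should be smallish; see Warning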
+
+\dontrun{ par(mfrow = c(1, 3))
+for (jay in 1:3) {
+ plot(aa, ans.u[, jay], type = "l", col = "blue", cex.lab = 1.5,
+ cex.axis = 1.5, las = 1, main = colnames(ans.u)[jay],
+ log = "", xlab = "shape", ylab = "")
+ abline(h = 0, v = 1:2, lty = "dashed", col = "gray") # Inaccurate at 1 and 2
+}
+}
+}
+% Add one or more standard keywords, see file 'KEYWORDS' in the
+% R documentation directory.
+\keyword{math}
+
+
diff --git a/man/plackett.Rd b/man/plackett.Rd
index 1a74248..24802ba 100644
--- a/man/plackett.Rd
+++ b/man/plackett.Rd
@@ -98,8 +98,9 @@ A class of bivariate distributions.
}
\examples{
+\dontrun{
ymat <- rplack(n = 2000, oratio = exp(2))
-\dontrun{plot(ymat, col = "blue")}
+plot(ymat, col = "blue")
fit <- vglm(ymat ~ 1, fam = plackett, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
@@ -107,6 +108,7 @@ vcov(fit)
head(fitted(fit))
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index aa9a397..ca39d14 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -176,9 +176,9 @@ contains further information and examples.
}
\examples{
-coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age),
- binom2.or(zero = NULL), coalminers)
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
+fit <- vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age),
+ binom2.or(zero = NULL), coalminers)
\dontrun{ par(mfrow = c(1,3))
plot(fit, se = TRUE, ylim = c(-3, 2), las = 1)
plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange",
diff --git a/man/pneumo.Rd b/man/pneumo.Rd
index 029ed13..a32798d 100644
--- a/man/pneumo.Rd
+++ b/man/pneumo.Rd
@@ -31,6 +31,12 @@ data set, the two most severe categories were combined.
}
+\seealso{
+ \code{\link{cumulative}}.
+
+
+}
+
\references{
McCullagh, P. and Nelder, J. A. (1989)
@@ -40,7 +46,7 @@ data set, the two most severe categories were combined.
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
}
\keyword{datasets}
diff --git a/man/pnorm2UC.Rd b/man/pnorm2UC.Rd
new file mode 100644
index 0000000..0dac580
--- /dev/null
+++ b/man/pnorm2UC.Rd
@@ -0,0 +1,124 @@
+\name{pnorm2}
+\alias{pnorm2}
+\title{Bivariate normal distribution cumulative distribution function}
+\description{
+% Density,
+ Cumulative distribution function
+% quantile function
+% and
+% random generation
+  for the bivariate normal distribution.
+
+}
+\usage{
+pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
+}
+\arguments{
+ \item{x1, x2}{vector of quantiles.}
+ \item{mean1, mean2, var1, var2, cov12}{
+ vector of means, variances and the covariance.
+
+% standard deviations and correlation parameter.
+
+ }
+% \item{n}{number of observations. }
+% \item{log}{
+% Logical.
+% If \code{log = TRUE} then the logarithm of the density is returned.
+% }
+
+
+% \item{rho}{
+% See \code{\link{binormal}}.
+% }
+
+
+
+}
+\value{
+% \code{dmakeham} gives the density,
+ \code{pnorm2} gives the cumulative distribution function.
+% \code{qmakeham} gives the quantile function, and
+% \code{rmakeham} generates random deviates.
+
+
+}
+% \author{ T. W. Yee }
+\details{
+
+ The default arguments correspond to the standard bivariate normal
+ distribution with correlation parameter \eqn{\rho = 0}{rho = 0}.
+  That is, two independent standard normal distributions.
+  Let \code{sd1} be \code{sqrt(var1)},
+  written \eqn{\sigma_1}{sigma_1}, etc.
+ Then the general formula for the correlation coefficient is
+ \eqn{\rho = cov / (\sigma_1 \sigma_2)}{rho = cov / (sigma_1 * sigma_2)}
+ where \eqn{cov} is argument \code{cov12}.
+  Thus if arguments \code{var1} and \code{var2} are left at their
+  defaults (unity) then \code{cov12} can be assigned the value of \eqn{\rho}{rho}.
+
+
+ One can think of this function as an extension of
+  \code{\link[stats]{pnorm}} to two dimensions; however, note
+ that the argument names have been changed for \pkg{VGAM}
+ 0.9-1 onwards.
+
+
+}
+\references{
+
+ Based on Donnelly (1973),
+ the code was translated from FORTRAN to ratfor using struct, and
+ then from ratfor to C manually.
+ The function was originally called \code{bivnor}, and TWY only
+ wrote a wrapper function.
+
+
+ Donnelly, T. G. (1973)
+ Algorithm 462: Bivariate Normal Distribution.
+ \emph{Communications of the ACM},
+ \bold{16}, 638.
+
+
+
+% It gives the probability that a bivariate normal exceeds (ah, ak).
+% Here, gh and gk are 0.5 times the right tail areas of ah, ak under a N(0, 1)
+% distribution.
+
+
+}
+
+
+\section{Warning}{
+ Being based on an approximation, the results may be negative!
+ Also, this function may be renamed to \code{pnormal2()}, or
+ something similar, at a later date.
+
+
+}
+
+
+
+
+%\note{
+%
+%}
+\seealso{
+ \code{\link[stats]{pnorm}},
+ \code{\link{binormal}},
+ \code{\link{normal1}}.
+
+
+}
+\examples{
+yvec <- c(-5, -1.96, 0, 1.96, 5)
+ymat <- expand.grid(yvec, yvec)
+cbind(ymat, pnorm2(ymat[, 1], ymat[, 2]))
+
+\dontrun{ rhovec <- seq(-0.95, 0.95, by = 0.01)
+plot(rhovec, pnorm2(0, 0, cov12 = rhovec), type = "l", col = "blue", las = 1)
+abline(v = 0, h = 0.25, col = "gray", lty = "dashed") }
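+# An extra check (a sketch), using the standard result for a standardized
+# bivariate normal that P(X <= 0, Y <= 0) = 1/4 + asin(rho) / (2*pi):
+rho <- 0.5
+c(pnorm2(0, 0, cov12 = rho), 0.25 + asin(rho) / (2 * pi))  # Should agree closely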
+}
+\keyword{distribution}
+
+
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index 77aad99..0391be6 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -21,6 +21,7 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
See \code{\link{Links}} for more choices
and information.
+
}
\item{dispersion}{
Dispersion parameter. By default, maximum
@@ -30,6 +31,7 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
else specify a known positive value (or values if the response
is a matrix---one value per column).
+
}
\item{onedpar}{
One dispersion parameter? If the response is a matrix,
@@ -38,14 +40,17 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
Setting \code{onedpar=TRUE} will pool them so that there is only
one dispersion parameter to be estimated.
+
}
\item{parallel}{
A logical or formula. Used only if the response is a matrix.
+
}
\item{imu, imethod}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{zero}{
An integer-valued vector specifying which linear/additive predictors
@@ -58,6 +63,11 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
}
\item{bred, earg.link}{
Details at \code{\link{CommonVGAMffArguments}}.
+ Setting \code{bred = TRUE} should work for
+ multiple responses and all \pkg{VGAM} link functions;
+ it has been tested for
+  \code{\link{loge}} and
+  \code{\link{identity}}, but further testing is required.
}
@@ -159,10 +169,13 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
\examples{
poissonff()
-pdata <- data.frame(x = rnorm(nn <- 100))
-pdata <- transform(pdata, y = rpois(nn, exp(1+x)))
-(fit <- vglm(y ~ x, family = poissonff, pdata))
-coef(fit, matrix = TRUE)
+set.seed(123)
+pdata <- data.frame(x2 = rnorm(nn <- 100))
+pdata <- transform(pdata, y1 = rpois(nn, exp(1 + x2)))
+(fit1 <- vglm(y1 ~ x2, family = poissonff, pdata))
+(fit2 <- vglm(y1 ~ x2, family = poissonff(bred = TRUE), pdata))
+coef(fit1, matrix = TRUE)
+coef(fit2, matrix = TRUE)
nn <- 200
cdata <- data.frame(x2 = rnorm(nn), x3 = rnorm(nn), x4 = rnorm(nn))
diff --git a/man/polf.Rd b/man/polf.Rd
index 8bc00e3..5912f52 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -22,7 +22,7 @@ polf(theta, cutpoint = NULL,
The cutpoints should be non-negative integers.
If \code{polf()} is used as the link function in
\code{\link{cumulative}} then one should choose
- \code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
+ \code{reverse = TRUE, parallel = TRUE, apply.parint = TRUE}.
}
\item{inverse, deriv, short, tag}{
@@ -130,10 +130,11 @@ cutpoints <- c(-Inf, 10, 20, Inf)
pdata <- transform(pdata, cuty = Cut(y1, breaks = cutpoints))
\dontrun{ with(pdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
with(pdata, table(cuty) / sum(table(cuty)))
-fit <- vglm(cuty ~ x2 + x3, cumulative(reverse = TRUE,
- parallel = TRUE, intercept.apply = TRUE,
- link = polf(cutpoint = cutpoints[2:3]),
- mv = TRUE), data = pdata, trace = TRUE)
+fit <- vglm(cuty ~ x2 + x3, data = pdata, trace = TRUE,
+ cumulative(reverse = TRUE,
+ parallel = TRUE, apply.parint = TRUE,
+ link = polf(cutpoint = cutpoints[2:3]),
+ mv = TRUE))
head(depvar(fit))
head(fitted(fit))
head(predict(fit))
diff --git a/man/posbernUC.Rd b/man/posbernUC.Rd
new file mode 100644
index 0000000..76b7a90
--- /dev/null
+++ b/man/posbernUC.Rd
@@ -0,0 +1,168 @@
+\name{posbernUC}
+\alias{posbernUC}
+\alias{dposbern}
+\alias{rposbern}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Bernoulli Sequence Model }
+\description{
+ Density, and random generation
+ for multiple Bernoulli responses where
+ each row in the response matrix has at least one success.
+
+}
+\usage{
+rposbern(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2),
+ cap.effect = -1, link = "logit", is.popn = FALSE, earg.link = FALSE)
+dposbern(x, prob, prob0 = prob, log = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+ response vector or matrix.
+ Should only have 0 and 1 values, at least two columns, and each row
+ should have at least one 1.
+
+
+ }
+
+ \item{nTimePts}{Number of sampling occasions.
+ Called \eqn{\tau} in \code{\link{posbernoulli.b}}
+ and \code{\link{posbernoulli.t}}.
+
+
+ }
+ \item{n}{number of observations.
+ Usually a single positive integer, else the length of the vector is used.
+ See argument \code{is.popn}.
+
+
+ }
+
+ \item{is.popn}{
+ Logical.
+ If \code{TRUE} then argument \code{n} is the population
+  size and what is returned may have substantially fewer
+  rows than \code{n}.
+  That is, if an animal has at least one 1 in its sequence then
+  it is returned, else that animal is not returned.
+ Put in other words, only animals captured at least once are
+ returned in the sample.
+
+
+ }
+ \item{cap.effect}{
+ Numeric, the capture effect.
+ Added to the linear predictor if captured previously.
+ A positive or negative value corresponds to
+ a trap-happy and trap-shy effect respectively.
+
+
+ }
+
+% \item{double.ch}{
+% Logical.
+% If \code{TRUE} then the values of \code{ch0}, \code{ch1}, \ldots are
+% 2 or 0, else 1 or 0.
+% Setting this argument \code{TRUE} means that a model can be fitted
+% with half the capture history in both denominator and numerator
+% (this is a compromise of the Huggins (1991) model where the full
+% capture history only appears in the numerator).
+% }
+
+ \item{pvars}{ Number of other numeric covariates that make up
+ the linear predictor.
+ Labelled \code{x1}, \code{x2}, \ldots,
+ where the first is an intercept, and the others are
+ independent standard \code{\link[stats:Uniform]{runif}} random variates.
+ The first \code{pvars} elements of \code{xcoeff} are used.
+
+
+ }
+
+ \item{xcoeff}{
+ The regression coefficients of the linear predictor.
+ These correspond to \code{x1}, \code{x2}, \ldots,
+ and the first is for the intercept.
+ The length of \code{xcoeff} must be at least \code{pvars}.
+
+
+ }
+
+ \item{link, earg.link}{
+ The former is used to generate the probabilities for capture
+ at each occasion.
+ Other details at \code{\link{CommonVGAMffArguments}}.
+
+
+ }
+ \item{prob, prob0}{
+ Matrix of probabilities for the numerator and denominators
+ respectively.
+ The default does \emph{not} correspond to the
+ \eqn{M_b} model since the \eqn{M_b} model has a denominator
+ which involves the capture history.
+
+
+ }
+ \item{log}{
+ Logical. Return the logarithm of the answer?
+
+
+ }
+
+}
+\details{
+ The form of the conditional likelihood is
+ described in \code{\link{posbernoulli.b}}.
+
+
+ The denominator is equally shared among the elements of
+ the matrix \code{x}.
+
+
+}
+\value{
+ This function returns a data frame with some attributes.
+ The function generates random deviates
+ (\eqn{\tau} columns labelled \code{y1}, \code{y2}, \ldots)
+ for the response.
+ Some indicator columns are also included
+ (those starting with \code{ch} are for previous capture history,
+ and those starting with \code{z} are zero),
+ and these are useful for the \code{xij} argument.
+
+
+  The function \code{dposbern} gives the density.
+
+
+}
+%\references{ }
+\author{ Thomas W. Yee. }
+\note{
+ The \code{r}-type function is experimental and does not follow the
+ usual conventions of \code{r}-type R functions.
+ The \code{d}-type function is more conventional.
+
+
+}
+
+\seealso{
+% \code{\link{huggins91}},
+ \code{\link{posbernoulli.b}},
+ \code{\link{posbernoulli.t}},
+ \code{\link{posbernoulli.tb}}.
+
+
+}
+\examples{
+set.seed(123); rposbern(n = 10)
+attributes(rposbern(n = 10))
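+# Sketch: with is.popn = TRUE, n is the population size, so animals never
+# captured are dropped and fewer than n rows may be returned
+nrow(rposbern(n = 100, is.popn = TRUE))  # Usually less than 100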
+}
+\keyword{distribution}
+\keyword{datagen}
+
+
+%double.ch = FALSE,
+
+
+
diff --git a/man/posbernoulli.b.Rd b/man/posbernoulli.b.Rd
new file mode 100644
index 0000000..278ebc6
--- /dev/null
+++ b/man/posbernoulli.b.Rd
@@ -0,0 +1,209 @@
+\name{posbernoulli.b}
+%\alias{posbernoulli}
+\alias{posbernoulli.b}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Bernoulli Family Function with Behavioural Effects }
+\description{
+ Fits a GLM-like model to multiple Bernoulli responses where
+ each row in the capture history matrix response has at least one success
+ (capture).
+ Capture history behavioural effects are accommodated.
+
+
+}
+\usage{
+posbernoulli.b(link = "logit", parallel.b = FALSE, apply.parint = TRUE,
+ icap.prob = NULL, irecap.prob = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link, parallel.b, apply.parint, icap.prob, irecap.prob}{
+
+ See \code{\link{CommonVGAMffArguments}} for information about
+ these arguments.
+ With an intercept-only model
+ setting \code{parallel.b = TRUE} results in the \eqn{M_0} model;
+ it just deletes the 2nd column of the constraint matrix corresponding
+ to the intercept.
+
+
+% The default value of \code{zero} means that the behavioural
+% effect is modelled as the difference between the
+% two intercepts.
+
+
+% That is, it is modelled through the intercept, and a
+% negative value of the second linear/additive predictor means trap shy, etc.
+
+
+
+ }
+}
+\details{
+ This model
+ (commonly known as \eqn{M_b} in the capture--recapture literature)
+ operates on a capture history matrix response of 0s and 1s.
+ See \code{\link{posbernoulli.t}} for details.
+
+
+ Each sampling occasion has the same probability and this is modelled here.
+ But once an animal is captured, it is marked so that its future
+ capture history can be recorded. The effect of the recapture
+ probability is modelled through a second linear/additive predictor,
+ and this usually differs from the first linear/additive predictor
+ by just a different intercept (because \code{parallel.b = TRUE}
+ but the parallelism does not apply to the intercept).
+
+
+ It is well-known that some species of animals are affected by capture,
+ e.g., trap-shy or trap-happy. This \pkg{VGAM} family function
+ \emph{does} allow the capture history to be modelled via such
+ behavioural effects.
+
+
+ See \code{\link{posbernoulli.t}} for other information,
+ e.g., common assumptions.
+
+
+ The number of linear/additive predictors is \eqn{M = 2},
+ and the default links
+ are \eqn{(logit \,p_c, logit \,p_r)^T}{(logit p_c, logit p_r)^T}
+ where \eqn{p_c} is the probability of capture and
+ \eqn{p_r} is the probability of recapture.
+ The fitted value returned is of the same dimension as
+ the response matrix, and depends on the capture history:
+ prior to being first captured, it is \code{cap.prob}.
+ Afterwards, it is \code{recap.prob}.
+
+
+ By default, the constraint matrix for the intercept term
+ is set up so that \eqn{p_r} differs from \eqn{p_c} by a
+ simple binary effect. This allows an estimate of the
+ trap-happy/trap-shy effect.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+
+\section{Warning }{
+
+ See \code{\link{posbernoulli.tb}}.
+
+
+}
+
+\references{
+
+
+ See \code{\link{posbernoulli.t}}.
+
+
+}
+\author{ Thomas W. Yee. }
+
+\note{
+ When the number of sampling occasions is large
+  the computation becomes increasingly slow.
+ Monitor convergence by setting \code{trace = TRUE}.
+
+
+ The dependent variable is \emph{not} scaled to row proportions.
+ This is the same as \code{\link{posbernoulli.t}}
+ but different from \code{\link{posbinomial}}
+ and \code{\link{binomialff}}.
+
+
+}
+
+\seealso{
+ \code{\link{posbernoulli.t}} (including estimating \eqn{N}),
+ \code{\link{posbernoulli.tb}},
+ \code{\link{Perom}},
+ \code{\link{dposbern}},
+ \code{\link{rposbern}},
+ \code{\link{posbinomial}}.
+% \code{\link{huggins91}}.
+% \code{\link{vglm.control}} for \code{xij},
+
+
+}
+
+\examples{
+# Perom data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+# Fit a M_b model
+M_b <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1,
+ data = Perom, posbernoulli.b, trace = TRUE)
+coef(M_b, matrix = TRUE)
+constraints(M_b, matrix = TRUE)
+summary(M_b)
+
+# Fit a M_bh model
+M_bh <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+ posbernoulli.b, trace = TRUE, data = Perom)
+coef(M_bh, matrix = TRUE)
+constraints(M_bh) # (2,2) element of "(Intercept)" is the behavioural effect
+summary(M_bh) # Estimate of behavioural effect is positive (trap-happy)
+
+# Fit a M_h model
+M_h <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight,
+ data = Perom,
+ posbernoulli.t(parallel.t = TRUE), trace = TRUE)
+coef(M_h, matrix = TRUE)
+constraints(M_h, matrix = TRUE)
+summary(M_h)
+
+# Fit a M_0 model
+M_0 <- vglm(cbind( y1 + y2 + y3 + y4 + y5 + y6,
+ 6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1,
+ data = Perom, posbinomial, trace = TRUE)
+coef(M_0, matrix = TRUE)
+constraints(M_0, matrix = TRUE)
+summary(M_0)
+
+
+# Simulated data set ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+set.seed(123); nTimePts <- 5; N <- 1000
+hdata <- rposbern(n = N, nTimePts = nTimePts, pvars = 2,
+ is.popn = TRUE) # N is the popn size
+nrow(hdata) # Less than N
+# The truth: xcoeffs are c(-2, 1, 2) and cap.effect = -1
+
+model1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
+ posbernoulli.b, data = hdata, trace = TRUE)
+coef(model1)
+coef(model1, matrix = TRUE)
+constraints(model1, matrix = TRUE)
+summary(model1)
+head(depvar(model1)) # Capture history response matrix
+head(model1@extra$cap.hist1) # Info on its capture history
+head(model1@extra$cap1) # When it was first captured
+head(fitted(model1)) # Depends on capture history
+(trap.effect <- coef(model1)["(Intercept):2"]) # Should be -1
+head(model.matrix(model1, type = "vlm"), 21)
+head(hdata)
+summary(hdata)
+dim(depvar(model1))
+vcov(model1)
+
+model1@extra$N.hat # Estimate of the population size; should be about N
+model1@extra$SE.N.hat # SE of the estimate of the population size
+# An approximate 95 percent confidence interval:
+round(model1@extra$N.hat + c(-1, 1) * 1.96 * model1@extra$SE.N.hat, 1)
+}
+\keyword{models}
+\keyword{regression}
+
+%# Compare the models using a LRT
+%lrtest(M_bh, M_h)
+%(wald.pvalue <- 2 * pnorm(abs(summary(M_bh)@coef3["(Intercept):2", "z value"]),
+% lower.tail = FALSE)) # Two-sided pvalue
+
+
+
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
new file mode 100644
index 0000000..a6cba19
--- /dev/null
+++ b/man/posbernoulli.t.Rd
@@ -0,0 +1,216 @@
+\name{posbernoulli.t}
+%\alias{posbernoulli}
+\alias{posbernoulli.t}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Bernoulli Family Function with Time Effects }
+\description{
+ Fits a GLM-like model to multiple Bernoulli responses where
+ each row in the capture history matrix response has at least one success
+ (capture).
+ Sampling occasion effects are accommodated.
+
+
+}
+\usage{
+posbernoulli.t(link = "logit", parallel.t = FALSE, apply.parint = TRUE,
+ iprob = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link, iprob, parallel.t, apply.parint}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+ Setting \code{parallel.t = TRUE} results in the \eqn{M_0} model.
+
+
+ }
+}
+\details{
+ This model
+ (commonly known as \eqn{M_t} in the capture--recapture literature)
+ operates on a capture history matrix response of 0s and 1s.
+ Each column is a sampling occasion where animals are potentially
+ captured (e.g., a field trip), and each row is an individual animal.
+ Capture is a 1, else a 0.
+ No removal of animals from the population is made (closed population),
+ e.g., no immigration or emigration.
+ Each row of the response matrix has at least one capture.
+
+
+ A conditional likelihood is maximized using Fisher scoring.
+ Each sampling occasion has a separate probability that is modelled here.
+ The probabilities can be constrained to be equal by setting
+ \code{parallel.t = TRUE};
+ then the results are effectively the same as \code{\link{posbinomial}}
+ except the binomial constants are not included in the log-likelihood.
+ If \code{parallel.t = FALSE} then each column should have
+ at least one 1 and at least one 0.
+
+
+ It is well-known that some species of animals are affected by capture,
+ e.g., trap-shy or trap-happy. This \pkg{VGAM} family function
+ does \emph{not} allow any behavioral effect to be modelled
+ (\code{\link{posbernoulli.b}} does).
+ However, it \emph{does} allow covariates that are specific to
+ each sampling occasion, e.g., through the \code{xij} argument.
+ Ignoring capture history effects would mean
+ \code{\link{posbinomial}} could be used by aggregating
+ over the sampling occasions.
+
+
+ If there are no covariates that are specific to
+ each occasion then the response matrix can be summed over
+ the columns and \code{\link{posbinomial}} could be used by aggregating
+ over the sampling occasions.
+
+
+ It is assumed that the animals are independent and
+ that, for a given animal, each sampling occasion is independent.
+ And animals do not lose their marks/tags, and
+ all marks/tags are correctly recorded.
+
+
+ The number of linear/additive predictors is equal to the number
+ of sampling occasions, i.e., \eqn{M = \tau}, say.
+ The default link functions
+ are \eqn{(logit \,p_{1},\ldots,logit \,p_{\tau})^T}{(logit p_(1),\ldots,logit p_(tau))^T}
+ where \eqn{p} denotes the probability.
+% Thus \eqn{M = \tau}{M = tau}.
+
+
+ The fitted value returned is of the same dimension as the response matrix.
+
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+ Upon fitting the \code{extra} slot has a (list) component
+ called \code{N.hat}
+ which is a point estimate of the population size \eqn{N}.
+ And there is a component called \code{SE.N.hat}
+ containing its standard error.
+
+
+}
+\references{
+
+
+Huggins, R. M. (1991)
+Some practical aspects of a conditional likelihood
+approach to capture experiments.
+\emph{Biometrics},
+\bold{47}, 725--732.
+
+
+Huggins, R. M. and Hwang, W.-H. (2011)
+A review of the use of conditional likelihood in
+capture--recapture experiments.
+\emph{International Statistical Review},
+\bold{79}, 385--400.
+
+
+ Otis, D. L. and Burnham, K. P. and White, G. C. and Anderson, D. R. (1978)
+ Statistical inference from capture data on closed animal populations,
+ \emph{Wildlife Monographs},
+ \bold{62}, 3--135.
+
+
+}
+\author{ Thomas W. Yee. }
+
+\note{
+ The \code{weights} argument of \code{\link{vglm}} need not be
+ assigned, and the default is just a matrix of ones.
+
+
+ Numerical problems are more likely to occur if \code{parallel.t = FALSE}.
+ Each sampling occasion may need at least one success
+ (capture) and one failure.
+
+
+ The response matrix is returned unchanged;
+ i.e., not converted into proportions like \code{\link{posbinomial}}.
+ If the response matrix has column names then these are used in the
+ labelling, else \code{prob1}, \code{prob2}, etc. are used.
+
+
+
+Data-wise, at each sampling occasion, the \eqn{M_t} model requires at least
+one first capture and at least one noncapture.
+
+% If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of
+% the response matrix are not present then it pays to add
+% such rows to the response matrix and assign a small but
+% positive prior weight.
+% For example, if \eqn{\tau=2}{tau=2} then there should be
+% (0,1) rows,
+% (1,0) rows and
+% (1,1) rows present in the response matrix.
+
+
+}
+
+\section{Warning }{
+
+ See \code{\link{posbernoulli.tb}}.
+
+
+}
+
+\seealso{
+ \code{\link{posbernoulli.b}},
+ \code{\link{posbernoulli.tb}},
+ \code{\link{Perom}},
+ \code{\link{Huggins89.t1}},
+ \code{\link{vglm.control}} for \code{xij},
+ \code{\link{dposbern}},
+ \code{\link{rposbern}},
+ \code{\link{posbinomial}}.
+% \code{\link{huggins91}}.
+
+
+}
+
+\examples{
+M_t <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ 1, trace = TRUE,
+ posbernoulli.t, data = Perom) # Has parallel.t = FALSE
+coef(M_t, matrix = TRUE)
+summary(M_t)
+
+
+M_th.1 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
+ posbernoulli.t, data = Perom) # Has parallel.t = FALSE
+summary(M_th.1)
+head(depvar(M_th.1)) # Response capture history matrix
+dim(depvar(M_th.1))
+
+M_h.2 <- vglm(cbind(y1, y2, y3, y4, y5, y6) ~ sex + weight, trace = TRUE,
+ posbernoulli.t(parallel.t = TRUE), data = Perom)
+lrtest(M_th.1, M_h.2) # Test the parallelism assumption
+coef(M_h.2)
+coef(M_h.2, matrix = TRUE)
+constraints(M_h.2, matrix = TRUE)
+summary(M_h.2)
+head(model.matrix(M_h.2, type = "vlm"), 21)
+
+M_h.2@extra$N.hat # Estimate of the population size
+M_h.2@extra$SE.N.hat # SE of the estimate of the population size
+# An approximate 95 percent confidence interval:
+round(M_h.2@extra$N.hat + c(-1, 1) * 1.96 * M_h.2@extra$SE.N.hat, 1)
+
+
+# Fit (effectively) the parallel model using posbinomial()
+Perom <- transform(Perom, ysum = y1 + y2 + y3 + y4 + y5 + y6,
+ tau = 6)
+M_h.3 <- vglm(cbind(ysum, tau - ysum) ~ sex + weight,
+ posbinomial, data = Perom, trace = TRUE)
+max(abs(coef(M_h.2) - coef(M_h.3))) # Should be zero
+logLik(M_h.3) - logLik(M_h.2) # Difference is due to the binomial constants
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/posbernoulli.tb.Rd b/man/posbernoulli.tb.Rd
new file mode 100644
index 0000000..512291e
--- /dev/null
+++ b/man/posbernoulli.tb.Rd
@@ -0,0 +1,217 @@
+\name{posbernoulli.tb}
+%\alias{posbernoulli}
+\alias{posbernoulli.tb}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive Bernoulli Family Function with Time and Behavioural Effects (experimental) }
+\description{
+ Fits a GLM-like model to multiple
+ (currently only two or three)
+ Bernoulli responses where
+ each row in the capture history matrix response has at least one success
+ (capture).
+ Sampling occasion effects and behavioural effects are accommodated.
+ However, this function only handles two and three sampling occasions.
+
+
+}
+\usage{
+posbernoulli.tb(link = "logit", parallel.t = FALSE, parallel.b = FALSE,
+ apply.parint = FALSE, imethod = 1, iprob = NULL,
+ dconst = 0.1, dpower = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link, imethod, iprob, parallel.t, parallel.b, apply.parint}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+ But \code{parallel.t} and \code{parallel.b} must each be
+ logicals only.
+ Argument \code{parallel.t} means parallel with respect to time, and
+ matches the same argument name in \code{\link{posbernoulli.t}}.
+
+
+
+ Suppose the model is intercept-only.
+ Setting \code{parallel.t = TRUE} results in the \eqn{M_b} model.
+ Setting \code{parallel.b = TRUE} results in the \eqn{M_t} model.
+ Setting \code{parallel.t = TRUE} and
+ setting \code{parallel.b = TRUE} results in the \eqn{M_0} model.
+ Note the default for \code{parallel.t} and \code{parallel.b}
+ (both \code{FALSE})
+ may be unsuitable for most data sets which have a large \eqn{\tau}
+ because of the large number of parameters; it can be too flexible.
+ Note that adding covariates will result in a \eqn{M_{tbh}} model.
+
+
+ }
+ \item{dconst, dpower}{
+ Decay constants and power (exponent) for the ridge adjustment
+ for the working weight matrices.
+ At iteration \eqn{t} of the IRLS algorithm
+ a positive value is added to the first \eqn{\tau}{tau}
+ diagonal elements of the working weight matrices to make
+ them positive-definite. This adjustment is \eqn{K \times t^p}{K * t^p}
+ where \eqn{K} is \code{dconst} and \eqn{p} is \code{dpower}.
+ This is always positive but decays to zero as iterations proceed
+  (provided \eqn{p} is negative);
+  a small numeric illustration is given at the end of the Examples section.
+
+
+ }
+}
+\details{
+ This model
+ (commonly known as \eqn{M_{tb}} in the capture--recapture literature)
+ operates on a response matrix of 0s and 1s.
+ See \code{\link{posbernoulli.t}}
+ for information that is in common.
+
+
+ This \pkg{VGAM} family function is \emph{experimental only}.
+ When finished, it should allow time and behavioural effects to be modelled.
+ Evidently, the expected information matrix (EIM) is \emph{not} of
+ full rank, so \code{dconst} and \code{dpower} are used to
+  \emph{try} to fix up the problem.
+ The default link functions
+ are \eqn{(logit \,p_{c1},\ldots,logit \,p_{c\tau},logit \,p_{r2},\ldots,logit \,p_{r\tau})^T}{
+ (logit p_{c1},\ldots,logit p_{c,tau},logit p_{r2},\ldots,logit p_{r,tau})^T}
+ where the subscript \eqn{c} denotes capture,
+ the subscript \eqn{r} denotes recapture,
+ and it is not possible to recapture the animal at sampling occasion 1.
+ Thus \eqn{M = 2\tau - 1}{M=2*tau-1}.
+ The parameters are currently prefixed by \code{cap.prob} and \code{recap.prob}
+ for the capture and recapture probabilities.
+
+
+% Not surprisingly,
+% the fitted values are similar to \code{\link{posbernoulli.t}} and
+% \code{\link{posbernoulli.b}}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+ See \code{\link{posbernoulli.t}}.
+
+
+}
+\author{ Thomas W. Yee. }
+
+\note{
+ It is a good idea to constrain the probabilities for each sampling
+ occasion to be equal, and also allow the behavioural effect to
+ be modelled using the intercept.
+ See \code{M_tbh.1} below.
+
+
+ The current restriction of handling only \eqn{\tau=2}{tau=2} and
+ \eqn{\tau=3}{tau=3} sampling occasions
+ is unfortunate and more work is needed to extend this to four or more.
+
+
+Data-wise, at each sampling occasion, the \eqn{M_{tb}} model requires at least
+one first capture and at least one noncapture.
+
+% If not all of the \eqn{2^{\tau}-1}{2^(tau) - 1} combinations of
+% the response matrix are not present then it pays to add
+% such rows to the response matrix and assign a small but
+% positive prior weight.
+% For example, if \eqn{\tau=2}{tau=2} then there should be
+% (0,1) rows,
+% (1,0) rows and
+% (1,1) rows present in the response matrix.
+
+
+}
+
+\section{Warning }{
+
+  As this model is likely to be overparameterized, this
+  function should probably not be used (for now?).
+
+
+% From Jakub:
+ Estimation for the population size (and its SE) for the
+ \eqn{M_{tb}} model may be wrong.
+ Models \eqn{M_{tbh}} and \eqn{M_{th}} may be wrong.
+ But models \eqn{M_{bh}}, \eqn{M_{h}}, \eqn{M_{b}},
+ \eqn{M_{t}}, \eqn{M_{0}} seem fine.
+
+
+
+ Inference, especially using standard errors, may be fraught here
+ because the EIM is, strictly speaking, not of full rank.
+ A similar adjustment is made by \code{\link{zipebcom}}.
+ It is a good idea to monitor convergence.
+ The \eqn{M_0} model is best fitted with \code{\link{posbernoulli.b}}
+ or \code{\link{posbernoulli.t}} or \code{\link{posbinomial}} because
+ the standard errors are more accurate.
+
+
+}
+
+\seealso{
+ \code{\link{posbernoulli.b}} (including \eqn{\widehat{N}}),
+ \code{\link{posbernoulli.t}},
+ \code{\link{posbinomial}}.
+
+
+}
+
+\examples{
+\dontrun{
+# Example 1: simulated data
+set.seed(123)
+nTimePts <- 2 # Must be 2 or 3 currently (aka tau == # of sampling occasions)
+nnn <- 10000 # Number of animals
+pdata <- rposbern(n = nnn, nTimePts = nTimePts, pvars = 2)
+dim(pdata)
+head(pdata)
+
+clist <- list("(Intercept)" = cbind(1, c(0, 0, 1)), # Capture effect is last coln
+ x2 = rbind(1, 1, 1))
+M_tbh.1 <- vglm(cbind(y1, y2) ~ x2,
+ constraints = clist, trace = TRUE,
+ posbernoulli.tb, data = pdata)
+summary(M_tbh.1)
+
+coef(M_tbh.1)
+coef(M_tbh.1, matrix = TRUE)
+constraints(M_tbh.1, matrix = TRUE)
+summary(M_tbh.1) # Standard errors are very approximate
+head(fitted(M_tbh.1))
+head(model.matrix(M_tbh.1, type = "vlm"), 21)
+dim(depvar(M_tbh.1))
+
+
+# Example 2: Perom subset data
+Hlist <- list("(Intercept)" = cbind(1, c(0, 0, 0, 1, 1)),
+ sex = rbind(1, 1, 1, 1, 1),
+ weight = rbind(1, 1, 1, 1, 1))
+Psubset <- subset(Perom, y1 + y2 + y3 > 0)
+head(Psubset)
+
+fit1 <- vglm(cbind(y1, y2, y3) ~ sex + weight, constraints = Hlist,
+ posbernoulli.tb, data = Psubset, trace = TRUE)
+coef(fit1)
+coef(fit1, matrix = TRUE)
+summary(fit1) # Standard errors are very approximate
+
+# fit1 is the same as Fit1:
+Fit1 <- vglm(cbind(y1, y2, y3) ~ sex + weight, data = Psubset,
+ posbernoulli.tb(parallel.t = TRUE), trace = TRUE)
+constraints(Fit1) # Same as Hlist
+
+yyy <- depvar(fit1)
+if (length(table(4 * yyy[, 1] + 2 * yyy[, 2] + 1 * yyy[, 3])) != 2^(ncol(yyy))-1)
+ warning("not every combination is represented by a row in the response matrix")
+}
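+# Numeric sketch of the default ridge adjustment K * t^p added to the working
+# weight matrices at IRLS iterations t = 1:5 (dconst = 0.1, dpower = -2):
+round(0.1 * (1:5)^(-2), 4)  # Always positive, decaying towards zero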
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 0776803..b8eeabc 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -75,7 +75,7 @@ rposbinom(n, size, prob)
%None.
%}
-\author{ T. W. Yee }
+\author{ T. W. Yee. }
\note{
For \code{dposbinom()}, if arguments \code{size} or \code{prob}
equal 0 then a \code{NaN} is returned.
@@ -96,6 +96,7 @@ rposbinom(n, size, prob)
\seealso{
\code{\link{posbinomial}},
+ \code{\link{dposbern}},
\code{\link{zabinomial}},
\code{\link{zibinomial}},
\code{\link[stats:Binomial]{rbinom}}.
@@ -105,15 +106,15 @@ rposbinom(n, size, prob)
\examples{
prob <- 0.2; size <- 10
table(y <- rposbinom(n = 1000, size, prob))
-mean(y) # Sample mean
-size * prob / (1-(1-prob)^size) # Population mean
+mean(y) # Sample mean
+size * prob / (1-(1-prob)^size) # Population mean
(ii <- dposbinom(0:size, size, prob))
-cumsum(ii) - pposbinom(0:size, size, prob) # Should be 0s
+cumsum(ii) - pposbinom(0:size, size, prob) # Should be 0s
table(rposbinom(100, size, prob))
table(qposbinom(runif(1000), size, prob))
-round(dposbinom(1:10, size, prob) * 1000) # Should be similar
+round(dposbinom(1:10, size, prob) * 1000) # Should be similar
\dontrun{ barplot(rbind(dposbinom(x = 0:size, size, prob),
dbinom(x = 0:size, size, prob)),
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index ea97291..dbd02a4 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -4,19 +4,16 @@
\title{ Positive Binomial Distribution Family Function }
\description{
Fits a positive binomial distribution.
+
}
\usage{
posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Link function for the usual probability parameter.
- See \code{\link{CommonVGAMffArguments}} for more information.
+ \item{link, mv, parallel, zero}{
+ Details at \code{\link{CommonVGAMffArguments}}.
- }
- \item{mv, parallel, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
}
}
@@ -29,6 +26,16 @@ posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
fitted values, i.e., the usual mean.
+  In the capture--recapture literature this model is called
+  the \eqn{M_0} model. It arises as the sum of a sequence of
+  \eqn{\tau} Bernoulli random variates subject to at least
+  one success (capture).
+ Here, each animal has the same probability of capture or
+ recapture, regardless of the \eqn{\tau} sampling occasions.
+ Independence between animals and between sampling occasions etc.
+ is assumed.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -47,6 +54,11 @@ truncated binomial distribution.
\bold{49}, 227--237.
+Pearson, K. (1913)
+\emph{A monograph on Albinism in Man}.
+Drapers Company Research Memoirs.
+
+
}
\author{ Thomas W. Yee }
@@ -63,12 +75,10 @@ truncated binomial distribution.
as \code{\link{binomialff}}.
-
- Yet to be done: a \code{quasi.posbinomial} which estimates a
+ Yet to be done: a \code{quasi.posbinomial()} which estimates a
dispersion parameter.
-
}
\section{Warning }{
@@ -77,21 +87,35 @@ truncated binomial distribution.
}
\seealso{
- \code{\link{binomialff}}.
+ \code{\link{posbernoulli.b}},
+ \code{\link{posbernoulli.t}},
+ \code{\link{posbernoulli.tb}},
+ \code{\link{binomialff}}.
+
}
\examples{
-# Number of albinotic children in families with 5 kids (from Patil, 1962)
-akids <- data.frame(y = c(rep(1, 25), rep(2, 23), rep(3, 10), 4, 5),
- n = rep(5, 60))
-fit1 <- vglm(cbind(y, n-y) ~ 1, posbinomial, akids, trace = TRUE)
+# Number of albinotic children in families with 5 kids (from Patil, 1962) ,,,,
+albinos <- data.frame(y = c(rep(1, 25), rep(2, 23), rep(3, 10), 4, 5),
+ n = rep(5, 60))
+fit1 <- vglm(cbind(y, n-y) ~ 1, posbinomial, albinos, trace = TRUE)
summary(fit1)
-Coef(fit1) # = MLE of p = 0.3088
+Coef(fit1) # = MLE of p = 0.3088
head(fitted(fit1))
+sqrt(vcov(fit1, untransform = TRUE)) # SE = 0.0322
+
+# Fit a M_0 model to the Perom data ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+M_0 <- vglm(cbind( y1 + y2 + y3 + y4 + y5 + y6,
+ 6 - y1 - y2 - y3 - y4 - y5 - y6) ~ 1,
+ data = Perom, posbinomial, trace = TRUE)
+coef(M_0, matrix = TRUE)
+Coef(M_0)
+constraints(M_0, matrix = TRUE)
+summary(M_0)
}
\keyword{models}
\keyword{regression}
-% akids = transform(akids, yprop = y / 5)
+% albinos <- transform(albinos, yprop = y / 5)
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 57c0caf..94e83bb 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -28,8 +28,8 @@ qrrvglm.control(Rank = 1,
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
- MUXfactor = rep(7, length=Rank),
- Norrr = ~ 1,
+ MUXfactor = rep(7, length = Rank),
+ noRRR = ~ 1, Norrr = NA,
optim.maxit = 20,
Parscale = if(ITolerances) 0.001 else 1.0,
SD.Cinit = 0.02,
@@ -52,7 +52,7 @@ qrrvglm.control(Rank = 1,
\{1,2,\ldots,min(\eqn{M},\eqn{p_2}{p2})\} where the vector of explanatory
variables \eqn{x} is partitioned into (\eqn{x_1},\eqn{x_2}), which is
of dimension \eqn{p_1+p_2}{p1+p2}. The variables making up \eqn{x_1}
- are given by the terms in the \code{Norrr} argument, and the rest
+ are given by the terms in the \code{noRRR} argument, and the rest
of the terms comprise \eqn{x_2}.
}
@@ -78,7 +78,7 @@ qrrvglm.control(Rank = 1,
\item{Crow1positive}{
Logical vector of length \code{Rank} (recycled if necessary): are
the elements of the first row of \eqn{C} positive? For example,
- if \code{Rank} is 4, then specifying \code{Crow1positive=c(FALSE,
+ if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE,
TRUE)} will force \eqn{C[1,1]} and \eqn{C[1,3]} to be negative,
and \eqn{C[1,2]} and \eqn{C[1,4]} to be positive. This argument
allows for a reflection in the ordination axes because the
@@ -223,7 +223,7 @@ qrrvglm.control(Rank = 1,
}
- \item{Norrr}{
+ \item{noRRR}{
Formula giving terms that are \emph{not} to be included in the
reduced-rank regression (or formation of the latent variables),
i.e., those belong to \eqn{x_1}.
@@ -231,11 +231,20 @@ qrrvglm.control(Rank = 1,
regression) correspond to the \eqn{B_1}{B_1} matrix.
The default is to omit the intercept term from the latent variables.
+
}
+ \item{Norrr}{
+ Defunct. Please use \code{noRRR}.
+ Use of \code{Norrr} will become an error soon.
+
+
+ }
+
+
\item{Parscale}{
Numerical and positive-valued vector of length \eqn{C}
(recycled if necessary).
- Passed into \code{optim(..., control=list(parscale=Parscale))};
+ Passed into \code{optim(..., control = list(parscale = Parscale))};
the elements of \eqn{C} become \eqn{C} / \code{Parscale}.
Setting \code{ITolerances = TRUE} results in line searches that
are very large, therefore \eqn{C} has to be scaled accordingly
@@ -360,7 +369,7 @@ qrrvglm.control(Rank = 1,
%Suppose \code{FastAlgorithm = FALSE}. In theory (if
-%\code{Eta.range=NULL}), for QRR-VGLMs, the predictors have the values of
+%\code{Eta.range = NULL}), for QRR-VGLMs, the predictors have the values of
%a quadratic form. However, when \code{Eta.range} is assigned a numerical
%vector of length 2 (giving the endpoints of an interval), then those
%values lying outside the interval are assigned the closest boundary
@@ -489,7 +498,7 @@ sort(p1@misc$deviance.Bestof) # A history of all the iterations
%# 20120221; withdrawn for a while coz it creates a lot of error messages.
%# Negative binomial CQO; smallest deviance is about 275.389
%set.seed(1234) # This leads to a reasonable (but not the global) solution?
-%nb1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+%nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
% Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
% WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
% ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
@@ -497,7 +506,7 @@ sort(nb1@misc$deviance.Bestof) # A history of all the iterations
%sort(nb1@misc$deviance.Bestof) # A history of all the iterations
%summary(nb1)
%}
-%\dontrun{ lvplot(nb1, lcol=1:12, y = TRUE, pcol=1:12) }
+%\dontrun{ lvplot(nb1, lcol = 1:12, y = TRUE, pcol = 1:12) }
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index 395a56d..5b58355 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -395,5 +395,6 @@ attr(mydata, "ccoefficients") # The 'truth'
}
}
\keyword{distribution}
+\keyword{datagen}
diff --git a/man/rrar.Rd b/man/rrar.Rd
index a37b7e6..ff65b84 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -87,14 +87,15 @@ time series.
}
\examples{
+\dontrun{
year <- seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
-\dontrun{ par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
+par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
for(ii in 1:4) {
plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
type = "l", xlab = "", ylab = "", col = "blue")
- points(year, grain.us[,ii], pch = "*", col = "blue")
-}}
-apply(grain.us, 2, mean) # mu vector
+ points(year, grain.us[, ii], pch = "*", col = "blue")
+}
+apply(grain.us, 2, mean) # mu vector
cgrain <- scale(grain.us, scale = FALSE) # Center the time series only
fit <- vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
summary(fit)
@@ -105,12 +106,13 @@ print(fit@misc$Dmatrices, dig = 3)
print(fit@misc$omegahat, dig = 3)
print(fit@misc$Phimatrices, dig = 2)
-\dontrun{ par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
+par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
for(ii in 1:4) {
- plot(year, fit@misc$Z[,ii], main = paste("Z", ii, sep = ""),
+ plot(year, fit@misc$Z[, ii], main = paste("Z", ii, sep = ""),
type = "l", xlab = "", ylab = "", las = 1, col = "blue")
- points(year, fit@misc$Z[,ii], pch = "*", col = "blue")
-} }
+ points(year, fit@misc$Z[, ii], pch = "*", col = "blue")
+}
+}
}
\keyword{ts}
\keyword{regression}
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index c797301..987d567 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -254,16 +254,17 @@ Vector generalized additive models.
\code{\link{lvplot.rrvglm}},
\code{\link{vglmff-class}}.
+
}
\examples{
-# Rank-1 stereotype model of Anderson (1984)
-pneumo <- transform(pneumo,
- let = log(exposure.time),
- x3 = runif(nrow(pneumo))) # x3 is some unrelated covariate
+\dontrun{ # Rank-1 stereotype model of Anderson (1984)
+pneumo <- transform(pneumo, let = log(exposure.time),
+ x3 = runif(nrow(pneumo))) # x3 is unrelated
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
multinomial, pneumo, Rank = 1)
Coef(fit)
}
+}
\keyword{classes}
% set.seed(111)
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 1105e5b..9482aa3 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -145,9 +145,9 @@ Regression and ordered categorical variables.
\bold{46}, 1--30.
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
@@ -230,6 +230,7 @@ Regression and ordered categorical variables.
}
\examples{
+\dontrun{
# Example 1: RR negative binomial (RR-NB) with Var(Y) = mu + delta1 * mu^delta2
nn <- 1000 # Number of observations
delta1 <- 3.0 # Specify this
@@ -238,9 +239,8 @@ a21 <- 2 - delta2
mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
mydata <- transform(mydata, y2 = rnbinom(nn, mu=mu, size=(1/delta1)*mu^a21))
-\dontrun{
plot(y2 ~ x2, data = mydata, pch = "+", col = 'blue', las = 1,
- main = paste("Var(Y) = mu + ", delta1, " * mu^", delta2, sep = "")) }
+ main = paste("Var(Y) = mu + ", delta1, " * mu^", delta2, sep = ""))
rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL), mydata, trace = TRUE)
a21.hat <- (Coef(rrnb2)@A)["log(size)", 1]
@@ -256,22 +256,21 @@ se.a21.hat <- sqrt(vcov(rrnb2)["I(lv.mat)", "I(lv.mat)"])
ci.a21 <- a21.hat + c(-1, 1) * 1.96 * se.a21.hat
(ci.delta2 <- 2 - rev(ci.a21)) # The 95 percent confidence interval
-confint_rrnb(rrnb2) # Quick way to get it
+Confint.rrnb(rrnb2) # Quick way to get it
# Plot the abundances and fitted values against the latent variable
-\dontrun{
plot(y2 ~ lv(rrnb2), data = mydata, col = "blue",
xlab = "Latent variable", las = 1)
ooo <- order(lv(rrnb2))
-lines(fitted(rrnb2)[ooo] ~ lv(rrnb2)[ooo], col = "red") }
+lines(fitted(rrnb2)[ooo] ~ lv(rrnb2)[ooo], col = "red")
# Example 2: stereotype model (reduced-rank multinomial logit model)
data(car.all)
index <- with(car.all, Country == "Germany" | Country == "USA" |
Country == "Japan" | Country == "Korea")
-scar <- car.all[index, ] # standardized car data
+scar <- car.all[index, ] # standardized car data
fcols <- c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
-scar[,-fcols] = scale(scar[, -fcols]) # Standardize all numerical vars
+scar[,-fcols] = scale(scar[, -fcols]) # Standardize all numerical vars
ones <- matrix(1, 3, 1)
clist <- list("(Intercept)" = diag(3), Width = ones, Weight = ones,
Disp. = diag(3), Tank = diag(3), Price = diag(3),
@@ -279,13 +278,14 @@ clist <- list("(Intercept)" = diag(3), Width = ones, Weight = ones,
set.seed(111)
fit <- rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
multinomial, data = scar, Rank = 2, trace = TRUE,
- constraints = clist, Norrr = ~ 1 + Width + Weight,
+ constraints = clist, noRRR = ~ 1 + Width + Weight,
Uncor = TRUE, Corner = FALSE, Bestof = 2)
-fit@misc$deviance # A history of the fits
+fit@misc$deviance # A history of the fits
Coef(fit)
-\dontrun{ biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2,
+biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2,
ccol = "blue", scol = "red", Ccol = "darkgreen", Clwd = 2,
- main = "1=Germany, 2=Japan, 3=Korea, 4=USA") }
+ main = "1=Germany, 2=Japan, 3=Korea, 4=USA")
+}
}
\keyword{models}
\keyword{regression}
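A brief arithmetic sketch of why a21 = 2 - delta2 in Example 1 above (plain R, no fitting, arbitrary numbers): rnbinom() in its (mu, size) parameterization has Var(Y) = mu + mu^2 / size, so choosing size = (1/delta1) * mu^a21 yields Var(Y) = mu + delta1 * mu^delta2.

delta1 <- 3.0; delta2 <- 1.5; a21 <- 2 - delta2  # as in Example 1
mu <- 10                                         # any positive mean
size <- (1 / delta1) * mu^a21
var.nb <- mu + mu^2 / size                       # variance implied by rnbinom()
var.rr <- mu + delta1 * mu^delta2                # the intended variance function
all.equal(var.nb, var.rr)                        # TRUE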
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index d05e2d7..b60344d 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -15,7 +15,8 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
Ainit = NULL, Alpha = 0.5, Bestof = 1, Cinit = NULL,
Etamat.colmax = 10,
SD.Ainit = 0.02, SD.Cinit = 0.02, szero = NULL,
- Norrr = ~1, trace = FALSE, Use.Init.Poisson.QO = FALSE,
+ noRRR = ~1, Norrr = NA,
+ trace = FALSE, Use.Init.Poisson.QO = FALSE,
checkwz = TRUE, wzepsilon = .Machine$double.eps^0.75, ...)
}
%- maybe also `usage' for other objects documented here.
@@ -26,7 +27,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
Here, the vector of explanatory variables \bold{x} is partitioned into
(\bold{x1},\bold{x2}), which is of dimension \emph{p1}+\emph{p2}.
The variables making up \bold{x1} are given by the terms in
- \code{Norrr} argument, and the rest of the terms comprise \bold{x2}.
+ the \code{noRRR} argument, and the rest of the terms comprise \bold{x2}.
}
@@ -132,10 +133,10 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
}
% \item{ppar}{ Ignore this. }
- \item{Norrr}{
+ \item{noRRR}{
Formula giving terms that are \emph{not} to be included
in the reduced-rank regression.
- That is, \code{Norrr} specifes which explanatory variables
+ That is, \code{noRRR} specifies which explanatory variables
are in the \eqn{x_1}{x1} vector of \code{\link{rrvglm}},
and the rest go into \eqn{x_2}{x2}.
The \eqn{x_1}{x1} variables constitute
@@ -143,11 +144,19 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
Those \eqn{x_2}{x2} variables which are subject to the reduced-rank
regression correspond to the \eqn{\bold{B}_2}{\bold{B}2}
matrix.
- Set \code{Norrr = NULL} for the reduced-rank regression to
+ Set \code{noRRR = NULL} for the reduced-rank regression to
be applied to every explanatory variable including the intercept.
}
+ \item{Norrr}{
+ Deprecated. Please use \code{noRRR}.
+ Use of \code{Norrr} will become an error soon.
+
+
+ }
+
+
\item{trace}{
Logical indicating if output should be produced for
each iteration.
@@ -242,15 +251,17 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
}
\examples{
+\dontrun{
set.seed(111)
pneumo <- transform(pneumo, let = log(exposure.time),
- x3 = runif(nrow(pneumo))) # x3 is random noise
+ x3 = runif(nrow(pneumo))) # x3 is random noise
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
multinomial, pneumo, Rank = 1, Index.corner = 2)
constraints(fit)
vcov(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
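A hedged sketch of the renamed noRRR argument in a call, mirroring the pneumo example above; it assumes VGAM is attached, and whether this particular fit converges cleanly will depend on the random x3. The intercept and x3 are placed in x1, so only let is subject to the reduced-rank regression.

set.seed(111)
pneumo <- transform(pneumo, let = log(exposure.time),
                    x3 = runif(nrow(pneumo)))   # x3 is random noise
fit.norrr <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
                    multinomial, data = pneumo, Rank = 1,
                    noRRR = ~ 1 + x3)           # x1 = (intercept, x3); x2 = (let)
constraints(fit.norrr)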
diff --git a/man/s.Rd b/man/s.Rd
index cc1cf4f..da17c17 100644
--- a/man/s.Rd
+++ b/man/s.Rd
@@ -17,10 +17,12 @@ s(x, df = 4, spar = 0, ...)
Note that \code{x} must be a \emph{single} variable
and not a function of a variable.
For example, \code{s(x)} is fine but \code{s(log(x))} will fail.
- In this case, let \code{logx <- log(x)}, say, and then use \code{s(logx)}.
+ In this case, let \code{logx <- log(x)} (in the data frame),
+ say, and then use \code{s(logx)}.
At this stage bivariate smoothers (\code{x} would be a two-column matrix)
are not implemented.
+
}
\item{df}{
numerical vector of length \eqn{r}.
@@ -29,6 +31,8 @@ s(x, df = 4, spar = 0, ...)
Thus one could say that \code{df-1} is the
\emph{nonlinear degrees of freedom} of the smooth.
Recycling of values will be used if \code{df} is not of length \eqn{r}.
+ If \code{spar} is positive then this argument is ignored.
+
}
\item{spar}{ numerical vector of length \eqn{r}.
@@ -39,8 +43,13 @@ s(x, df = 4, spar = 0, ...)
Recycling of values will be used if \code{spar} is not of length
\eqn{r}.
+
+ }
+ \item{\dots}{
+ Ignored for now.
+
+
}
- \item{\dots}{ Ignored for now. }
}
\details{
In this help file \eqn{M} is the number of additive predictors
@@ -63,28 +72,35 @@ s(x, df = 4, spar = 0, ...)
involving interactions and nesting etc.
For example, \code{myfactor:s(x2)} is not a good idea.
- It also differs from the S-PLUS \code{s} which
- allows \code{spar} to be negative;
- \pkg{VGAM} does not allow this.
+
+% It also differs from the S-PLUS \code{s} which allows
+% \code{spar} to be negative; \pkg{VGAM} does not allow this.
+
}
\value{
A vector with attributes that are (only) used by \code{vgam}.
+
}
\references{
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
}
\author{ Thomas W. Yee }
\note{
+
The vector cubic smoothing spline which \code{s()} represents is
computationally demanding for large \eqn{M}.
- The cost is approximately \eqn{O(M^3)}.
+ The cost is approximately \eqn{O(n M^3)} where \eqn{n} is the
+ number of unique abscissae.
+
An alternative to using
\code{s} with \code{\link{vgam}} is
@@ -94,6 +110,7 @@ Vector generalized additive models.
The latter implements half-stepping, which is helpful if
convergence is difficult.
+
}
% ~Make other sections like WARNING with \section{WARNING }{....} ~
@@ -102,25 +119,24 @@ Vector generalized additive models.
\code{\link{vgam}},
\code{\link{vsmooth.spline}}.
+
}
\examples{
# Nonparametric logistic regression
-fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
-\dontrun{
-plot(fit, se = TRUE)}
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
+\dontrun{ plot(fit, se = TRUE) }
# Bivariate logistic model with artificial data
nn <- 300
-mydf <- data.frame(x1 = runif(nn), x2 = runif(nn))
-mydf <- transform(mydf,
- y1 = rbinom(nn, size = 1, prob = logit(sin(2*x2), inv = TRUE)),
- y2 = rbinom(nn, size = 1, prob = logit(sin(2*x2), inv = TRUE)))
-fit <- vgam(cbind(y1, y2) ~ x1 + s(x2, 3), trace=TRUE,
- binom2.or(exchangeable = TRUE ~ s(x2, 3)), data = mydf)
-coef(fit, matrix = TRUE) # Hard to interpret
-\dontrun{
-plot(fit, se = TRUE, which.term = 2, scol = "blue")}
+bdata <- data.frame(x1 = runif(nn), x2 = runif(nn))
+bdata <- transform(bdata,
+ y1 = rbinom(nn, size = 1, prob = logit(sin(2 * x2), inverse = TRUE)),
+ y2 = rbinom(nn, size = 1, prob = logit(sin(2 * x2), inverse = TRUE)))
+fit <- vgam(cbind(y1, y2) ~ x1 + s(x2, 3), trace = TRUE,
+ binom2.or(exchangeable = TRUE ~ s(x2, 3)), data = bdata)
+coef(fit, matrix = TRUE) # Hard to interpret
+\dontrun{ plot(fit, se = TRUE, which.term = 2, scol = "blue") }
}
\keyword{models}
\keyword{regression}
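A brief sketch of the workaround described under the x argument above: s(log(x)) is not allowed, so the transformed variable is created inside the data frame first and then smoothed. The data here are made up, and VGAM is assumed to be attached.

set.seed(123)
ldata <- data.frame(x = rlnorm(500))        # a positive covariate
ldata <- transform(ldata, logx = log(x))    # do the transform inside the data frame ...
ldata <- transform(ldata,
  y = rbinom(500, size = 1, prob = logit(-1 + logx, inverse = TRUE)))
fit.log <- vgam(y ~ s(logx, df = 2), binomialff, data = ldata)  # ... then smooth it
head(fitted(fit.log))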
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 3dc887a..99760a4 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -9,7 +9,8 @@
}
\usage{
seq2binomial(lprob1 = "logit", lprob2 = "logit",
- iprob1 = NULL, iprob2 = NULL, zero = NULL)
+ iprob1 = NULL, iprob2 = NULL,
+ parallel = FALSE, apply.parint = TRUE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,12 +25,9 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
A \code{NULL} means a value is obtained in the \code{initialize} slot.
}
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- If used, the value must be from the set \{1,2\} which correspond to
- the first and second probabilities respectively.
- A \code{NULL} value means none.
+ \item{parallel, apply.parint, zero}{
+ Details at \code{\link{Links}}.
+
}
}
@@ -58,20 +56,24 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
I have named it the \emph{(two-stage) sequential binomial} model.
Fisher scoring is used.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
+
Crowder, M. and Sweeting, T. (1989).
Bayesian inference for a bivariate binomial distribution.
\emph{Biometrika},
\bold{76},
599--603.
+
}
\author{ Thomas W. Yee }
\note{
@@ -108,6 +110,8 @@ fit <- vglm(cbind(y1, y2) ~ x2, seq2binomial, weight = mvector,
coef(fit)
coef(fit, matrix = TRUE)
head(fitted(fit))
+head(depvar(fit))
+head(weights(fit, type = "prior")) # Same as with(sdata, mvector)
}
\keyword{models}
\keyword{regression}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 498b58a..1c867a5 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -47,7 +47,7 @@ paralogistic (\eqn{a=q}).
The Singh-Maddala distribution has density
\deqn{f(y) = aq y^{a-1} / [b^a \{1 + (y/b)^a\}^{1+q}]}{%
f(y) = aq y^(a-1) / [b^a (1 + (y/b)^a)^(1+q)]}
- for \eqn{a > 0}, \eqn{b > 0}, \eqn{q > 0}, \eqn{y > 0}.
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and the others are shape parameters.
The cumulative distribution function is
@@ -65,6 +65,7 @@ provided \eqn{-a < 1 < aq}; these are returned as the fitted values.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
@@ -104,6 +105,24 @@ fit <- vglm(y ~ 1, sinmad(ishape1.a = exp(1)), sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
+
+# Harder problem (has the shape3.q parameter going to infinity)
+
+set.seed(3)
+sdata <- data.frame(y1 = rbeta(1000, 6, 6))
+# hist(with(sdata, y1))
+if (FALSE) {
+# This fails
+ fit1 <- vglm(y1 ~ 1, sinmad, data = sdata, trace = TRUE)
+ fit1 <- vglm(y1 ~ 1, sinmad, data = sdata, trace = TRUE, maxit = 6,
+ crit = "coef")
+ Coef(fit1)
+}
+# Try this remedy:
+fit2 <- vglm(y1 ~ 1, sinmad(ishape3.q = 3, lshape3.q = "loglog"),
+ data = sdata, trace = TRUE, stepsize = 0.05, maxit = 99)
+coef(fit2, matrix = TRUE)
+Coef(fit2)
}
\keyword{models}
\keyword{regression}
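A numerical spot check (a sketch only) that dsinmad() agrees with the density displayed above, f(y) = a q y^(a-1) / [b^a {1 + (y/b)^a}^(1+q)], using the dsinmad() arguments shown under sinmadUC.Rd below.

a <- 2; b <- 3; qq <- 4                     # shape1.a, scale and shape3.q
y <- seq(0.1, 5, by = 0.1)
dens.byhand <- a * qq * y^(a - 1) / (b^a * (1 + (y / b)^a)^(1 + qq))
dens.pkg <- dsinmad(y, shape1.a = a, scale = b, shape3.q = qq)
max(abs(dens.byhand - dens.pkg))            # expect a value very close to 0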
diff --git a/man/sinmadUC.Rd b/man/sinmadUC.Rd
index bbcc3fb..dbe8437 100644
--- a/man/sinmadUC.Rd
+++ b/man/sinmadUC.Rd
@@ -10,6 +10,7 @@
generation for the Singh-Maddala distribution with shape parameters \code{a}
and \code{q}, and scale parameter \code{scale}.
+
}
\usage{
dsinmad(x, shape1.a, scale = 1, shape3.q, log = FALSE)
@@ -37,6 +38,7 @@ rsinmad(n, shape1.a, scale = 1, shape3.q)
\code{qsinmad} gives the quantile function, and
\code{rsinmad} generates random deviates.
+
}
\references{
@@ -67,7 +69,7 @@ Hoboken, NJ: Wiley-Interscience.
}
\examples{
-sdata <- data.frame(y = rsinmad(n = 3000, 4, 6, 2))
+sdata <- data.frame(y = rsinmad(n = 3000, exp(1), exp(2), exp(1)))
fit <- vglm(y ~ 1, sinmad(ishape1.a = 2.1), sdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
diff --git a/man/snormUC.Rd b/man/snormUC.Rd
index 1be799d..0b8e5fb 100644
--- a/man/snormUC.Rd
+++ b/man/snormUC.Rd
@@ -24,7 +24,10 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
\item{x}{vector of quantiles.}
% \item{x, q}{vector of quantiles.}
% \item{p}{vector of probabilities.}
- \item{n}{number of observations. Must be a single positive integer. }
+ \item{n}{number of observations.
+ Same as \code{\link[stats]{runif}}.
+
+ }
\item{location}{
The location parameter \eqn{\xi}{xi}. A vector.
diff --git a/man/studentt.Rd b/man/studentt.Rd
index 234fce5..4466b72 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -140,7 +140,7 @@ application to financial econometrics.
\code{\link{normal1}},
\code{\link{cauchy1}},
\code{\link{logistic}},
- \code{\link{huber}},
+ \code{\link{huber2}},
\code{\link{koenker}},
\code{\link[stats]{TDist}}.
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 2c3527c..4c420bf 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -120,10 +120,10 @@ tikuv(d, lmean = "identity", lsigma = "loge",
}
\examples{
-m = 1.0; sigma = exp(0.5)
+m <- 1.0; sigma <- exp(0.5)
tdata <- data.frame(y = rtikuv(n = 1000, d = 1, m = m, s = sigma))
tdata <- transform(tdata, sy = sort(y))
-fit <- vglm(y ~ 1, fam = tikuv(d = 1), tdata, trace = TRUE)
+fit <- vglm(y ~ 1, tikuv(d = 1), data = tdata, trace = TRUE)
coef(fit, matrix = TRUE)
(Cfit <- Coef(fit))
with(tdata, mean(y))
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index 84ff221..96164c7 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -10,6 +10,7 @@
random generation for
the short-tailed symmetric distribution of Tiku and Vaughan (1999).
+
}
\usage{
dtikuv(x, d, mean = 0, sigma = 1, log = FALSE)
@@ -67,6 +68,7 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
\seealso{
\code{\link{tikuv}}.
+
}
\examples{
\dontrun{ par(mfrow = c(2, 1))
diff --git a/man/tobit.Rd b/man/tobit.Rd
index 132952e..a977e6f 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -169,6 +169,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
}
\examples{
+\dontrun{
# Here, fit1 is a standard Tobit model and fit2 is a nonstandard Tobit model
tdata <- data.frame(x2 = seq(-1, 1, length = (nn <- 100)))
set.seed(1)
@@ -219,6 +220,7 @@ head(fit4@extra$censoredU) # A matrix
head(fit4@misc$Lower) # A matrix
head(fit4@misc$Upper) # A matrix
coef(fit4, matrix = TRUE)
+}
\dontrun{ # Plot the results
par(mfrow = c(2, 2))
diff --git a/man/toxop.Rd b/man/toxop.Rd
index 70c4317..74dd173 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -34,7 +34,7 @@
}
\seealso{
- \code{\link{dexpbinomial}}.
+ \code{\link[VGAM]{dexpbinomial}}.
}
@@ -54,7 +54,7 @@
}
\examples{
-\dontrun{ with(toxop, plot(rainfall, positive/ssize, col = "blue"))
+\dontrun{ with(toxop, plot(rainfall, positive / ssize, col = "blue"))
plot(toxop, col = "blue") }
}
\keyword{datasets}
diff --git a/man/tparetoUC.Rd b/man/tparetoUC.Rd
index c4c638d..ebb4e39 100644
--- a/man/tparetoUC.Rd
+++ b/man/tparetoUC.Rd
@@ -10,6 +10,7 @@
for the upper truncated Pareto(I) distribution with parameters
\code{lower}, \code{upper} and \code{shape}.
+
}
\usage{
dtpareto(x, lower, upper, shape, log = FALSE)
@@ -24,6 +25,7 @@ rtpareto(n, lower, upper, shape)
\item{lower, upper, shape}{
the lower, upper and shape (\eqn{k}) parameters.
If necessary, values are recycled.
+
}
}
@@ -51,6 +53,7 @@ rtpareto(n, lower, upper, shape)
range restrictions imposed on the parameters.
+
}
%%\note{
%% The truncated Pareto distribution is
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 875ddba..9ba1e90 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -7,7 +7,7 @@ Produces a trajectory plot for
\emph{quadratic reduced-rank vector generalized linear models}
(QRR-VGLMs).
It is only applicable for rank-1 models with argument
-\code{Norrr = ~ 1}.
+\code{noRRR = ~ 1}.
}
\usage{
@@ -104,7 +104,7 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
}
\item{check.ok}{ Logical. Whether a check is performed to see
- that \code{Norrr = ~ 1} was used.
+ that \code{noRRR = ~ 1} was used.
It doesn't make sense to have a trace plot unless this is so.
@@ -130,7 +130,7 @@ In the above, \eqn{M} is the number of species selected for plotting,
so there will be \eqn{M(M-1)/2}{M*(M-1)/2} curves/trajectories in total.
-A trajectory plot will be fitted only if \code{Norrr = ~ 1} because
+A trajectory plot will be fitted only if \code{noRRR = ~ 1} because
otherwise the trajectory will not be a smooth function of the latent
variables.
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
new file mode 100644
index 0000000..a111b99
--- /dev/null
+++ b/man/truncweibull.Rd
@@ -0,0 +1,150 @@
+\name{truncweibull}
+\alias{truncweibull}
+%\alias{truncweibullff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Truncated Weibull Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter Weibull distribution
+ with lower truncation.
+ No observations should be censored.
+
+}
+\usage{
+truncweibull(lower.limit = 1e-5,
+ lAlpha = "loge", lBetaa = "loge",
+ iAlpha = NULL, iBetaa = NULL,
+ nrfs = 1, probs.y = c(0.2, 0.5, 0.8),
+ imethod = 1, zero = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lower.limit}{
+ Positive lower truncation limits.
+ Recycled to the same dimension as the response, going
+ across rows first.
+ The default, being close to 0, should mean effectively the same
+ results as \code{\link{weibull}} if there are no response
+ values that are smaller.
+
+
+ }
+
+ \item{lAlpha, lBetaa}{
+ Parameter link functions applied to the
+ (positive) parameters \code{Alpha}
+ (called \eqn{\alpha} below) and
+ (positive) \code{Betaa} (called \eqn{\beta} below).
+ See \code{\link{Links}} for more choices.
+
+
+ }
+ \item{iAlpha, iBetaa}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+
+ }
+ \item{imethod, nrfs, zero, probs.y}{
+ Details at \code{\link{weibull}}.
+
+ }
+}
+\details{
+ The MLEs of the two parameters of the Weibull distribution are
+ computed, subject to lower truncation.
+ That is, all response values are greater than \code{lower.limit},
+ element-wise.
+ For a particular observation, this can be any known positive value.
+ This function is currently based directly on
+ Wingo (1989), and his parameterization is used (it differs
+ from \code{\link{weibull}}).
+ In particular,
+ \eqn{\beta = a} and \eqn{\alpha = (1/b)^a}
+ where \eqn{a} and \eqn{b} are as in \code{\link{weibull}} and
+ \code{\link[stats:Weibull]{dweibull}}.
+
+
+% More details about the Weibull density are \code{\link{weibull}}.
+
+
+ Upon fitting, the \code{extra} slot has a component called
+ \code{lower.limit} which is of the same dimension as the
+ response.
+ The fitted values are the means, which are computed
+ using \code{\link{pgamma.deriv}}
+ and \code{\link{pgamma.deriv.unscaled}}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+ Wingo, D. R. (1989)
+ The left-truncated Weibull distribution: theory and computation.
+ \emph{Statistical Papers},
+ \bold{30}(1), 39--48.
+
+
+}
+
+\author{ T. W. Yee }
+\note{
+ More improvements need to be made, e.g.,
+ initial values are currently based on no truncation.
+ This \pkg{VGAM} family function handles multiple responses.
+
+
+}
+\section{Warning}{
+ This function may be converted to the same parameterization as
+ \code{\link{weibull}} at any time.
+ Yet to do: one element of the EIM may be wrong (due to
+ two interpretations of a formula; but it seems to work).
+ Convergence is slower than usual and this may imply something
+ is wrong; use argument \code{maxit}.
+ In fact, it's probably because \code{\link{pgamma.deriv.unscaled}} is
+ inaccurate at \code{q = 1} and \code{q = 2}.
+ Also,
+ convergence should be monitored, especially if the truncation
+ means that a large proportion of the data is lost compared to an
+ ordinary Weibull distribution.
+
+}
+
+\seealso{
+ \code{\link{weibull}},
+ \code{\link[stats:Weibull]{dweibull}},
+ \code{\link{pgamma.deriv}},
+ \code{\link{pgamma.deriv.unscaled}}.
+
+
+}
+\examples{
+nn <- 5000; prop.lost <- 0.40 # Proportion lost to truncation
+wdata <- data.frame(x2 = runif(nn)) # Complete Weibull data
+wdata <- transform(wdata,
+ Betaa = exp(1)) # > 2 is okay (satisfies regularity conds)
+wdata <- transform(wdata, Alpha = exp(0.5 - 1 * x2))
+wdata <- transform(wdata, Shape = Betaa,
+# aaa = Betaa,
+# bbb = 1 / Alpha^(1 / Betaa),
+ Scale = 1 / Alpha^(1 / Betaa))
+wdata <- transform(wdata, y2 = rweibull(nn, shape = Shape, scale = Scale))
+summary(wdata)
+
+lower.limit2 <- with(wdata, quantile(y2, prob = prop.lost)) # Proportion lost
+wdata <- subset(wdata, y2 > lower.limit2) # Smaller due to truncation
+
+fit1 <- vglm(y2 ~ x2, maxit = 100, trace = TRUE,
+ truncweibull(lower.limit = lower.limit2), data = wdata)
+coef(fit1, matrix = TRUE)
+summary(fit1)
+vcov(fit1)
+head(fit1@extra$lower.limit)
+}
+\keyword{models}
+\keyword{regression}
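A small arithmetic sketch of the Wingo parameterization stated in the Details section above: beta = a and alpha = (1/b)^a, with a and b the shape and scale of dweibull(). The example's line Scale = 1 / Alpha^(1 / Betaa) is this mapping inverted; the numbers below are arbitrary.

shape.a <- 2.5; scale.b <- 1.7              # dweibull() shape and scale
Betaa <- shape.a                            # Wingo's beta
Alpha <- (1 / scale.b)^shape.a              # Wingo's alpha
scale.back <- 1 / Alpha^(1 / Betaa)         # invert, as in the example above
all.equal(scale.back, scale.b)              # TRUE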
diff --git a/man/ugss.Rd b/man/ugss.Rd
deleted file mode 100644
index ffa6746..0000000
--- a/man/ugss.Rd
+++ /dev/null
@@ -1,100 +0,0 @@
-\name{ugss}
-\alias{ugss}
-\docType{data}
-\title{
- Undergraduate Statistics Students Lifestyle Questionnaire
-
-%% ~~ data name/kind ... ~~
-}
-\description{
- About 800 students studying undergraduate statistics
- were asked many lifestyle questions.
-
-%% ~~ A concise (1-5 lines) description of the dataset. ~~
-}
-\usage{data(ugss)}
-\format{
- A data frame with 804 observations on the following 29 variables.
-\describe{
- \item{\code{sex}}{Gender, a factor, (female or male) }
- \item{\code{age}}{age in years, a numeric vector}
- \item{\code{eyes}}{eye colour, a factor,
- (blue, brown, green, hazel or other)}
- \item{\code{piercings}}{Number of body piercings, a numeric vector}
- \item{\code{pierced}}{Any body piercings? a factor, (Yes or No)}
- \item{\code{tattoos}}{Number of tattoos, a numeric vector}
- \item{\code{tattooed}}{Any tattoos? a factor, (Yes or No) }
- \item{\code{glasses}}{Wears glasses etc.? a factor, (Yes or No)}
- \item{\code{sleep}}{Average number of hours of sleep per night,
- a numeric vector}
- \item{\code{study}}{Average number of hours of study per week,
- a numeric vector}
- \item{\code{tv}}{Average number of hours watching TV per week,
- a numeric vector}
- \item{\code{movies}}{
- Number of movies seen at a cinema during the last 3 months,
- a numeric vector
- }
- \item{\code{movies3m}}{Seen movies in last 3 months?
- a factor, (Yes or No)}
- \item{\code{sport}}{Favourite sport, a factor,
- about 19 of them }
- \item{\code{entertainment}}{Favourite entertainment, a factor,
- about 15 of them }
- \item{\code{fruit}}{Favourite fruit a factor,
- about 13 of them }
- \item{\code{income}}{Average income during semester per week,
- a numeric vector }
- \item{\code{rent}}{Amount spent on rent or room and board per week,
- a numeric vector }
- \item{\code{clothes}}{Average amount spent on clothes per month,
- a numeric vector }
- \item{\code{hair}}{Average cost to get a hair-cut,
- a numeric vector }
- \item{\code{tobacco}}{Average amount spent on tobacco per week,
- a numeric vector}
- \item{\code{smokes}}{Smokes? a factor, (Yes or No) }
- \item{\code{alcohol}}{Average amount spent on alcohol per week,
- a numeric vector }
- \item{\code{buy.alcohol}}{Buys (purchases) alcohol? a factor, (Yes or No) }
- \item{\code{sendtxt}}{Average number text messages sent per day,
- a numeric vector.}
- \item{\code{receivetxt}}{Average number text messages received per day,
- a numeric vector.}
- \item{\code{txts}}{Uses text messaging? a factor, (Yes or No) }
- \item{\code{country}}{Country of birth, a factor,
- about 54 of them }
- \item{\code{status}}{Student status, a factor,
- (International, NZ.Citizen, NZ.Resident) }
- }
-}
-%%\format{
-%% The format is:
-%% chr "ugss"
-%%}
-\details{
- This data was collected online and anonymously in 2010.
- The respondents were students studying an undergraduate statistics
- course at a New Zealand university.
- Possibly there are duplicate students (due to failing and
- re-enrolling).
- All monies are in NZD.
- Note the data has had minimal checking.
- Most numerical variables tend to have measurement error, and all of
- them happen to be all integer-valued.
-
-
-%% ~~ If necessary, more details than the __description__ above
-}
-
-
-%%\source{
-%% ~~ reference to a publication or URL from which the data were obtained ~~
-%%}
-%%\references{
-%% ~~ possibly secondary sources and usages ~~
-%%}
-\examples{
-summary(ugss)
-}
-\keyword{datasets}
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 22c3eaa..f23c3b7 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -5,6 +5,10 @@
%
%
%
+% 20121105
+\alias{Rank,qrrvglm-method}
+\alias{Rank,rrvglm-method}
+\alias{Rank,cao-method}
% 20120821
\alias{model.matrix,vsmooth.spline-method}
%
@@ -86,6 +90,7 @@
\alias{depvar,rcim-method}
\alias{depvar,rrvglm-method}
\alias{depvar,vlm-method}
+\alias{depvar,vsmooth.spline-method}
%
\alias{deviance,qrrvglm-method}
\alias{deviance,vlm-method}
@@ -201,6 +206,10 @@
\alias{lv,Coef.rrvglm-method}
\alias{lv,Coef.qrrvglm-method}
\alias{lv,uqo-method}
+\alias{latvar,Coef.qrrvglm-method}
+\alias{latvar,Coef.rrvglm-method}
+\alias{latvar,qrrvglm-method}
+\alias{latvar,rrvglm-method}
\alias{Max,qrrvglm-method}
\alias{Max,Coef.qrrvglm-method}
\alias{Max,uqo-method}
@@ -326,6 +335,7 @@
\alias{Tol,Coef.uqo-method}
\alias{trplot,qrrvglm-method}
\alias{trplot,uqo-method}
+\alias{trplot,cao-method}
\alias{vcov,rrvglm-method}
\alias{vcov,qrrvglm-method}
\alias{vcov,vlm-method}
diff --git a/man/uqo.Rd b/man/uqo.Rd
index d017509..7d8084f 100644
--- a/man/uqo.Rd
+++ b/man/uqo.Rd
@@ -26,24 +26,32 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
Since there is no \eqn{x_2} vector by definition, the RHS of
the formula has all terms belonging to the \eqn{x_1} vector.
+
}
\item{family}{ a function of class \code{"vglmff"} describing
what statistical model is to be fitted. Currently two families
are supported: Poisson and binomial.
+
+
}
\item{data}{ an optional data frame containing the variables
in the model. By default the variables are taken from
\code{environment(formula)}, typically the environment from
which \code{uqo} is called.
+
+
}
\item{weights}{ an optional vector or matrix of (prior) weights
to be used in the fitting process.
This argument should not be used.
+
}
\item{subset}{ an optional logical vector specifying a subset of
observations to
be used in the fitting process.
+
+
}
\item{na.action}{
a function which indicates what should happen when
@@ -52,50 +60,82 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
of \code{\link[base]{options}}, and is \code{na.fail}
if that is unset.
The ``factory-fresh'' default is \code{na.omit}.
+
+
}
\item{etastart}{ starting values for the linear predictors.
It is a \eqn{M}-column matrix. If \eqn{M = 1} then it may be a vector.
+
+
}
\item{mustart}{ starting values for the
fitted values. It can be a vector or a matrix.
Some family functions do not make use of this argument.
+
+
}
\item{coefstart}{ starting values for the
- coefficient vector. }
+ coefficient vector.
+
+
+ }
\item{control}{ a list of parameters for controlling the fitting process.
See \code{\link{uqo.control}} for details.
+
+
}
\item{offset}{ a vector or \eqn{M}-column matrix of offset values.
This argument should not be used.
+
+
}
\item{method}{
the method to be used in fitting the model.
The default (and presently only) method \code{uqo.fit}
uses iteratively reweighted least squares (IRLS).
+
+
}
\item{model}{ a logical value indicating whether the
\emph{model frame}
- should be assigned in the \code{model} slot. }
+ should be assigned in the \code{model} slot.
+
+ }
+
\item{x.arg, y.arg}{ logical values indicating whether
the model matrix and response matrix used in the fitting
process should be assigned in the \code{x} and \code{y} slots.
Note the model matrix is the LM model matrix.
+
}
\item{contrasts}{ an optional list. See the \code{contrasts.arg}
- of \code{\link{model.matrix.default}}. }
+ of \code{\link{model.matrix.default}}.
+
+
+ }
+
\item{constraints}{ an optional list of constraint matrices.
This argument should not be used.
+
+
}
\item{extra}{ an optional list with any extra information that
might be needed by the family function.
+
+
}
\item{qr.arg}{ logical value indicating whether
the slot \code{qr}, which returns the QR decomposition of the
VLM model matrix, is returned on the object.
This argument should not be set \code{TRUE}.
+
+
}
- \item{\dots}{ further arguments passed into \code{\link{uqo.control}}. }
+ \item{\dots}{ further arguments passed into \code{\link{uqo.control}}.
+
+
+ }
}
@@ -108,6 +148,7 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
and will often fail (even for \code{Rank = 1}) but hopefully this will
be improved in the future.
+
The central formula is given by
\deqn{\eta = B_1^T x_1 + A \nu +
\sum_{m = 1}^M (\nu^T D_m \nu) e_m}{%
@@ -127,6 +168,7 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
arguments in \code{\link{uqo.control}};
see also \code{\link{cqo}} and \code{\link{qrrvglm.control}}.
+
Currently, only Poisson and binomial \pkg{VGAM} family functions are
implemented for this function, and dispersion parameters for these are
assumed known. Thus the Poisson is catered for by
@@ -134,10 +176,13 @@ assumed known. Thus the Poisson is catered for by
Those beginning with \code{"quasi"} have dispersion parameters that are
estimated for each species, hence will give an error message here.
+
}
\value{
An object of class \code{"uqo"}
(this may change to \code{"quvglm"} in the future).
+
+
}
\references{
@@ -248,12 +293,12 @@ up1 <- uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Crow1positive = TRUE, lvstart = -lv(p1))
if (deviance(up1) > 1310.0) stop("suboptimal fit obtained")
-nos <- ncol(up1@y) # Number of species
+nos <- ncol(depvar(up1)) # Number of species
clr <- (1:(nos+1))[-7] # Omit yellow
lvplot(up1, las = 1, y = TRUE, pch = 1:nos, scol = clr, lcol = clr,
- pcol = clr, llty = 1:nos, llwd=2)
-legend(x=2, y = 135, colnames(up1@y), col = clr, lty = 1:nos,
- lwd=2, merge = FALSE, ncol = 1, x.inter=4.0, bty = "l", cex = 0.9)
+ pcol = clr, llty = 1:nos, llwd = 2)
+legend(x = 2, y = 135, colnames(up1@y), col = clr, lty = 1:nos,
+ lwd = 2, merge = FALSE, ncol = 1, x.inter = 4.0, bty = "l", cex = 0.9)
# Compare the site scores between the two models
plot(lv(p1), lv(up1), xlim = c(-3, 4), ylim = c(-3, 4), las = 1)
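A toy evaluation (made-up numbers, no fitting) of the central formula quoted above, eta = B_1^T x_1 + A nu + sum_m (nu^T D_m nu) e_m, for a rank-1 model with three responses; negative-definite D_m give the usual unimodal response in the latent variable nu.

M <- 3; Rank <- 1
B1 <- c(1.0, 0.5, -0.2)                                # intercepts only, so x1 = 1
A <- matrix(c(2, 1, -1), M, Rank)
Dm <- list(matrix(-0.5), matrix(-1.0), matrix(-0.3))   # one 1 x 1 D matrix per response
nu <- 0.7                                              # a site score
quad <- sapply(Dm, function(D) drop(t(nu) %*% D %*% nu))
(eta <- B1 + drop(A %*% nu) + quad)                    # the M linear predictors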
diff --git a/man/uqo.control.Rd b/man/uqo.control.Rd
index 5b342cd..127d2e1 100644
--- a/man/uqo.control.Rd
+++ b/man/uqo.control.Rd
@@ -27,6 +27,8 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
\item{Rank}{ The numerical rank \eqn{R} of the model,
i.e., the number of latent variables or ordination axes.
Currently only \eqn{R=1} is recommended.
+
+
}
\item{Bestof}{ Integer. The best of \code{Bestof} models fitted is
returned. This argument helps guard against local solutions by
@@ -34,6 +36,7 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
The argument has value 1 if an initial value for the site scores is
inputted using \code{lvstart}.
+
}
\item{CA1}{
Logical. If \code{TRUE} the site scores from a correspondence analysis
@@ -41,6 +44,7 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
\code{CA1} and \code{Use.Init.Poisson.QO} cannot both be
\code{TRUE}.
+
}
\item{Crow1positive}{
Logical vector of length \code{Rank} (recycled if necessary):
@@ -53,12 +57,14 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
\eqn{C} matrix with UQO, but the argument's name comes from
\code{\link{qrrvglm.control}} and is left unchanged for convenience.
+
}
\item{epsilon}{
Positive numeric. Used to test for convergence for GLMs fitted
in FORTRAN. Larger values mean a loosening of the convergence
criterion.
+
}
\item{EqualTolerances}{
Logical indicating whether each (quadratic) predictor will have
@@ -76,6 +82,7 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
See \bold{Details} in \code{\link{cqo}} and \code{\link{qrrvglm.control}}
for more details.
+
}
\item{Etamat.colmax}{
Positive integer, no smaller than \code{Rank}. Controls the amount
@@ -84,6 +91,7 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
In general, the larger the value, the better the initial value.
Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
}
\item{GradientFunction}{
@@ -91,11 +99,13 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
used or not, i.e., to compute gradient values. The default value is
usually faster on most problems.
+
}
\item{Hstep}{
Positive value. Used as the step size in the finite difference
approximation to the derivatives by \code{\link[stats]{optim}}.
+
}
\item{isdlv}{
Initial standard deviations for the latent variables (site scores).
@@ -111,6 +121,7 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
the most spread of site scores, followed by the second ordination
axis, etc.
+
}
\item{ITolerances}{
Logical. If \code{TRUE} then the (common) tolerance matrix is
@@ -133,17 +144,20 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
Good possibilities for \code{lvstart} are the site scores from a
constrained ordination, e.g., from \code{\link{cqo}}.
+
}
\item{jitter.sitescores}{ Logical.
If \code{TRUE} the initial values for the site scores are jittered
to add a random element to the starting values.
+
}
\item{maxitl}{
Positive integer. Number of iterations allowed for the IRLS algorithm
implemented in the compiled code.
+
}
\item{Maxit.optim}{
Positive integer. Number of iterations given to the function
@@ -164,14 +178,17 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
\eqn{x_2} is recommended). A value about 3 or 4 is recommended.
If failure to converge occurs, try a slightly lower value.
+
}
\item{optim.maxit}{
Positive integer. Number of times \code{\link[stats]{optim}}
is invoked.
+
% At iteration \code{i}, the \code{i}th value of \code{Maxit.optim}
% is fed into \code{\link[stats]{optim}}.
+
}
\item{nRmax}{
Positive integer. If the number of parameters making up the latent
@@ -181,10 +198,13 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
conjugate-gradients method is more suitable when the number of
parameters is large because it requires less memory.
+
}
\item{SD.sitescores}{ Numeric. Standard deviation of the
initial values of the site scores, which are generated from
a normal distribution.
+
+
}
% \item{Dzero}{ Integer vector specifying which squared terms
% are to be zeroed. These linear predictors will correspond to
@@ -195,21 +215,27 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
\item{SmallNo}{ Positive numeric between \code{.Machine$double.eps} and
\code{0.0001}.
Used to avoid under- or over-flow in the IRLS algorithm.
+
+
}
\item{trace}{ Logical indicating if output should be produced for
each iteration.
+
+
}
% \item{Kinit}{ Initial values for the index parameters \code{k} in the
% negative binomial distribution (one per species).
% In general, a smaller number is preferred over a larger number.
% The vector is recycled to the number of responses (species).
% }
+
\item{Use.Init.Poisson.QO}{
Logical. If \code{TRUE} then the function \code{.Init.Poisson.QO()} is
used to obtain initial values for the site scores. If \code{FALSE}
then random numbers are used instead. \code{CA1} and
\code{Use.Init.Poisson.QO} cannot both be \code{TRUE}.
+
}
\item{\dots}{ Ignored at present. }
}
@@ -217,18 +243,23 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
The algorithm currently used by \code{\link{uqo}} is unsophisticated
and fails often. Improvements will hopefully be made soon.
+
See \code{\link{cqo}} and \code{\link{qrrvglm.control}} for more details
that are equally pertinent to UQO.
+
% zz site scores are centered. Possibly uncorrelated too?
To reduce the number of parameters being estimated, setting
\code{ITolerances = TRUE} or \code{EqualTolerances = TRUE} is advised.
+
}
\value{
A list with the components corresponding to its arguments, after
some basic error checking.
+
+
}
\references{
diff --git a/man/venice.Rd b/man/venice.Rd
index d4272a6..8c0c0ee 100644
--- a/man/venice.Rd
+++ b/man/venice.Rd
@@ -111,32 +111,34 @@ Istituzione Centro Previsione e Segnalazioni Maree.
}
\seealso{
- \code{\link{guplot}},
- \code{\link{gev}},
- \code{\link{gpd}}.
+ \code{\link[VGAM]{guplot}},
+ \code{\link[VGAM]{gev}},
+ \code{\link[VGAM]{gpd}}.
}
\examples{
-\dontrun{ matplot(venice[["year"]], venice[, -1], xlab = "Year",
- ylab = "Sea level (cm)", type = "l") }
+\dontrun{
+matplot(venice[["year"]], venice[, -1], xlab = "Year",
+ ylab = "Sea level (cm)", type = "l")
ymat <- as.matrix(venice[, paste("r", 1:10, sep = "")])
fit1 <- vgam(ymat ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
data = venice, trace = TRUE, na.action = na.pass)
head(fitted(fit1))
-\dontrun{ par(mfrow = c(2, 1), xpd = TRUE)
+par(mfrow = c(2, 1), xpd = TRUE)
plot(fit1, se = TRUE, lcol = "blue", llwd = 2, slty = "dashed")
par(mfrow = c(1,1), bty = "l", xpd = TRUE, las = 1)
qtplot(fit1, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5),
- llwd = 2, pcol = "blue", tadj = 0.1) }
+ llwd = 2, pcol = "blue", tadj = 0.1)
plot(sealevel ~ Year, data = venice90, type = "h", col = "blue")
summary(venice90)
dim(venice90)
-round(100 * nrow(venice90) / ((2009-1940+1)*365.26*24), dig = 3)
+round(100 * nrow(venice90) / ((2009 - 1940 + 1) * 365.26 * 24), dig = 3)
+}
}
\keyword{datasets}
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 837b406..9e10fce 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -33,6 +33,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
\item{family}{
Same as for \code{\link{vglm}}.
+
}
\item{data}{
an optional data frame containing the variables in the model.
@@ -40,6 +41,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
\code{environment(formula)}, typically the environment from which
\code{vgam} is called.
+
}
\item{weights, subset, na.action}{
Same as for \code{\link{vglm}}.
@@ -62,6 +64,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
The default (and presently only) method \code{vgam.fit}
uses iteratively reweighted least squares (IRLS).
+
}
\item{constraints, model, offset}{
Same as for \code{\link{vglm}}.
@@ -85,6 +88,7 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
\item{\dots}{
further arguments passed into \code{\link{vgam.control}}.
+
}
}
@@ -203,6 +207,11 @@ The \code{VGAM} Package.
%~Make other sections like WARNING with \section{WARNING }{....} ~
+\section{WARNING}{
+ See warnings in \code{\link{vglm.control}}.
+
+
+}
\seealso{
\code{\link{vgam.control}},
@@ -220,10 +229,10 @@ The \code{VGAM} Package.
\examples{ # Nonparametric proportional odds model
pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
- cumulative(parallel = TRUE), pneumo)
+ cumulative(parallel = TRUE), data = pneumo)
# Nonparametric logistic regression
-fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
\dontrun{ plot(fit, se = TRUE) }
pfit <- predict(fit, type = "terms", raw = TRUE, se = TRUE)
names(pfit)
@@ -234,12 +243,12 @@ pfit$sigma
# Fit two species simultaneously
fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
- binomialff(mv = TRUE), hunua)
-coef(fit2, matrix = TRUE) # Not really interpretable
+ binomialff(mv = TRUE), data = hunua)
+coef(fit2, matrix = TRUE) # Not really interpretable
\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
ooo <- with(hunua, order(altitude))
-with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, .8),
+with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, 0.8),
xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
main = "Two plant species' response curves", type = "l", lwd = 2))
with(hunua, rug(altitude)) }
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 52716c5..c19e35e 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -172,7 +172,15 @@ Vector generalized additive models.
}
-% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\section{Warning}{
+ See \code{\link{vglm.control}}.
+
+}
+
+
+
+
\seealso{
\code{\link{vgam}},
diff --git a/man/vglm.Rd b/man/vglm.Rd
index c35c400..1353841 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -26,6 +26,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
predictor. Different variables in each linear predictor
can be chosen by specifying constraint matrices.
+
}
\item{family}{
a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
@@ -34,6 +35,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
for general information about many types of arguments found in this
type of function.
+
}
\item{data}{
an optional data frame containing the variables in the model.
@@ -84,6 +86,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
The ``factory-fresh'' default is \code{na.omit}.
+
}
\item{etastart}{
starting values for the linear predictors.
@@ -103,34 +106,40 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
should be comparable.
Some family functions do not make use of this argument.
+
}
\item{coefstart}{
starting values for the coefficient vector.
The length and order must match that of \code{coef(fit)}.
+
}
\item{control}{
a list of parameters for controlling the fitting process.
See \code{\link{vglm.control}} for details.
+
}
\item{offset}{
a vector or \eqn{M}-column matrix of offset values.
These are \emph{a priori} known and are added to the
linear/additive predictors during fitting.
+
}
\item{method}{
the method to be used in fitting the model. The default (and
presently only) method \code{vglm.fit()} uses iteratively
reweighted least squares (IRLS).
+
}
\item{model}{
a logical value indicating whether the
\emph{model frame}
should be assigned in the \code{model} slot.
+
}
\item{x.arg, y.arg}{
logical values indicating whether
@@ -140,6 +149,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
model matrix type \code{model.matrix(vglmfit)} where
\code{vglmfit} is a \code{vglm} object.
+
}
\item{contrasts}{
an optional list. See the \code{contrasts.arg}
@@ -177,6 +187,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
an optional list with any extra information that might be needed by
the \pkg{VGAM} family function.
+
}
\item{form2}{
The second (optional) formula.
@@ -187,21 +198,25 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
If given, the slots \code{@Xm2} and \code{@Ym2} may be assigned.
Note that smart prediction applies to terms in \code{form2} too.
+
}
\item{qr.arg}{
logical value indicating whether the slot \code{qr}, which
returns the QR decomposition of the VLM model matrix,
is returned on the object.
+
}
\item{smart}{
logical value indicating whether smart prediction
(\code{\link{smartpred}}) will be used.
+
}
\item{\dots}{
further arguments passed into \code{\link{vglm.control}}.
+
}
}
@@ -291,10 +306,14 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{xlevels}{the levels of the factors, if any, used in fitting.}
\item{y}{the response, in matrix form.}
+
This slot information is repeated at \code{\link{vglm-class}}.
+
+
}
\references{
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
@@ -367,6 +386,13 @@ The \code{VGAM} Package.
}
%~Make other sections like WARNING with \section{WARNING }{....} ~
+\section{WARNING}{
+ See warnings in \code{\link{vglm.control}}.
+
+
+}
+
+
\seealso{
\code{\link{vglm.control}},
@@ -423,14 +449,14 @@ weights(fit4, type = "prior")
# Example 5. The use of the xij argument (simple case).
# The constraint matrix for 'op' has one column.
nn <- 1000
-eyesdat = round(data.frame(lop = runif(nn),
- rop = runif(nn),
- op = runif(nn)), dig = 2)
-eyesdat = transform(eyesdat, eta1 = -1 + 2 * lop,
- eta2 = -1 + 2 * lop)
-eyesdat = transform(eyesdat,
- leye = rbinom(nn, size = 1, prob = logit(eta1, inv = TRUE)),
- reye = rbinom(nn, size = 1, prob = logit(eta2, inv = TRUE)))
+eyesdat <- round(data.frame(lop = runif(nn),
+ rop = runif(nn),
+ op = runif(nn)), dig = 2)
+eyesdat <- transform(eyesdat, eta1 = -1 + 2 * lop,
+ eta2 = -1 + 2 * lop)
+eyesdat <- transform(eyesdat,
+ leye = rbinom(nn, size = 1, prob = logit(eta1, inv = TRUE)),
+ reye = rbinom(nn, size = 1, prob = logit(eta2, inv = TRUE)))
head(eyesdat)
fit5 <- vglm(cbind(leye, reye) ~ op,
binom2.or(exchangeable = TRUE, zero = 3),
@@ -456,13 +482,13 @@ constraints(fit5)
%# Here is one method to handle the xij argument with a term that
%# produces more than one column in the model matrix.
%# The constraint matrix for 'op' has essentially one column.
-%POLY3 = function(x, ...) {
+%POLY3 <- function(x, ...) {
% # A cubic; ensures that the basis functions are the same.
% poly(c(x,...), 3)[1:length(x),]
% head(poly(c(x,...), 3), length(x), drop = FALSE)
%}
%
-%fit6 = vglm(cbind(leye, reye) ~ POLY3(op), trace = TRUE,
+%fit6 <- vglm(cbind(leye, reye) ~ POLY3(op), trace = TRUE,
% fam = binom2.or(exchangeable = TRUE, zero=3), data=eyesdat,
% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
% fill(POLY3(lop,rop))),
@@ -478,12 +504,12 @@ constraints(fit5)
%
%# Example 7. The use of the xij argument (simple case).
%# Each constraint matrix has 4 columns.
-%ymat = rdiric(n <- 1000, shape=c(4,7,3,1))
-%mydat = data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n),
+%ymat <- rdiric(n <- 1000, shape=c(4,7,3,1))
+%mydat <- data.frame(x1=runif(n), x2=runif(n), x3=runif(n), x4=runif(n),
% z1=runif(n), z2=runif(n), z3=runif(n), z4=runif(n),
% X2=runif(n), Z2=runif(n))
-%mydat = round(mydat, dig=2)
-%fit7 = vglm(ymat ~ X2 + Z2, data=mydat, crit="c",
+%mydat <- round(mydat, dig=2)
+%fit7 <- vglm(ymat ~ X2 + Z2, data=mydat, crit="c",
% fam = dirichlet(parallel = TRUE), # Intercept is also parallel.
% xij = list(Z2 ~ z1 + z2 + z3 + z4,
% X2 ~ x1 + x2 + x3 + x4),
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index 88dec65..4adb313 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -8,9 +8,10 @@
}
\usage{
-vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
+vglm.control(checkwz = TRUE, Check.rank = TRUE,
+ criterion = names(.min.criterion.VGAM),
epsilon = 1e-07, half.stepsizing = TRUE,
- maxit = 30, nowarning = FALSE,
+ maxit = 30, noWarning = FALSE,
stepsize = 1, save.weight = FALSE,
trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
xij = NULL, ...)
@@ -24,6 +25,16 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
than \code{wzepsilon}. If not, any values less than
\code{wzepsilon} are replaced with this value.
+
+ }
+ \item{Check.rank}{
+ logical indicating whether the rank of the VLM matrix
+ should be checked. If this is not of full column rank then
+ the results are not to be trusted.
+ The default is to give an error message if the VLM
+ matrix is not of full column rank.
+
+
}
\item{criterion}{
character variable describing what criterion is to be
@@ -31,6 +42,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
listed in \code{.min.criterion.VGAM}, but most family
functions only implement a few of these.
+
}
\item{epsilon}{
positive convergence tolerance epsilon. Roughly speaking,
@@ -38,6 +50,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
to have converged when two successive \code{criterion}
values are within \code{epsilon} of each other.
+
}
\item{half.stepsizing}{
logical indicating if half-stepsizing is allowed. For
@@ -51,6 +64,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
criterion. \code{half.stepsizing} is ignored if
\code{criterion == "coefficients"}.
+
}
\item{maxit}{
maximum number of (usually Fisher-scoring) iterations allowed.
@@ -58,7 +72,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
}
- \item{nowarning}{
+ \item{noWarning}{
logical indicating whether to suppress a warning if
convergence is not obtained within \code{maxit} iterations.
This is ignored if \code{maxit = 1} is set.
@@ -74,6 +88,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
to be generally slow but may help increase the chances
of successful convergence for some family functions.
+
}
\item{save.weight}{
logical indicating whether the \code{weights} slot of a
@@ -83,6 +98,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
others have \code{save.weight = FALSE} in their control
functions.
+
}
\item{trace}{
logical indicating if output should be produced for each
@@ -95,11 +111,13 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
values are needed, the making of invalid assumptions,
or that the model is inappropriate for the data, etc.
+
}
\item{wzepsilon}{
small positive number used to test whether the diagonals
of the working weight matrices are sufficiently positive.
+
}
\item{xij}{
A formula or a list of formulas.
@@ -117,6 +135,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
When \code{xij} is used, the use of \code{form2} is also required
to give \emph{every} term used by the model.
+
}
% \item{jix}{
% A formula or a list of formulas specifying
@@ -139,6 +158,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
other parameters that may be picked up from control
functions that are specific to the \pkg{VGAM} family function.
+
}
}
\details{
@@ -164,6 +184,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
}
\references{
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
@@ -184,7 +205,20 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
}
-% ~Make other sections like Warning with \section{Warning }{....} ~
+\section{Warning}{
+ For some applications the default convergence criterion should
+ be tightened.
+ Setting something like \code{criterion = "coef", epsilon = 1e-09}
+ is one way to achieve this; also add
+ \code{trace = TRUE} to monitor the convergence.
+ Setting \code{maxit} to some higher number is usually not
+ needed, and needing to do so suggests something is wrong, e.g.,
+ an ill-conditioned model, over-fitting or under-fitting.
+
+
+}
+
+
\seealso{
\code{\link{vglm}},
@@ -199,13 +233,13 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
# Example 1.
pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo,
- crit = "coef", step = 0.5, trace = TRUE, eps = 1e-8, maxit = 40)
+ crit = "coef", step = 0.5, trace = TRUE, epsil = 1e-8, maxit = 40)
# Example 2. The use of the xij argument (simple case).
ymat <- rdiric(n <- 1000, shape = rep(exp(2), len = 4))
mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n),
- z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n))
+ z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n))
mydat <- transform(mydat, X = x1, Z = z1)
mydat <- round(mydat, dig = 2)
fit2 <- vglm(ymat ~ X + Z,
@@ -214,8 +248,8 @@ fit2 <- vglm(ymat ~ X + Z,
X ~ x1 + x2 + x3 + x4),
form2 = ~ Z + z1 + z2 + z3 + z4 +
X + x1 + x2 + x3 + x4)
-head(model.matrix(fit2, type = "lm")) # LM model matrix
-head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
+head(model.matrix(fit2, type = "lm")) # LM model matrix
+head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
coef(fit2)
coef(fit2, matrix = TRUE)
max(abs(predict(fit2)-predict(fit2, new = mydat))) # Predicts correctly
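A short sketch of the advice in the new Warning section above, reusing the pneumo data from Example 1 (VGAM is assumed to be attached): tighten the criterion to the coefficients, shrink epsilon below its 1e-07 default, and watch the iterations with trace.

pneumo <- transform(pneumo, let = log(exposure.time))
fit.tight <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo,
                  crit = "coef", epsilon = 1e-09, trace = TRUE)
coef(fit.tight, matrix = TRUE)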
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index cb92ff4..1767568 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -11,7 +11,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
In the following, \eqn{M} is the number of linear/additive
predictors.
- \describe{
+\describe{
\item{\code{blurb}:}{
Object of class \code{"character"} giving
a small description of the model. Important arguments such as
@@ -43,8 +43,8 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
}
\item{\code{first}:}{
Object of class \code{"expression"} to insert
- code at a special position in \code{vglm} or
- \code{vgam}.
+ code at a special position in \code{\link{vglm}} or
+ \code{\link{vgam}}.
}
\item{\code{infos}:}{
@@ -74,9 +74,9 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
}
\item{\code{last}:}{
- Object of class \code{"expression"} to insert
- code at a special position (at the very end) of \code{vglm.fit} or
- \code{vgam.fit}.
+ Object of class \code{"expression"} to insert code at a
+ special position (at the very end) of \code{vglm.fit()}
+ or \code{vgam.fit()}.
This code is evaluated after the fitting.
The list \code{misc} is often assigned components in this slot,
which becomes the \code{misc} slot on the fitted object.
@@ -108,6 +108,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
code at a special position in \code{vglm.fit} or
\code{vgam.fit}.
+
}
\item{\code{middle2}:}{
Object of class \code{"expression"} to insert
@@ -169,7 +170,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
of the weight matrices.
}
- }
+}
}
\section{Methods}{
@@ -203,26 +204,29 @@ The file is amongst other \pkg{VGAM} PDF documentation.
for functions.
- A unified method of handling arguments is to use
- \code{match.arg}. This allows, for example,
- \code{vglm(..., family = cratio(link = logit))}
- and
- \code{vglm(..., family = cratio(link = "logi"))}
- to be equivalent (Nb. there is a \code{logit} function).
+% 20130322; this is obsolete, and can delete it:
+% A unified method of handling arguments is to use
+% \code{match.arg}. This allows, for example,
+% \code{vglm(..., family = cratio(link = logit))}
+% and
+% \code{vglm(..., family = cratio(link = "logi"))}
+% to be equivalent (Nb. there is a \code{logit} function).
The \code{extra} argument in
- \code{linkinv}, \code{linkfun}, \code{deviance}, \code{loglikelihood}, etc.
+ \code{linkinv}, \code{linkfun}, \code{deviance},
+ \code{loglikelihood}, etc.
matches with the argument \code{extra}
- in \code{vglm}, \code{vgam} and \code{rrvglm}. This allows input
- to be fed into all slots of a \pkg{VGAM} family function.
+ in \code{\link{vglm}}, \code{\link{vgam}} and \code{\link{rrvglm}}.
+ This allows input to be fed into all slots of a \pkg{VGAM}
+ family function.
The expression \code{derivative} is evaluated immediately
prior to \code{weight}, so there is provision for re-use
of variables etc. Programmers must be careful to choose
variable names that do not interfere with \code{vglm.fit},
- \code{vgam.fit} etc.
+ \code{vgam.fit()} etc.
Programmers of \pkg{VGAM} family functions are encouraged
@@ -264,3 +268,9 @@ cratio(link = "cloglog")
cratio(link = "cloglog", reverse = TRUE)
}
\keyword{classes}
+
+
+
+
+
+
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
index 58fcfd5..00a0b38 100644
--- a/man/vsmooth.spline.Rd
+++ b/man/vsmooth.spline.Rd
@@ -23,6 +23,7 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
the real part is used, and the imaginary part is used for the response.
In this help file, \code{n} is the number of unique values of \code{x}.
+
}
\item{y}{
A vector, matrix or a list.
@@ -31,6 +32,7 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
In this help file, \code{M} is the number of columns of \code{y} if
there are no constraints on the functions.
+
}
\item{w}{
The weight matrices or the number of observations.
@@ -40,6 +42,7 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
By default, \code{w} is the \code{M} by \code{M} identity
matrix, denoted by \code{matrix(1, n, M)}.
+
}
\item{df}{
Numerical vector containing the degrees of
@@ -53,6 +56,7 @@ A value of 2 means a linear fit, and each element of
The larger the values of \code{df} the more wiggly the
smooths.
+
}
\item{spar}{
Numerical vector containing the non-negative smoothing
@@ -69,6 +73,7 @@ By default, the \code{NULL} value of \code{spar} means
\code{df} is used to determine the smoothing
parameters.
+
}
\item{all.knots}{ Logical. If \code{TRUE} then each distinct
value of \code{x} will be a knot. By default, only a subset of
@@ -76,18 +81,21 @@ the unique values of \code{x} are used; typically, the number
of knots is \code{O(n^0.25)} for \code{n} large,
but if \code{n <= 40} then all the unique values of \code{x} are used.
+
}
\item{iconstraint}{ A \code{M}-row constraint matrix for the
intercepts. It must be of full column rank.
By default, the constraint matrix for the intercepts is the
\code{M} by \code{M} identity matrix, meaning no constraints.
+
}
\item{xconstraint}{ A \code{M}-row constraint matrix for \code{x}.
It must be of full column rank.
By default, the constraint matrix for the intercepts is the
\code{M} by \code{M} identity matrix, meaning no constraints.
+
}
\item{constraints}{
An alternative to specifying \code{iconstraint} and \code{xconstraint},
@@ -95,27 +103,32 @@ this is a list with two components corresponding to the
intercept and \code{x} respectively. They must both be a
\code{M}-row constraint matrix with full column rank.
+
}
\item{var.arg}{ Logical: return the pointwise variances
of the fit?
Currently, this corresponds only to the nonlinear part of the
fit, and may be wrong.
+
}
\item{scale.w}{
Logical.
By default, the weights \code{w} are scaled so that the
diagonal elements have mean 1.
+
}
\item{nk}{ Number of knots.
If used, this argument overrides \code{all.knots}, and
must lie between 6 and \code{n}+2 inclusive.
+
}
\item{control.spar}{
See \code{\link[stats]{smooth.spline}}.
+
}
}
\details{
@@ -130,8 +143,9 @@ See \code{\link[stats]{smooth.spline}}.
}
\value{
- An object of class \code{"vsmooth.spline"} (see
-\code{vsmooth.spline-class}).
+ An object of class \code{"vsmooth.spline"}
+ (see \code{vsmooth.spline-class}).
+
}
\references{
@@ -155,7 +169,8 @@ Heidelberg: Physica-Verlag.
The vector cubic smoothing spline which \code{s()} represents is
computationally demanding for large \eqn{M}.
- The cost is approximately \eqn{O(M^3)}.
+ The cost is approximately \eqn{O(n M^3)} where
+ \eqn{n} is the number of unique abscissae.
Yet to be done: return the \emph{unscaled} smoothing parameters.
@@ -171,19 +186,20 @@ Heidelberg: Physica-Verlag.
\code{\link[VGAM]{s}},
\code{\link[stats]{smooth.spline}}.
+
}
\examples{
nn <- 20; x <- 2 + 5*(nn:1)/nn
-x[2:4] <- x[5:7] # Allow duplication
+x[2:4] <- x[5:7] # Allow duplication
y1 <- sin(x) + rnorm(nn, sd = 0.13)
y2 <- cos(x) + rnorm(nn, sd = 0.13)
-y3 <- 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
+y3 <- 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
y <- cbind(y1, y2, y3)
ww <- cbind(rep(3, nn), 4, (1:nn)/nn)
(fit <- vsmooth.spline(x, y, w = ww, df = 5))
\dontrun{
-plot(fit) # The 1st and 3rd functions do not differ by a constant
+plot(fit) # The 1st and 3rd functions do not differ by a constant
}
mat <- matrix(c(1,0,1, 0,1,0), 3, 2)
@@ -192,17 +208,16 @@ mat <- matrix(c(1,0,1, 0,1,0), 3, 2)
mycols <- c("orange", "blue", "orange")
\dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) }
-
p <- predict(fit, x = model.matrix(fit, type = "lm"), deriv = 0)
-max(abs(fit@y - with(p, y))) # Should be zero
+max(abs(depvar(fit) - with(p, y))) # Should be 0; and fit@y is not good

-par(mfrow <- c(3, 1))
+par(mfrow = c(3, 1))
ux <- seq(1, 8, len = 100)
-for(d in 1:3) {
- p <- predict(fit, x = ux, deriv = d)
-\dontrun{with(p, matplot(x, y, type = "l", main = paste("deriv =", d),
- lwd = 2, ylab = "", cex.axis = 1.5,
- cex.lab = 1.5, cex.main = 1.5)) }
+for(dd in 1:3) {
+ pp <- predict(fit, x = ux, deriv = dd)
+\dontrun{with(pp, matplot(x, y, type = "l", main = paste("deriv =", dd),
+ lwd = 2, ylab = "", cex.axis = 1.5,
+ cex.lab = 1.5, cex.main = 1.5)) }
}
}
\keyword{regression}
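The example above builds a 3 by 2 constraint matrix mat and plots fit2, but the call that creates fit2 lies outside this hunk. A hedged sketch of how such a constrained fit would typically be specified, using the iconstraint and xconstraint arguments documented earlier in this help file, is:

# Sketch only: constrain the component functions via 'mat' from above.
fit2 <- vsmooth.spline(x, y, w = ww, df = 5,
                       iconstraint = mat, xconstraint = mat)
# Rows 1 and 3 of 'mat' are equal, so the 1st and 3rd functions share a
# common smooth, which is consistent with the matching colours in mycols.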
diff --git a/man/weibull.Rd b/man/weibull.Rd
index e1b2df3..b4fe17c 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -85,7 +85,7 @@ weibull(lshape = "loge", lscale = "loge",
Summarized in Harper et al. (2011),
for inference, there are 4 cases to consider.
- If \eqn{a \leq 1} then the MLEs are not consistent
+ If \eqn{a \le 1} then the MLEs are not consistent
(and the smallest observation becomes a hyperefficient
solution for the location parameter in the 3-parameter case).
If \eqn{1 < a < 2} then MLEs exist but are not asymptotically normal.
@@ -205,6 +205,7 @@ Concerns about Maximum Likelihood Estimation for
\seealso{
\code{\link[stats:Weibull]{dweibull}},
+ \code{\link{truncweibull}},
\code{\link{gev}},
\code{\link{lognormal}},
\code{\link{expexp}}.
diff --git a/man/wffc.P2star.Rd b/man/wffc.P2star.Rd
index 1e83dc7..1d49982 100644
--- a/man/wffc.P2star.Rd
+++ b/man/wffc.P2star.Rd
@@ -63,11 +63,14 @@ wffc.P3star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
% \url{http://www.http://san2010.pl}
% was the official 2010 website.
- Yee, T. W. (2011)
+
+ Yee, T. W. (2013)
On strategies and issues raised by an analysis of
the 2008 World Fly Fishing Championships data.
\emph{In preparation}.
+
+
}
\author{ T. W. Yee. }
@@ -77,7 +80,7 @@ wffc.P3star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
\code{wffc.P3} and \code{wffc.P3star}.
}
-\seealso{ \code{\link{wffc}}. }
+\seealso{ \code{\link[VGAM]{wffc}}. }
\examples{
\dontrun{ fishlength <- seq(0.0, 0.72, by = 0.001)
plot(fishlength, wffc.P2star(fishlength), type = "l", col = "blue",
diff --git a/man/wffc.Rd b/man/wffc.Rd
index fc88123..f0083ab 100644
--- a/man/wffc.Rd
+++ b/man/wffc.Rd
@@ -56,7 +56,7 @@ Wales (WAL).
}
\details{
- Details may be obtained at Yee (2010) and Yee (2010b).
+ Details may be obtained at Yee (2010) and Yee (2013).
Here is a brief summary.
The three competition days were 28--30 March.
Each session was fixed at 9.00am--12.00pm and 2.30--5.30pm daily.
@@ -115,10 +115,10 @@ is rather poor and furthermore they were not recorded electronically.
Note also that a few internal discrepancies may be found within
and between the data frames
- \code{\link{wffc}},
- \code{\link{wffc.nc}},
- \code{\link{wffc.indiv}},
- \code{\link{wffc.teams}}.
+ \code{\link[VGAM]{wffc}},
+ \code{\link[VGAM]{wffc.nc}},
+ \code{\link[VGAM]{wffc.indiv}},
+ \code{\link[VGAM]{wffc.teams}}.
This is due to various reasons, such as
competitors being replaced by reserves when sick,
fish that were included or excluded upon the local judge's decision,
@@ -130,10 +130,10 @@ is rather poor and furthermore they were not recorded electronically.
}
\seealso{
- \code{\link{wffc.indiv}},
- \code{\link{wffc.teams}},
- \code{\link{wffc.nc}},
- \code{\link{wffc.P1}}.
+ \code{\link[VGAM]{wffc.indiv}},
+ \code{\link[VGAM]{wffc.teams}},
+ \code{\link[VGAM]{wffc.nc}},
+ \code{\link[VGAM]{wffc.P1}}.
}
\source{
@@ -160,7 +160,7 @@ is rather poor and furthermore they were not recorded electronically.
\bold{101}, 116--126.
- Yee, T. W. (2011)
+ Yee, T. W. (2013)
On strategies and issues raised by an analysis of
the 2008 World Fly Fishing Championships data.
\emph{In preparation}.
diff --git a/man/wffc.indiv.Rd b/man/wffc.indiv.Rd
index 744a283..dbd0958 100644
--- a/man/wffc.indiv.Rd
+++ b/man/wffc.indiv.Rd
@@ -11,7 +11,7 @@
\usage{data(wffc.indiv)}
\format{
A data frame with 99 observations on the following 8 variables.
- Some of these variable are described in \code{\link{wffc}}.
+ Some of these variables are described in \code{\link[VGAM]{wffc}}.
\describe{
\item{\code{totalPlacings}}{a numeric vector; these are the summed
placings over the 5 sessions.}
@@ -28,7 +28,7 @@
}
\details{
This data frame gives the individual results of the competition.
- See also \code{\link{wffc}} and \code{\link{wffc.teams}} for more
+ See also \code{\link[VGAM]{wffc}} and \code{\link[VGAM]{wffc.teams}} for more
details and links.
diff --git a/man/wffc.nc.Rd b/man/wffc.nc.Rd
index 7ffa2ef..8c9895c 100644
--- a/man/wffc.nc.Rd
+++ b/man/wffc.nc.Rd
@@ -11,7 +11,7 @@
\usage{data(wffc.nc)}
\format{
A data frame with 475 observations on the following 7 variables.
- Most of these variable are described in \code{\link{wffc}}.
+ Most of these variables are described in \code{\link[VGAM]{wffc}}.
Each row is sorted by sector, session and beat.
\describe{
\item{\code{sector}}{a numeric vector.}
@@ -24,7 +24,7 @@
}
}
\details{
- This data frame was obtained by processing \code{\link{wffc}}.
+ This data frame was obtained by processing \code{\link[VGAM]{wffc}}.
The key variable is \code{numbers}, which is
sector-session-beat specific.
@@ -47,7 +47,7 @@
}
\seealso{
-\code{\link{DeLury}}.
+\code{\link[VGAM]{DeLury}}.
}
diff --git a/man/wffc.teams.Rd b/man/wffc.teams.Rd
index 4642edc..50c15d0 100644
--- a/man/wffc.teams.Rd
+++ b/man/wffc.teams.Rd
@@ -11,19 +11,19 @@
\usage{data(wffc.teams)}
\format{
A data frame with 18 observations on the following 5 variables.
- Some of these variable are described in \code{\link{wffc}}.
+ Some of these variables are described in \code{\link[VGAM]{wffc}}.
\describe{
\item{\code{country}}{a character vector.}
\item{\code{totalPlacings}}{a numeric vector; these are the summed
placings over the 5 sessions and 5 team members. }
- \item{\code{points}}{a numeric vector; see \code{\link{wffc}}.}
+ \item{\code{points}}{a numeric vector; see \code{\link[VGAM]{wffc}}.}
\item{\code{noofcaptures}}{a numeric vector.}
\item{\code{longestfish}}{a numeric vector.}
}
}
\details{
This data frame gives the team results of the competition.
- See also \code{\link{wffc}} and \code{\link{wffc.indiv}} for more
+ See also \code{\link[VGAM]{wffc}} and \code{\link[VGAM]{wffc.indiv}} for more
details and links.
diff --git a/man/xs.nz.Rd b/man/xs.nz.Rd
deleted file mode 100644
index 41a3cc6..0000000
--- a/man/xs.nz.Rd
+++ /dev/null
@@ -1,418 +0,0 @@
-\name{xs.nz}
-\alias{xs.nz}
-\docType{data}
-\title{
- Cross-sectional Data from the New Zealand Population
-
-}
-\description{
- A cross-sectional data set of a workforce company, plus
- another health survey, in New Zealand during the 1990s,
-
-%% ~~ A concise (1-5 lines) description of the dataset. ~~
-}
-\usage{data(xs.nz)}
-\format{
- A data frame with 10529 observations on the following 58 variables.
- For binary variables, a \code{"1"} or \code{TRUE} means \code{yes},
- and \code{"0"} or \code{FALSE} means \code{no}.
- Also, \code{"D"} means don't know,
- and \code{"-"} means not applicable.
- The pregnancy questions were administered to women only.
-
-
- \describe{
- \item{\code{regnum}}{a numeric vector,
- a unique registration number.
- This differs from their original registration number,
- and the rows are sorted by their new registration number.
-
- }
- \item{\code{Study1}}{a logical vector, Study 1 (workforce) or Study 2?
-
- }
- \item{\code{age}}{a numeric vector, age in years.
-
- }
- \item{\code{sex}}{a factor with levels \code{F} and \code{M}.
-
- }
- \item{\code{pulse}}{a numeric vector, beats per minute.
-
- }
- \item{\code{sbp}}{a numeric vector, systolic blood pressure (mm Hg).
-
- }
- \item{\code{dbp}}{a numeric vector, diastolic blood pressure
- (mm Hg).
-
- }
- \item{\code{cholest}}{a numeric vector, cholesterol (mmol/L).
-
- }
- \item{\code{height}}{a numeric vector, in m.
-
- }
- \item{\code{weight}}{a numeric vector, in kg.
-
- }
- \item{\code{famheart}}{a factor with levels \code{0}, \code{1},
- \code{D}.
- Has a family history of heart disease (heart attack, angina, or
- had a heart bypass operation) within the immediate
- family (brother, sister, father or mother, blood relatives only)?
-
-
- }
- \item{\code{famage}}{a factor, following from \code{famheart},
- if yes, how old was the family member when it happened (if
- more than one family member, give the age of the
- youngest person)?
-
-
- }
- \item{\code{famcan}}{a factor with levels \code{0}, \code{1},
- \code{D}.
- Has a family history of cancer within the immediate
- family (blood relatives only)?
-
-
- }
- \item{\code{heart}}{a factor, have you ever been told by
- a doctor that you have had a heart attack ("coronary")?
-
- }
- \item{\code{stroke}}{a numeric vector, have you ever been told by
- a doctor that you have had a stroke?
-
- }
- \item{\code{diabetes}}{a numeric vector, have you ever been told by
- a doctor that you have had diabetes?
-
-
- }
- \item{\code{hyper}}{a numeric vector, have you ever been told by
- a doctor that you have had high blood pressure (hypertension)?
-
- }
- \item{\code{hichol}}{a numeric vector, have you ever been told by
- a doctor that you have had high cholesterol?
-
-
- }
- \item{\code{asthma}}{a numeric vector, have you ever been told by
- a doctor that you have had asthma?
-
-
- }
- \item{\code{cancer}}{a numeric vector, have you ever been told by
- a doctor that you have had cancer?
-
- }
- \item{\code{acne}}{a numeric vector, have you ever
- received treatment from a doctor for acne (pimples)?
-
-
- }
- \item{\code{sunburn}}{a numeric vector, have you ever
- received treatment from a doctor for sunburn?
-
- }
- \item{\code{smokeever}}{a numeric vector, have you ever
- smoked tailor-made or roll-you-own cigarettes once a
- week or more?
-
-
- }
- \item{\code{smokenow}}{a numeric vector,
- do you smoke tailor-made or roll-you-own cigarettes now?
-
- }
- \item{\code{smokeagequit}}{a factor,
- if no to \code{smokenow}, how old were you when
- you stopped smoking?
-
-
- }
- \item{\code{smokehowmany}}{a numeric vector,
- if yes to \code{smokeever}, for how many years altogether
- have you smoked tailor-made or roll-you-own cigarettes?
-
-
- }
- \item{\code{alcmonth}}{a numeric vector,
- do you drink alcohol once a month or more?
-
- }
- \item{\code{drinkfreqweek}}{a numeric vector,
- if yes to \code{alcmonth}, about how often do you
- drink alcohol (days per week)?
- Note: 0.25 is once a month,
- 0.5 is once every two weeks,
- 1 is once a week,
- 2.5 is 2-3 days a week,
- 4.5 is 4-5 days a week,
- 6.5 is 6-7 days a week.
-
- Further note:
- 1 can, small bottle or handle of beer or home brew = 1 drink,
- 1 quart bottle of beer = 2 drinks,
- 1 jug of beer = 3 drinks,
- 1 flagon/peter of beer = 6 drinks,
- 1 glass of wine, sherry = 1 drink,
- 1 bottle of wine = 6 drinks,
- 1 double nip of spirits = 1 drink.
-
-
- }
- \item{\code{drinkweek}}{a numeric vector,
- how many drinks per week, on average.
- This is the average daily amount of drinks multiplied
- by the frequency of drinking per week.
- See \code{drinkfreqweek} on what constitutes a 'drink'.
-
-
-
- }
- \item{\code{drinkmaxday}}{a numeric vector,
- in the last three months, what is the largest number of
- drinks that you had on any one day?
-
-
- }
- \item{\code{pregnant}}{a factor,
- have you ever been pregnant for more than 5 months?
-
-
- }
- \item{\code{pregfirst}}{a factor, if
- yes to \code{pregnant}, how old were you when your first
- baby was born (or you had a miscarriage after 5 months)?
-
- }
- \item{\code{preglast}}{a factor, how old were you when your last
- baby was born (or you had a miscarriage after 5 months)?
-
-
- }
- \item{\code{babies}}{a factor,
- how many babies have you given birth to?
-
-
- }
- \item{\code{mood}}{a numeric vector,
- does your mood often go up or down?
-
-
- }
- \item{\code{miserab}}{a numeric vector,
- do you ever feel 'just miserable' for no reason?
-
-
- }
- \item{\code{hurt}}{a numeric vector,
- are your feelings easily hurt?
-
- }
- \item{\code{fedup}}{a numeric vector,
- do you often feel 'fed up'?
-
- }
- \item{\code{nervous}}{a numeric vector,
- would you call yourself a nervous person?
-
- }
- \item{\code{worrier}}{a numeric vector,
- are you a worrier?
-
- }
- \item{\code{worry}}{a numeric vector,
- do you worry about awful things that might happen?
-
- }
- \item{\code{tense}}{a numeric vector,
- would you call yourself tense or 'highly strung'?
-
- }
- \item{\code{embarrass}}{a numeric vector,
- do you worry too long after an embarrassing
- experience?
-
- }
- \item{\code{nerves}}{a numeric vector,
- do you suffer from 'nerves'?
-
- }
- \item{\code{friend}}{a numeric vector,
- do you have a friend or family member that you
- can talk to about problems or worries that you may have?
-
- }
- \item{\code{depress}}{a numeric vector,
- in your lifetime, have you ever had two weeks or more
- when nearly every day you felt sad or depressed?
-
- }
- \item{\code{exervig}}{a numeric vector,
- how many hours per week would you do any vigorous
- activity or exercise either at work or away from
- work that makes you breathe hard and sweat?
- Values here ought be be less than 168.
-
-
- }
- \item{\code{exermod}}{a numeric vector,
- how many hours per week would you do any moderate
- activity or exercise such as brisk walking, cycling or
- mowing the lawn?
- Values here ought be be less than 168.
-
-
- }
- \item{\code{hourfeet}}{a numeric vector,
- on an average work day, how long would you spend on your
- feet, either standing or moving about?
-
-
- }
- \item{\code{ethnic}}{a factor with 4 levels,
- what ethnic group do you belong to?
- 0 = European (NZ European or British or other European),
- 1 = Maori,
- 2 = Pacific Island Polynesian,
- 3 = Other (Chinese, Indian, Other).
-
-
- }
- \item{\code{sleep}}{a numeric vector,
- how many hours do you usually sleep each night?
-
-
- }
- \item{\code{snore}}{a factor with levels \code{0}, \code{1},
- \code{D}.
- Do you usually snore?
-
-
- }
- \item{\code{cat}}{a numeric vector,
- do you have a household pet? If yes, is it a cat?
-
- }
- \item{\code{dog}}{a numeric vector,
- do you have a household pet? If yes, is it a dog?
-
-
- }
- \item{\code{hand}}{a factor with levels
- \code{0} = right,
- \code{1} = left,
- \code{2} = either.
- Are you right-handed, left-handed, or no preference for left
- or right?
-
-
- }
- \item{\code{nhouse}}{an ordered factor with 4 levels,
- how many people (including yourself) usually live in your house?
- 1 = 1, 2 = 2, 3 = 3, 4 = four or more.
-
-
- }
- \item{\code{marital}}{a factor with 4 levels:
- \code{1} = single,
- \code{2} = married or living with a partner,
- \code{3} = separated or divorced,
- \code{4} = widowed.
-
-
- }
- \item{\code{educ}}{an ordered factor with 4 levels.
- What was the highest level of education you received?
- Primary school = \code{1},
- High school/secondary school = \code{2},
- Polytechnic or similar = \code{3},
- University = \code{4}.
-
- }
- }
-}
-\details{
-%% ~~ If necessary, more details than the __description__ above ~~
-
-The data frame is a subset of the entire data set which was
-collected from a confidential self-administered questionnaire
-administered in a large New Zealand workforce observational
-study conducted during 1992--3. The data were augmented
-by a second study consisting of retirees. The data can be
-considered a reasonable representation of the white male New
-Zealand population in the early 1990s. There were physical,
-lifestyle and psychological variables that were measured.
-The psychological variables were headed
-"Questions about your feelings".
-
-
-Although some data cleaning was performed and logic checks
-conducted, anomalies remain. Some variables, of course,
-are subject to a lot of measurement error and bias. It is
-conceivable that some participants had poor reading skills!
-
-
-}
-\source{
-
- Originally,
- Clinical Trials Research Unit, University of Auckland, New Zealand,
- \url{http://www.ctru.auckland.ac.nz}.
-
-
-%% ~~ reference to a publication or URL from which the data were obtained ~~
-
-% MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
-% Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
-% Fletcher Challenge-University of Auckland Heart &
-% Health Study: design and baseline findings.
-% \emph{New Zealand Medical Journal},
-% \bold{108}, 499--502.
-
-
-
-}
-\references{
-
- MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
- Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
- Fletcher Challenge-University of Auckland Heart &
- Health Study: design and baseline findings.
- \emph{New Zealand Medical Journal},
- \bold{108}, 499--502.
-
-}
-
-\seealso{
- \code{\link{chest.nz}}.
-
-
-
-}
-\section{Warning }{
- More variables may be added in the future and these
- may be placed in any column position. Therefore
- references such as \code{xs.nz[, 12]} are dangerous.
-
-
-}
-
-
-\examples{
-data(xs.nz)
-summary(xs.nz)
-
-# Handling of factors requires care
-is.factor(xs.nz$babies) # TRUE
-summary(xs.nz$babies) # Note the "-"s
-charbabies <- as.character(xs.nz$babies)
-summary(as.numeric(charbabies)) # "-"s converted to NAs + warning
-table(as.numeric(charbabies)) # Ditto
-}
-\keyword{datasets}
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index 7b7db32..d893524 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -27,6 +27,7 @@ zabinomial(lprob = "logit", lpobs0 = "logit",
}
\item{iprob, ipobs0}{
+ See
\code{\link{CommonVGAMffArguments}}.
}
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index d3b392a..f795625 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -61,7 +61,7 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
call the zero-altered geometric a \emph{hurdle} model.
- The input can be a matrix.
+ The input can be a matrix (multiple responses).
By default, the two linear/additive
predictors are \eqn{(\log(\phi), logit(p))^T}{(log(phi), logit(prob))^T}.
@@ -112,6 +112,7 @@ zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
However, \code{posgeometric()} is not written because it
is trivially related to \code{\link{geometric}}.
+
}
\seealso{
@@ -131,10 +132,11 @@ zdata <- transform(zdata,
pobs0 = logit(-1 + 2*x2, inverse = TRUE),
prob = logit(-2 + 3*x2, inverse = TRUE))
zdata <- transform(zdata,
- y1 = rzageom(nn, prob = prob, pobs0 = pobs0))
+ y1 = rzageom(nn, prob = prob, pobs0 = pobs0),
+ y2 = rzageom(nn, prob = prob, pobs0 = pobs0))
with(zdata, table(y1))
-fit <- vglm(y1 ~ x2, zageometric, zdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, zageometric, zdata, trace = TRUE)
coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
diff --git a/man/zeta.Rd b/man/zeta.Rd
index d668ffb..0d40cef 100644
--- a/man/zeta.Rd
+++ b/man/zeta.Rd
@@ -105,25 +105,27 @@ New York: Dover Publications Inc.
zeta(2:10)
\dontrun{
-curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange")
+curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange",
+ las = 1, main = expression({zeta}(x)))
curve(zeta, 1.2, 12, add = TRUE, col = "orange")
-abline(v = 0, h = c(0, 1), lty = "dashed")
+abline(v = 0, h = c(0, 1), lty = "dashed", col = "gray")
-curve(zeta, -14, -0.4, col = "orange") # Close up plot
-abline(v = 0, h = 0, lty = "dashed")
+# Close up plot:
+curve(zeta, -14, -0.4, col = "orange", main = expression({zeta}(x)))
+abline(v = 0, h = 0, lty = "dashed", col = "gray")
-x <- seq(0.04, 0.8, len = 100) # Plot of the first derivative
+x <- seq(0.04, 0.8, len = 100) # Plot of the first derivative
plot(x, zeta(x, deriv = 1), type = "l", las = 1, col = "blue",
xlim = c(0.04, 3), ylim = c(-6, 0), main = "zeta'(x)")
x <- seq(1.2, 3, len = 100)
lines(x, zeta(x, deriv = 1), col = "blue")
-abline(v = 0, h = 0, lty = "dashed") }
+abline(v = 0, h = 0, lty = "dashed", col = "gray") }
-zeta(2) - pi^2 / 6 # Should be zero
-zeta(4) - pi^4 / 90 # Should be zero
-zeta(6) - pi^6 / 945 # Should be 0
-zeta(8) - pi^8 / 9450 # Should be 0
-# zeta(0, deriv = 1) + 0.5 * log(2*pi) # Should be 0
+zeta(2) - pi^2 / 6 # Should be zero
+zeta(4) - pi^4 / 90 # Should be zero
+zeta(6) - pi^6 / 945 # Should be 0
+zeta(8) - pi^8 / 9450 # Should be 0
+# zeta(0, deriv = 1) + 0.5 * log(2*pi) # Should be 0
}
\keyword{math}
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 4cd7b76..4236bf6 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -91,15 +91,30 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
specifying the values of \eqn{N}.
- To work well, one needs \eqn{N>1} and \eqn{\mu>0}{prob>0}, i.e.,
+ To work well, one needs large values of \eqn{N}
+ and \eqn{\mu>0}{prob>0}, i.e.,
the larger \eqn{N} and \eqn{\mu}{prob} are, the better.
+ If \eqn{N = 1} then the model is unidentifiable since
+ the number of parameters is excessive.
+
+
+ Setting \code{stepsize = 0.5}, say, may aid convergence.
+
+
+
+
+% 20130316; commenting out this:
+% For intercept-models and constant \eqn{N} over the \eqn{n} observations,
+% the \code{misc} slot has a component called \code{pobs0} which is the
+% estimate of the probability of an observed 0, i.e., \eqn{P(Y=0)}.
+% This family function currently cannot handle a multivariate
+% response (only \code{mv = FALSE} can be handled).
+
+% 20130316; adding this:
+ Estimated probabilities of a structural zero and an
+ observed zero are returned, as in \code{\link{zipoisson}}.
- For intercept-models and constant \eqn{N} over the \eqn{n} observations,
- the \code{misc} slot has a component called \code{pobs0} which is the
- estimate of the probability of an observed 0, i.e., \eqn{P(Y=0)}.
- This family function currently cannot handle a multivariate
- response (only \code{mv = FALSE} can be handled).
The zero-\emph{deflated} binomial distribution might
@@ -129,7 +144,7 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
}
\examples{
-size <- 10 # Number of trials; N in the notation above
+size <- 10 # Number of trials; N in the notation above
nn <- 200
zibdata <- data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
mubin = logit(-1, inverse = TRUE), # Mean of usual binomial
@@ -138,12 +153,13 @@ zibdata <- transform(zibdata,
y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zibdata, table(y))
fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE, stepsize = 0.5)
coef(fit, matrix = TRUE)
-Coef(fit) # Useful for intercept-only models
+Coef(fit) # Useful for intercept-only models
fit@misc$pobs0 # Estimate of P(Y = 0)
head(fitted(fit))
-with(zibdata, mean(y)) # Compare this with fitted(fit)
+with(zibdata, mean(y)) # Compare this with fitted(fit)
summary(fit)
}
\keyword{models}
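The new note above suggests that setting stepsize = 0.5 may aid convergence, and the amended example passes it straight to vglm(). An alternative sketch that should be equivalent, setting it through vglm.control() explicitly and reusing the zibdata, y and sv objects from the example, is:

# Sketch: half-stepping requested via the control function instead of '...'
fit.half <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata,
                 trace = TRUE, control = vglm.control(stepsize = 0.5))
coef(fit.half, matrix = TRUE)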
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index b9a7e93..4909689 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -56,6 +56,12 @@ zigeometric(lprob = "logit", lpstr0 = "logit",
= (1-phi) * prob / (1-prob)} and these are returned as the fitted values.
By default, the two linear/additive predictors are \eqn{(logit(p),
logit(\phi))^T}{(logit(prob), logit(phi))^T}.
+ Multiple responses are handled.
+
+
+% 20130316:
+ Estimated probabilities of a structural zero and an
+ observed zero are returned as in \code{\link{zipoisson}}.
}
@@ -121,12 +127,9 @@ head(gdata)
fit1 <- vglm(y1 ~ x2 + x3 + x4, zigeometric, gdata, trace = TRUE)
coef(fit1, matrix = TRUE)
-fit2 <- vglm(y2 ~ 1, zigeometric, gdata, trace = TRUE)
+fit2 <- vglm(cbind(y2, y3) ~ 1, zigeometric, gdata, trace = TRUE)
coef(fit2, matrix = TRUE)
-
-fit3 <- vglm(y3 ~ 1, zigeometric, gdata, trace = TRUE)
-coef(fit3, matrix = TRUE)
-summary(fit3)
+summary(fit2)
}
\keyword{models}
\keyword{regression}
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 6851b11..30dee03 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -91,9 +91,17 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
%}
\author{ T. W. Yee }
\note{
- For intercept-models, the \code{misc} slot has a component called
- \code{pobs0} which is the estimate of \eqn{P(Y=0)}.
- Note that \eqn{P(Y=0)} is not the parameter \eqn{\phi}{phi}.
+
+% 20130316: commenting out this:
+% For intercept-models, the \code{misc} slot has a component called
+% \code{pobs0} which is the estimate of \eqn{P(Y=0)}.
+% Note that \eqn{P(Y=0)} is not the parameter \eqn{\phi}{phi}.
+
+
+% 20130316: adding this:
+ Estimated probabilities of a structural zero and an
+ observed zero are returned, as in \code{\link{zipoisson}}.
+
If \eqn{k} is large then the use of \pkg{VGAM} family function
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index 4f720d5..0ab6ecc 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -23,17 +23,20 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
Argument \code{lmu12} should be left alone.
Argument \code{imu12} may be of length 2 (one element for each response).
+
}
\item{lphi12}{
Link function applied to the \eqn{\phi}{phi} parameter of the
zero-inflated Poisson distribution (see \code{\link{zipoisson}}).
See \code{\link{Links}} for more choices.
+
}
\item{loratio}{
Link function applied to the odds ratio.
See \code{\link{Links}} for more choices.
+
}
\item{iphi12, ioratio}{
Optional initial values for \eqn{\phi}{phi} and the odds ratio.
@@ -44,6 +47,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
If inputted, the value of \code{iphi12} cannot be more than the sample
proportions of zeros in either response.
+
}
% \item{ephi12, eoratio}{
@@ -59,11 +63,13 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
not being modelled as a function of the explanatory variables (apart
from an intercept).
+
}
\item{tol}{
Tolerance for testing independence.
Should be some small positive numerical value.
+
}
\item{addRidge}{
Some small positive numerical value.
@@ -71,6 +77,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
multiplied by \code{1+addRidge} to make it diagonally dominant,
therefore positive-definite.
+
}
}
\details{
@@ -201,37 +208,38 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
\code{\link{binom2.or}},
\code{\link{zipoisson}},
\code{\link{cloglog}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{posbernoulli.tb}}.
}
\examples{
-mydat <- data.frame(x = seq(0, 1, len=(nsites <- 2000)))
-mydat <- transform(mydat, eta1 = -3 + 5 * x,
- phi1 = logit(-1, inverse=TRUE),
+zdata <- data.frame(x2 = seq(0, 1, len = (nsites <- 2000)))
+zdata <- transform(zdata, eta1 = -3 + 5 * x2,
+ phi1 = logit(-1, inverse = TRUE),
oratio = exp(2))
-mydat <- transform(mydat, mu12 = cloglog(eta1, inverse = TRUE) * (1-phi1))
-tmat <- with(mydat, rbinom2.or(nsites, mu1 = mu12, oratio = oratio, exch = TRUE))
-mydat <- transform(mydat, ybin1 = tmat[,1], ybin2 = tmat[,2])
+zdata <- transform(zdata, mu12 = cloglog(eta1, inverse = TRUE) * (1-phi1))
+tmat <- with(zdata, rbinom2.or(nsites, mu1 = mu12, oratio = oratio, exch = TRUE))
+zdata <- transform(zdata, ybin1 = tmat[, 1], ybin2 = tmat[, 2])
-with(mydat, table(ybin1,ybin2)) / nsites # For interest only
+with(zdata, table(ybin1, ybin2)) / nsites # For interest only
\dontrun{
# Various plots of the data, for interest only
par(mfrow = c(2, 2))
-plot(jitter(ybin1) ~ x, data = mydat, col = "blue")
+plot(jitter(ybin1) ~ x2, data = zdata, col = "blue")
-plot(jitter(ybin2) ~ jitter(ybin1), data = mydat, col = "blue")
+plot(jitter(ybin2) ~ jitter(ybin1), data = zdata, col = "blue")
-plot(mu12 ~ x, data = mydat, col = "blue", type = "l", ylim = 0:1,
+plot(mu12 ~ x2, data = zdata, col = "blue", type = "l", ylim = 0:1,
ylab = "Probability", main = "Marginal probability and phi")
-with(mydat, abline(h = phi1[1], col = "red", lty = "dashed"))
+with(zdata, abline(h = phi1[1], col = "red", lty = "dashed"))
-tmat2 <- with(mydat, dbinom2.or(mu1 = mu12, oratio = oratio, exch = TRUE))
-with(mydat, matplot(x, tmat2, col = 1:4, type = "l", ylim = 0:1,
+tmat2 <- with(zdata, dbinom2.or(mu1 = mu12, oratio = oratio, exch = TRUE))
+with(zdata, matplot(x2, tmat2, col = 1:4, type = "l", ylim = 0:1,
ylab = "Probability", main = "Joint probabilities")) }
# Now fit the model to the data.
-fit <- vglm(cbind(ybin1, ybin2) ~ x, fam = zipebcom, dat = mydat, trace = TRUE)
+fit <- vglm(cbind(ybin1, ybin2) ~ x2, zipebcom, dat = zdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
vcov(fit)
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index c0e9ff8..516068e 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -93,7 +93,6 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
(1-phi)*lambda*(1 + phi lambda)}.
By default, the two linear/additive predictors are
\eqn{(logit(\phi), \log(\lambda))^T}{(logit(phi), log(lambda))^T}.
- This function implements Fisher scoring.
The \pkg{VGAM} family function \code{zipoissonff()} has a few
@@ -107,8 +106,10 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
(iv) argument \code{zero} has a new default so that the \code{probp}
is an intercept-only by default.
Now \code{zipoissonff()} is generally recommended over
- \code{zipoisson()}, and definitely recommended over
- \code{\link{yip88}}.
+ \code{zipoisson()} (and definitely recommended over \code{\link{yip88}}).
+ Both functions implement Fisher scoring and can handle
+ multiple responses.
+
}
@@ -139,18 +140,19 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
Cambridge University Press: Cambridge.
- Yee, T. W. (2012)
- Two-parameter reduced-rank vector generalized linear models.
- \emph{In preparation}.
+ Yee, T. W. (2013)
+ Reduced-rank vector generalized linear models with two linear predictors.
+ \emph{Computational Statistics and Data Analysis}.
}
\author{ T. W. Yee }
\note{
- For intercept-models, the \code{misc} slot has a component called
- \code{p0} which is the estimate of \eqn{P(Y = 0)}. Note that
- \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}. This family
- function currently cannot handle a multivariate response.
+ The \code{misc} slot has a component called
+ \code{pobs0} which is the estimate of \eqn{P(Y = 0)}.
+ Note that \eqn{P(Y = 0)} is not the parameter \eqn{\phi}{phi}.
+ The estimated probability of a structural 0 is returned in
+ the \code{misc} slot with component name \code{pstr0}.
Although the functions in \code{\link{Zipois}}
@@ -212,6 +214,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
}
\examples{
# Example 1: simulated ZIP data
+set.seed(123)
zdata <- data.frame(x2 = runif(nn <- 2000))
zdata <- transform(zdata, pstr01 = logit(-0.5 + 1*x2, inverse = TRUE),
pstr02 = logit( 0.5 - 1*x2, inverse = TRUE),
@@ -233,6 +236,15 @@ coef(fit2, matrix = TRUE) # These should agree with the above values
fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, zdata, crit = "coef")
coef(fit12, matrix = TRUE) # These should agree with the above values
+# For the first observation compute the probability that y1 is
+# due to a structural zero.
+head(zdata, 1)
+pfit1 <- predict(fit1, zdata[1, ])
+pstr0 <- logit(pfit1[1], inverse = TRUE)
+lambda <- loge(pfit1[2], inverse = TRUE)
+(prob.struc.0 <- pstr0 / dzipois(x = 0, lambda = lambda, pstr0 = pstr0))
+
+
# Example 2: McKendrick (1926). Data from 223 Indian village households
cholera <- data.frame(ncases = 0:4, # Number of cholera cases,
wfreq = c(168, 32, 16, 6, 1)) # Frequencies
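Per the revised note above, the fitted object's misc slot carries both the estimate of P(Y = 0) (component pobs0) and the estimated probability of a structural zero (component pstr0). A short sketch, assuming the fit1 object from Example 1:

head(fit1@misc$pobs0)  # Estimated P(Y = 0)
head(fit1@misc$pstr0)  # Estimated probability of a structural zero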
diff --git a/src/fgam.f b/src/fgam.f
index f6ea0d4..43d4779 100644
--- a/src/fgam.f
+++ b/src/fgam.f
@@ -48,7 +48,7 @@ c values.
c
integer k,left,nderiv, i,ideriv,il,j,jlow,jp1mid,kp1,kp1mm,
* ldummy,m,mhigh
- double precision a(k,k),dbiatx(k,nderiv),t(1),x
+ double precision a(k,k),dbiatx(k,nderiv),t(*),x
double precision factor,fkp1mm,sum
mhigh = max0(min0(nderiv,k),1)
c mhigh is usually equal to nderiv.
@@ -186,7 +186,7 @@ c gorithm (8) in chapter x of the text.
c
parameter(jmax = 20)
integer index,jhigh,left, i,j,jp1
- double precision biatx(jhigh),t(1),x, deltal(jmax)
+ double precision biatx(jhigh),t(*),x, deltal(jmax)
double precision deltar(jmax),saved,term
c dimension biatx(jout), t(left+jout)
current fortran standard makes it impossible to specify the length of
@@ -277,7 +277,7 @@ c be the desired number (d**j)f(x). (see x.(17)-(19) of text).
c
parameter(kmax = 20)
integer jderiv,k,n, i,ilo,imk,j,jc,jcmin,jcmax,jj,km1,mflag,nmi
- double precision bcoef(n),t(1),x
+ double precision bcoef(n),t(*),x
double precision aj(kmax),dm(kmax),dp(kmax),fkmj
c dimension t(n+k)
current fortran standard makes it impossible to specify the length of
@@ -476,7 +476,14 @@ c T.Yee has renamed dbpbfa to dbpbfa8 and dpbsl to dpbsl8, to ensure uniqueness
subroutine dpbfa8(abd,lda,n,m,info)
integer lda,n,m,info
- double precision abd(lda,1)
+ double precision abd(lda,*)
+c
+c
+c 20130419; Originally:
+c double precision abd(lda,1)
+c
+c
+c
c
c dpbfa8 factors a double precision symmetric positive definite
c matrix stored in band form.
@@ -571,7 +578,12 @@ c ......exit
subroutine dpbsl8(abd,lda,n,m,b)
integer lda,n,m
- double precision abd(lda,1),b(1)
+ double precision abd(lda,*),b(*)
+c
+c
+c 20130419; originally:
+c double precision abd(lda,1),b(1)
+c
c
c dpbsl8 solves the double precision symmetric positive definite
c band system a*x = b
diff --git a/src/rgam.f b/src/rgam.f
index e004e26..1853d5b 100644
--- a/src/rgam.f
+++ b/src/rgam.f
@@ -6,7 +6,7 @@
&fbd5yktj
double precision penalt, pjb6wfoq, xs(kuzxj1lo), ys(kuzxj1lo), ws(
&kuzxj1lo), ankcghz2(nk+4), coef(nk), sz(kuzxj1lo), ifys6woa(
- &kuzxj1lo), qcpiaj7f, wbkq9zyi, parms(3), scrtch(1)
+ &kuzxj1lo), qcpiaj7f, wbkq9zyi, parms(3), scrtch(*)
call hbzuprs6(penalt,pjb6wfoq,xs,ys,ws, kuzxj1lo,nk, ankcghz2,
&coef,sz,ifys6woa, qcpiaj7f,l3zpbstu(1),wbkq9zyi,l3zpbstu(2),
&l3zpbstu(3), parms(1),parms(2),parms(3), gp0xjetb, scrtch(1),
diff --git a/src/rgam3.c b/src/rgam3.c
index ae35250..cc81eee 100644
--- a/src/rgam3.c
+++ b/src/rgam3.c
@@ -75,6 +75,7 @@ void n5aioudkdnaoqj0l(double *qgnl3toc,
+
double *wkumc9idxwy, *wkumc9idbuhyalv4,
*wkumc9idzvau2lct, *wkumc9idf6lsuzax, *wkumc9idfvh2rwtc, *wkumc9iddcfir2no,
*wkumc9idfulcp8wa, *wkumc9idplj0trqx;
@@ -108,6 +109,8 @@ void n5aioudkdnaoqj0l(double *qgnl3toc,
}
+
+
void n5aioudkhbzuprs6(double *qgnl3toc,
double sjwyig9t[], double bhcji9gl[], double po8rwsmy[],
int *kuzxj1lo, int *acpios9q, double gkdx5jal[],
@@ -136,6 +139,9 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
*qcpiaj7f, qcpiaj7f0 = 0.0;
+ qcpiaj7f = &qcpiaj7f0;
+
+
g2dnwteb = bk3ymcih;
bk3ymcih += bk3ymcih;
@@ -148,7 +154,9 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
- qcpiaj7f = &qcpiaj7f0,
+
+
+
@@ -190,24 +198,24 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no);
for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) {
- tt1 += zvau2lct[ayfnwr1v-1];
+ tt1 += zvau2lct[ayfnwr1v-1];
}
g2dnwteb = tt1 / *tt2;
if (*pn9eowxc == 1) {
- *mynl7uaq = g2dnwteb * pow(16.0, *wbkq9zyi * 6.0 - 2.0);
- n5aioudkwmhctl9x(qgnl3toc, sjwyig9t,
- po8rwsmy, kuzxj1lo, acpios9q,
- pn9eowxc, // icrit, (icrit used to be used solely)
- gkdx5jal, rpyis2kc, imdvf4hx,
- ifys6woa, mynl7uaq, xwy,
- qcpiaj7f, // Not used here
- zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no,
- xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
- buhyalv4, fulcp8wa, plj0trqx,
- xtov9rbf, wep0oibc, fbd5yktj);
- return;
+ *mynl7uaq = g2dnwteb * pow(16.0, *wbkq9zyi * 6.0 - 2.0);
+ n5aioudkwmhctl9x(qgnl3toc, sjwyig9t,
+ po8rwsmy, kuzxj1lo, acpios9q,
+ pn9eowxc, // icrit, (icrit used to be used solely)
+ gkdx5jal, rpyis2kc, imdvf4hx,
+ ifys6woa, mynl7uaq, xwy,
+ qcpiaj7f, // Not used here
+ zvau2lct, f6lsuzax, fvh2rwtc, dcfir2no,
+ xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
+ buhyalv4, fulcp8wa, plj0trqx,
+ xtov9rbf, wep0oibc, fbd5yktj);
+ return;
}
@@ -285,8 +293,9 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
wkumc9idp >= wkumc9idq * (wkumc9idb - wkumc9idx)) goto a3bdsirf;
wkumc9idd = wkumc9idp / wkumc9idq;
- if(!R_FINITE(wkumc9idd))
- Rprintf("Error in n5aioudkhbzuprs6: wkumc9idd is not finite.\n");
+
+
+
wkumc9idu = wkumc9idx + wkumc9idd;
@@ -346,6 +355,9 @@ ceqzd1hi50: wkumc9idu = wkumc9idx +
+
+
+
void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], double v2eydbxs[],
double gkdx5jal[], int *acpios9q) {
@@ -360,7 +372,7 @@ void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], d
- qnwamo0e0 = xecbg0pf; qnwamo0e1 = z4grbpiq; qnwamo0e2 = d7glzhbj; qnwamo0e3 = v2eydbxs;
+ qnwamo0e0 = xecbg0pf; qnwamo0e1 = z4grbpiq; qnwamo0e2 = d7glzhbj; qnwamo0e3 = v2eydbxs;
for (ayfnwr1v = 0; ayfnwr1v < *acpios9q; ayfnwr1v++) {
*qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = 0.0e0;
}
@@ -479,6 +491,8 @@ void n5aioudkzosq7hub(double xecbg0pf[], double z4grbpiq[], double d7glzhbj[], d
}
+
+
void n5aioudkvmnweiy2(double buhyalv4[], double fulcp8wa[], double plj0trqx[], int *xtov9rbf,
int *acpios9q, int *wep0oibc, int *iflag) {
@@ -628,9 +642,9 @@ void n5aioudkwmhctl9x(double *qgnl3toc, double sjwyig9t[],
F77_CALL(dpbfa8)(buhyalv4, xtov9rbf, acpios9q, &bvsquk3z, algpft4y);
if (*algpft4y != 0) {
- Rprintf("In C function wmhctl9x; Error:\n");
- Rprintf("Leading minor of order %d is not pos-def\n", *algpft4y);
- return;
+ Rprintf("In C function wmhctl9x; Error:\n");
+ Rprintf("Leading minor of order %d is not pos-def\n", *algpft4y);
+ return;
}
F77_CALL(dpbsl8)(buhyalv4, xtov9rbf, acpios9q, &bvsquk3z, rpyis2kc);
@@ -649,50 +663,51 @@ void n5aioudkwmhctl9x(double *qgnl3toc, double sjwyig9t[],
chw8lzty = sjwyig9t;
for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) {
- F77_CALL(vinterv)(gkdx5jal, &nkp1, chw8lzty, &dqlr5bse, &pqzfxw4i);
+ F77_CALL(vinterv)(gkdx5jal, &nkp1, chw8lzty, &dqlr5bse, &pqzfxw4i);
- if (pqzfxw4i == -1) {
- dqlr5bse = 4;
- *chw8lzty = gkdx5jal[3] + qaltf0nz;
- } else
- if (pqzfxw4i == 1) {
- dqlr5bse = *acpios9q;
- *chw8lzty = gkdx5jal[*acpios9q] - qaltf0nz;
- }
- yq6lorbx = dqlr5bse-3;
-
- F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, chw8lzty++, &dqlr5bse,
- ms0qypiw, g9fvdrbw, &pqneb2ra);
-
- b0 = g9fvdrbw[0]; b1 = g9fvdrbw[1]; b2 = g9fvdrbw[2]; b3 = g9fvdrbw[3];
-
- qtce8hzo = (b0 * (fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] * b0 +
- 2.0e0* (fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] * b1 +
- fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] * b2 +
- fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] * b3)) +
- b1 * (fulcp8wa[3 + yq6lorbx * *xtov9rbf] * b1 +
- 2.0e0* (fulcp8wa[2 + yq6lorbx * *xtov9rbf] * b2 +
- fulcp8wa[1 + yq6lorbx * *xtov9rbf] * b3)) +
- b2 * (fulcp8wa[3 + (yq6lorbx+1) * *xtov9rbf] * b2 +
- 2.0e0* fulcp8wa[2 + (yq6lorbx+1) * *xtov9rbf] * b3) +
- fulcp8wa[3 + (yq6lorbx+2) * *xtov9rbf] *
- pow(b3, (double) 2.0)) *
- po8rwsmy[ayfnwr1v-1];
- ifys6woa[ayfnwr1v-1] = qtce8hzo;
+ if (pqzfxw4i == -1) {
+ dqlr5bse = 4;
+ *chw8lzty = gkdx5jal[3] + qaltf0nz;
+ } else
+ if (pqzfxw4i == 1) {
+ dqlr5bse = *acpios9q;
+ *chw8lzty = gkdx5jal[*acpios9q] - qaltf0nz;
+ }
+ yq6lorbx = dqlr5bse-3;
+
+ F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, chw8lzty++, &dqlr5bse,
+ ms0qypiw, g9fvdrbw, &pqneb2ra);
+
+ b0 = g9fvdrbw[0]; b1 = g9fvdrbw[1]; b2 = g9fvdrbw[2]; b3 = g9fvdrbw[3];
+
+ qtce8hzo = (b0 * (fulcp8wa[3 + (yq6lorbx-1) * *xtov9rbf] * b0 +
+ 2.0e0* (fulcp8wa[2 + (yq6lorbx-1) * *xtov9rbf] * b1 +
+ fulcp8wa[1 + (yq6lorbx-1) * *xtov9rbf] * b2 +
+ fulcp8wa[0 + (yq6lorbx-1) * *xtov9rbf] * b3)) +
+ b1 * (fulcp8wa[3 + yq6lorbx * *xtov9rbf] * b1 +
+ 2.0e0* (fulcp8wa[2 + yq6lorbx * *xtov9rbf] * b2 +
+ fulcp8wa[1 + yq6lorbx * *xtov9rbf] * b3)) +
+ b2 * (fulcp8wa[3 + (yq6lorbx+1) * *xtov9rbf] * b2 +
+ 2.0e0* fulcp8wa[2 + (yq6lorbx+1) * *xtov9rbf] * b3) +
+ fulcp8wa[3 + (yq6lorbx+2) * *xtov9rbf] *
+ pow(b3, (double) 2.0)) *
+ po8rwsmy[ayfnwr1v-1];
+ ifys6woa[ayfnwr1v-1] = qtce8hzo;
}
- if (*pn9eowxc == 1) {
+ if (*pn9eowxc == 1) {
return;
- }
+ }
for (ayfnwr1v = 1; ayfnwr1v <= *kuzxj1lo; ayfnwr1v++) {
- egwbdua212 += ifys6woa[ayfnwr1v-1];
+ egwbdua212 += ifys6woa[ayfnwr1v-1];
}
*qcpiaj7f = pow(*qgnl3toc - egwbdua212, (double) 2.0);
}
+
void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
double gkdx5jal[], int *rvy1fpli, int *kuzxj1lo, double zyupcmk6[],
double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[]) {
@@ -706,7 +721,7 @@ void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
qnwamo0e0 = zvau2lct; qnwamo0e1 = f6lsuzax; qnwamo0e2 = fvh2rwtc; qnwamo0e3 = dcfir2no; qnwamo0e4 = zyupcmk6;
for (ayfnwr1v = 0; ayfnwr1v < *kuzxj1lo; ayfnwr1v++) {
- *qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = *qnwamo0e4++ = 0.0e0;
+ *qnwamo0e0++ = *qnwamo0e1++ = *qnwamo0e2++ = *qnwamo0e3++ = *qnwamo0e4++ = 0.0e0;
}
//Rprintf("first one n5aioudkgt9iulbf pow(po8rwsmy[0], (double) 1.0) = ");
@@ -715,45 +730,45 @@ void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
for (ayfnwr1v = 1; ayfnwr1v <= *rvy1fpli; ayfnwr1v++) {
- F77_CALL(vinterv)(gkdx5jal, &nhnpt1zym1, sjwyig9t + ayfnwr1v - 1, &dqlr5bse, &pqzfxw4i);
+ F77_CALL(vinterv)(gkdx5jal, &nhnpt1zym1, sjwyig9t + ayfnwr1v - 1, &dqlr5bse, &pqzfxw4i);
- if (pqzfxw4i == 1) {
- if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) {
- dqlr5bse--;
- } else {
- return;
- }
+ if (pqzfxw4i == 1) {
+ if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) {
+ dqlr5bse--;
+ } else {
+ return;
}
+ }
- F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v - 1, &dqlr5bse,
- ms0qypiw, g9fvdrbw, &pqneb2ra);
-
-
- yq6lorbx = dqlr5bse - 4 + 1;
- wsvdbx3tk = po8rwsmy[ayfnwr1v-1];
- wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[0];
- zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
- zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[0];
- f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1];
- fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
- dcfir2no[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
-
- yq6lorbx = dqlr5bse - 4 + 2;
- wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[1];
- zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
- zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1];
- f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
- fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
-
- yq6lorbx = dqlr5bse - 4 + 3;
- wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[2];
- zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
- zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
- f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
- yq6lorbx = dqlr5bse;
- wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[3];
- zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
- zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
+ F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v - 1, &dqlr5bse,
+ ms0qypiw, g9fvdrbw, &pqneb2ra);
+
+
+ yq6lorbx = dqlr5bse - 4 + 1;
+ wsvdbx3tk = po8rwsmy[ayfnwr1v-1];
+ wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[0];
+ zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
+ zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[0];
+ f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1];
+ fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
+ dcfir2no[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
+
+ yq6lorbx = dqlr5bse - 4 + 2;
+ wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[1];
+ zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
+ zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[1];
+ f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
+ fvh2rwtc[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
+
+ yq6lorbx = dqlr5bse - 4 + 3;
+ wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[2];
+ zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
+ zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[2];
+ f6lsuzax[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
+ yq6lorbx = dqlr5bse;
+ wv2svdbx3tk = wsvdbx3tk * g9fvdrbw[3];
+ zyupcmk6[yq6lorbx-1] += wv2svdbx3tk * ghz9vuba[ayfnwr1v-1];
+ zvau2lct[yq6lorbx-1] += wv2svdbx3tk * g9fvdrbw[3];
}
}
diff --git a/src/vcall2.f b/src/vcall2.f
index 6e009b6..a6f7aec 100644
--- a/src/vcall2.f
+++ b/src/vcall2.f
@@ -1,6 +1,7 @@
+C Output from Public domain Ratfor, version 1.01
subroutine vcall2(onemor,w,y,eta,beta,u)
logical onemor
- double precision w(1), y(1), eta(1), beta(1), u(1)
+ double precision w(*), y(*), eta(*), beta(*), u(*)
onemor = .true.
w(1) = 1.0d0
y(1) = 1.0d0
@@ -11,7 +12,7 @@
end
subroutine vcall1(onemor,y,eta,beta,u,xbig,cpxbig)
logical onemor, cpxbig
- double precision y(1), eta(1), beta(1), u(1), xbig(1)
+ double precision y(*), eta(*), beta(*), u(*), xbig(*)
onemor = .true.
y(1) = 1.0d0
eta(1) = 1.0d0
diff --git a/src/vdigami.f b/src/vdigami.f
new file mode 100644
index 0000000..72f4efc
--- /dev/null
+++ b/src/vdigami.f
@@ -0,0 +1,156 @@
+ SUBROUTINE vdigami(D, X, P, GPLOG, GP1LOG, PSIP, PSIP1, PSIDP,
+ * PSIDP1, IFAULT, TMAX)
+C
+C ALGORITHM AS 187 APPL. STATIST. (1982) VOL.31, NO.3
+C
+C Computes derivatives of the incomplete gamma integral for positive
+C parameters, X, P, using a series expansion if P > X or X <= 1, and
+C a continued fraction expansion otherwise.
+C
+C Calculation of D(4) in line 60 corrected 5 October 1993.
+C
+C N.B. The user must input values of the incomplete gamma, digamma
+C and trigamma functions. These can be obtained using AS 239
+C (or 32), AS 103 and AS 121 respectively.
+C
+C
+C
+C
+C 20130214; adapted by T. W. Yee to handle DOUBLE PRECISION arguments.
+C And declarations of *all* variables.
+C And a wrapper function written to call this subroutine.
+C TMAX is now input.
+C Seems to work but more testing is required.
+C
+ DOUBLE PRECISION X, P, GPLOG, GP1LOG, PSIP, PSIP1, PSIDP, PSIDP1
+ DOUBLE PRECISION TMAX
+ INTEGER IFAULT
+C
+C
+C
+C
+C
+ DOUBLE PRECISION PN(6), D(6), DP(6), DPP(6), ZERO, ONE, TWO
+C DATA TMAX/100.0/
+ DATA E, OFLO, VSMALL/1.D-6, 1.D30, 1.D-30/
+ DATA ZERO/0.0/, ONE/1.0/, TWO/2.0/
+C
+ IFAULT = 0
+C
+C Derivatives with respect to X
+C
+ PM1 = P - ONE
+ XLOG = DLOG(X)
+ D(1) = DEXP(-GPLOG + PM1*XLOG - X)
+ D(2) = D(1) * (PM1/X - ONE)
+ D(5) = D(1) * (XLOG - PSIP)
+C
+C Derivatives with respect to P
+C
+ IF (X .GT. ONE .AND. X .GE. P) GO TO 30
+C
+C Series expansion
+C
+ F = DEXP(P*XLOG - GP1LOG - X)
+ DFP = F * (XLOG - PSIP1)
+ DFPP = DFP*DFP/F - F*PSIDP1
+C
+ TMAXP = TMAX + P
+ C = ONE
+ S = ONE
+ CP = ZERO
+ CPP = ZERO
+ DSP = ZERO
+ DSPP = ZERO
+ A = P
+ 1 A = A + ONE
+ CPC = CP / C
+ CP = CPC - ONE/A
+ CPP = CPP/C - CPC*CPC + ONE/A**2
+ C = C*X/A
+ CP = CP*C
+ CPP = CPP*C + CP*CP/C
+ S = S + C
+ DSP = DSP + CP
+ DSPP = DSPP + CPP
+ IF (A .GT. TMAXP) GO TO 1001
+ IF (C .GT. E*S) GO TO 1
+ D(6) = S*F
+ D(3) = S*DFP + F*DSP
+ D(4) = S*DFPP + TWO*DFP*DSP + F*DSPP
+ RETURN
+C
+C Continued fraction expansion
+C
+ 30 F = DEXP(P*XLOG - GPLOG - X)
+ DFP = F * (XLOG - PSIP)
+ DFPP = DFP*DFP/F - F*PSIDP
+C
+ A = PM1
+ B = X + ONE - A
+ TERM = ZERO
+ PN(1) = ONE
+ PN(2) = X
+ PN(3) = X + ONE
+ PN(4) = X * B
+ S0 = PN(3) / PN(4)
+ DO 31 I = 1, 4
+ DP(I) = ZERO
+ DPP(I) = ZERO
+ 31 CONTINUE
+ DP(4) = -X
+C
+ 32 A = A - ONE
+ B = B + TWO
+ TERM = TERM + ONE
+ AN = A*TERM
+ PN(5) = B*PN(3) + AN*PN(1)
+ PN(6) = B*PN(4) + AN*PN(2)
+ DP(5) = B*DP(3) - PN(3) + AN*DP(1) + PN(1)*TERM
+ DP(6) = B*DP(4) - PN(4) + AN*DP(2) + PN(2)*TERM
+ DPP(5) = B*DPP(3) + AN*DPP(1) + TWO*(TERM*DP(1) - DP(3))
+ DPP(6) = B*DPP(4) + AN*DPP(2) + TWO*(TERM*DP(2) - DP(4))
+C
+ IF (ABS(PN(6)) .LT. VSMALL) GO TO 35
+ S = PN(5) / PN(6)
+ C = ABS(S - S0)
+ IF (C*P .GT. E) GO TO 34
+ IF (C .LE. E*S) GO TO 42
+C
+ 34 S0 = S
+ 35 DO 36 I = 1, 4
+ I2 = I + 2
+ DP(I) = DP(I2)
+ DPP(I) = DPP(I2)
+ PN(I) = PN(I2)
+ 36 CONTINUE
+C
+ IF (TERM .GT. TMAX) GO TO 1001
+ IF (ABS(PN(5)) .LT. OFLO) GO TO 32
+ DO 41 I = 1, 4
+ DP(I) = DP(I) / OFLO
+ DPP(I) = DPP(I) / OFLO
+ PN(I) = PN(I) / OFLO
+ 41 CONTINUE
+ GO TO 32
+C
+ 42 D(6) = ONE - F*S
+ DSP = (DP(5) - S*DP(6)) / PN(6)
+ DSPP = (DPP(5) - S*DPP(6) - TWO*DSP*DP(6)) / PN(6)
+ D(3) = -F*DSP - S*DFP
+ D(4) = -F*DSPP - TWO*DSP*DFP - S*DFPP
+ RETURN
+C
+C Set fault indicator
+C
+ 1001 IFAULT = 1
+ RETURN
+ END
+
+
+
+
+
+
+
+
diff --git a/src/vgam.f b/src/vgam.f
index 25bbe1c..f689f0c 100644
--- a/src/vgam.f
+++ b/src/vgam.f
@@ -56,8 +56,8 @@
&tgiyxdw1, dufozmt7)
implicit logical (a-z)
integer iii, cz8qdfyj, tesdm5kv, kxvq6sfw, nyfu9rod, wy1vqfzu,
- &ldk, dimw, kuzxj1lo, nk, tgiyxdw1(1), dufozmt7(1)
- double precision g9fvdrbw(4,1), osiz4fxy(ldk, nk*wy1vqfzu), wmat(
+ &ldk, dimw, kuzxj1lo, nk, tgiyxdw1(*), dufozmt7(*)
+ double precision g9fvdrbw(4,*), osiz4fxy(ldk, nk*wy1vqfzu), wmat(
&kuzxj1lo,dimw)
double precision obr6tcex
integer urohxe6t, nead, bcol, brow, biuvowq2, nbj8tdsk
@@ -87,8 +87,8 @@
&t8hwvalr, rpyis2kc, osiz4fxy, btwy, sgdub, ui8ysltq, yzoe1rsp,
&bmb, ifys6woa, dof, scrtch, fbd5yktj, truen)
implicit logical (a-z)
- integer kuzxj1lo, nk, ldk, wy1vqfzu, dimw, tgiyxdw1(1), dufozmt7(
- &1), info, fbd5yktj, truen
+ integer kuzxj1lo, nk, ldk, wy1vqfzu, dimw, tgiyxdw1(*), dufozmt7(*
+ &), info, fbd5yktj, truen
integer yzoe1rsp
double precision he7mqnvy(kuzxj1lo), rbne6ouj(kuzxj1lo,wy1vqfzu),
&wmat(kuzxj1lo,dimw), gkdx5jal(nk+4), wkmm(wy1vqfzu,wy1vqfzu,16),
@@ -96,7 +96,7 @@
&wy1vqfzu), osiz4fxy(ldk,nk*wy1vqfzu), btwy(wy1vqfzu,nk)
double precision sgdub(nk,wy1vqfzu), ui8ysltq(truen,wy1vqfzu),
&bmb(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu), dof(wy1vqfzu)
- &, scrtch(1)
+ &, scrtch(*)
integer yq6lorbx, ayfnwr1v, dqlr5bse, pqzfxw4i, urohxe6t, icrit
integer gp0xjetb, e5knafcg, wep0oibc, l3zpbstu(3), ispar, i1loc
double precision qaltf0nz, g9fvdrbw(4,1), ms0qypiw(16), penalt,
@@ -300,10 +300,10 @@
&wuwbar, dvhw1ulq)
implicit logical (a-z)
integer kuzxj1lo, nef, ezlgm2up(kuzxj1lo), wy1vqfzu, dimw, dimu,
- &tgiyxdw1(1),dufozmt7(1), kgwmz4ip, iz2nbfjc, wuwbar, dvhw1ulq
+ &kgwmz4ip, iz2nbfjc, wuwbar, dvhw1ulq, tgiyxdw1(*),dufozmt7(*)
double precision he7mqnvy(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu),
&wmat(kuzxj1lo,dimw), pygsw6ko(nef), pasjmo8g(nef,wy1vqfzu), wbar(
- &nef,1), uwbar(dimu,nef), wpasjmo8g(nef,wy1vqfzu), work(wy1vqfzu,
+ &nef,*), uwbar(dimu,nef), wpasjmo8g(nef,wy1vqfzu), work(wy1vqfzu,
&wy1vqfzu+1), work2(kgwmz4ip,kgwmz4ip+1), hjm2ktyr(wy1vqfzu,
&kgwmz4ip)
integer ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z,
@@ -455,8 +455,8 @@
&kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, bmb, work, wmat, ifys6woa, dimw,
& tgiyxdw1, dufozmt7, truen)
implicit logical (a-z)
- integer ldk, kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, dimw, tgiyxdw1(1),
- &dufozmt7(1), truen
+ integer ldk, kuzxj1lo, nk, wy1vqfzu, jzwsy6tp, dimw, tgiyxdw1(*),
+ &dufozmt7(*), truen
double precision enaqpzk9(ldk,nk*wy1vqfzu), he7mqnvy(kuzxj1lo),
&gkdx5jal(nk+4), grmuyvx9(truen,wy1vqfzu), bmb(wy1vqfzu,wy1vqfzu),
&work(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw), ifys6woa(kuzxj1lo,
@@ -572,7 +572,7 @@
subroutine ovjnsmt2(bmb, wmat, work, ifys6woa, wy1vqfzu, kuzxj1lo,
& dimw, tgiyxdw1, dufozmt7, iii)
implicit logical (a-z)
- integer wy1vqfzu, kuzxj1lo, dimw, tgiyxdw1(1), dufozmt7(1), iii
+ integer wy1vqfzu, kuzxj1lo, dimw, tgiyxdw1(*), dufozmt7(*), iii
double precision bmb(wy1vqfzu,wy1vqfzu), wmat(kuzxj1lo,dimw),
&work(wy1vqfzu,wy1vqfzu), ifys6woa(kuzxj1lo,wy1vqfzu)
double precision q6zdcwxk, obr6tcex
@@ -690,20 +690,20 @@
&r0oydcxb, ub4xioar, effect, uwin)
implicit logical (a-z)
integer kuzxj1lo,wy1vqfzu,ezlgm2up(kuzxj1lo),nef, dimw, fbd5yktj,
- &ldk, info, yzoe1rsp, acpios9q,tgiyxdw1(1),dufozmt7(1), iz2nbfjc,
+ &ldk, info, yzoe1rsp, acpios9q,tgiyxdw1(*),dufozmt7(*), iz2nbfjc,
&kgwmz4ip, ges1xpkr(kgwmz4ip*2)
double precision sjwyig9tto(kuzxj1lo), tlgduey8(kuzxj1lo,wy1vqfzu)
&, wmat(kuzxj1lo,dimw), wbkq9zyi(kgwmz4ip), dof(kgwmz4ip), smo(
&kuzxj1lo,kgwmz4ip), cov(kuzxj1lo,kgwmz4ip)
double precision s0(2*kgwmz4ip, 2*kgwmz4ip,2)
- double precision work1(1), work3(1), sgdub(1), rpyis2kc(1),
+ double precision work1(*), work3(*), sgdub(*), rpyis2kc(*),
&zv2xfhei(acpios9q+4)
double precision xin(nef), yin(nef,wy1vqfzu), rbne6ouj(nef,
- &wy1vqfzu), win(nef,1), bmb(1), ifys6woa(nef,kgwmz4ip), wkmm(
+ &wy1vqfzu), win(nef,*), bmb(*), ifys6woa(nef,kgwmz4ip), wkmm(
&wy1vqfzu,wy1vqfzu,16), hjm2ktyr(wy1vqfzu,kgwmz4ip)
double precision beta(2*kgwmz4ip), fasrkub3(2*kgwmz4ip), sout(nef,
&kgwmz4ip), r0oydcxb(kgwmz4ip,nef), ub4xioar(kgwmz4ip,nef), effect(
- &nef*kgwmz4ip), uwin(1)
+ &nef*kgwmz4ip), uwin(*)
integer dimwin
integer ayfnwr1v, yq6lorbx, gp1jxzuh, rutyk8mg, xjc4ywlh, job,
&qemj9asg, dvhw1ulq
@@ -807,16 +807,16 @@
&wkmm, work3, sgdub, bmb, ifys6woa, mwk, twk, rpyis2kc, zv2xfhei,
&resss, nbzjkpi3, acpios9q, itwk, jwbkl9fp)
implicit logical (a-z)
- integer irhm4cfa, n, wy1vqfzu, psdvgce3(15), ezlgm2up(1),nef(1),
- &which(1), ges1xpkr(1)
- integer jnxpuym2(1), hnpt1zym(1), fzm1ihwj(1), iz2nbfjc(1),
- &nbzjkpi3(1), acpios9q(1), itwk(1), jwbkl9fp(1)
- double precision he7mqnvy(1),tlgduey8(1),wmat(1),wbkq9zyi(1),dof(
- &1), ub4xioar(1),kispwgx3(1), m0ibglfx(1), s0(wy1vqfzu), beta(1),
- &cov(1),zpcqv3uj, vc6hatuj(1),fasrkub3(1)
- double precision xbig(1), wpuarq2m(1), hjm2ktyr(1), work1(1), wk2(
- &n,wy1vqfzu,3), wkmm(wy1vqfzu,wy1vqfzu,16), work3(1), sgdub(1),
- &bmb(1), ifys6woa(1), mwk(1), twk(1), rpyis2kc(1), zv2xfhei(1),
+ integer irhm4cfa, n, wy1vqfzu, psdvgce3(15), ezlgm2up(*),nef(*),
+ &which(*), ges1xpkr(*)
+ integer jnxpuym2(*), hnpt1zym(*), fzm1ihwj(*), iz2nbfjc(*),
+ &nbzjkpi3(*), acpios9q(*), itwk(*), jwbkl9fp(*)
+ double precision he7mqnvy(*),tlgduey8(*),wmat(*),wbkq9zyi(*),dof(*
+ &), ub4xioar(*),kispwgx3(*), m0ibglfx(*), s0(wy1vqfzu), beta(*),
+ &cov(*),zpcqv3uj, vc6hatuj(*),fasrkub3(*)
+ double precision xbig(*), wpuarq2m(*), hjm2ktyr(*), work1(*), wk2(
+ &n,wy1vqfzu,3), wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*),
+ &bmb(*), ifys6woa(*), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*),
&resss
integer p,q,yzoe1rsp,niter,gtrlbz3e, rutyk8mg, xjc4ywlh, lyma1kwc,
& dimw, dimu, fbd5yktj,ldk
@@ -902,13 +902,13 @@
&xjc4ywlh, lyma1kwc, dimw, dimu, fbd5yktj, ldk)
implicit logical (a-z)
integer qemj9asg
- integer dufozmt7(1), tgiyxdw1(1)
+ integer dufozmt7(*), tgiyxdw1(*)
integer p, q, yzoe1rsp, niter, gtrlbz3e, rutyk8mg, xjc4ywlh,
&lyma1kwc, dimw, dimu, fbd5yktj, ldk
integer irhm4cfa, kuzxj1lo, wy1vqfzu, ezlgm2up(kuzxj1lo,q),nef(q),
&which(q), ges1xpkr(xjc4ywlh)
integer jnxpuym2(q), hnpt1zym(q), iz2nbfjc(q), nbzjkpi3(q+1),
- &acpios9q(q), itwk(1), jwbkl9fp(q+1)
+ &acpios9q(q), itwk(*), jwbkl9fp(q+1)
double precision he7mqnvy(kuzxj1lo,p), tlgduey8(kuzxj1lo,wy1vqfzu)
&, wmat(kuzxj1lo,dimw), wbkq9zyi(lyma1kwc), dof(lyma1kwc)
double precision ub4xioar(wy1vqfzu,kuzxj1lo), kispwgx3(kuzxj1lo,
@@ -916,9 +916,9 @@
&xjc4ywlh), cov(kuzxj1lo,lyma1kwc), zpcqv3uj, vc6hatuj(rutyk8mg,
&xjc4ywlh), fasrkub3(xjc4ywlh)
double precision xbig(rutyk8mg,xjc4ywlh), wpuarq2m(dimu,kuzxj1lo),
- & hjm2ktyr(wy1vqfzu,lyma1kwc), work1(1), wk2(kuzxj1lo,wy1vqfzu),
- &wkmm(wy1vqfzu,wy1vqfzu,16), work3(1), sgdub(1), bmb(1), ifys6woa(
- &1), mwk(1), twk(1), rpyis2kc(1), zv2xfhei(1), resss
+ & hjm2ktyr(wy1vqfzu,lyma1kwc), work1(*), wk2(kuzxj1lo,wy1vqfzu),
+ &wkmm(wy1vqfzu,wy1vqfzu,16), work3(*), sgdub(*), bmb(*), ifys6woa(*
+ &), mwk(*), twk(*), rpyis2kc(*), zv2xfhei(*), resss
double precision ghz9vuba(kuzxj1lo,wy1vqfzu), oldmat(kuzxj1lo,
&wy1vqfzu)
integer job,info,nefk
@@ -1141,7 +1141,7 @@
subroutine x6kanjdh(he7mqnvy, xout, kuzxj1lo, wy1vqfzu)
implicit logical (a-z)
integer kuzxj1lo, wy1vqfzu
- double precision he7mqnvy(kuzxj1lo), xout(1)
+ double precision he7mqnvy(kuzxj1lo), xout(*)
integer ayfnwr1v, yq6lorbx, gp1jxzuh, iptr
iptr=1
do 23422 yq6lorbx=1,wy1vqfzu
@@ -1223,7 +1223,7 @@
integer yzoe1rsp
double precision x(kuzxj1lo), w(kuzxj1lo), bhcji9gl(kuzxj1lo),
&ub4xioar(kuzxj1lo)
- double precision cov(kuzxj1lo,1)
+ double precision cov(kuzxj1lo,*)
integer ayfnwr1v
double precision pasjmo8g, pygsw6ko, q6zdcwxk, nsum, eck8vubt,
&interc, bzmd6ftv, hofjnx2e, lm9vcjob
diff --git a/src/vgam3.c b/src/vgam3.c
index 39f877b..865b958 100644
--- a/src/vgam3.c
+++ b/src/vgam3.c
@@ -4,6 +4,11 @@
+
+
+
+
+
#include<math.h>
#include<stdio.h>
#include<stdlib.h>
@@ -91,7 +96,8 @@ void Yee_vbfa(
double rpyis2kc[], double gkdx5jals[],
- int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]);
+ int nbzjkpi3[], int lindex[],
+ int acpios9q[], int jwbkl9fp[]);
void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[], int which[],
double he7mqnvy[], double tlgduey8[], double rbne6ouj[],
double wbkq9zyi[], double lamvec[], double hdnw2fts[],
@@ -101,7 +107,8 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[],
double ifys6woa[],
double rpyis2kc[], double gkdx5jals[], double *ghdetj8v,
- int nbzjkpi3[], int acpios9q[], int jwbkl9fp[],
+ int nbzjkpi3[], int lindex[],
+ int acpios9q[], int jwbkl9fp[],
int *nhja0izq, int *yzoe1rsp, int *ueb8hndv, int *gtrlbz3e,
int *rutyk8mg, int *xjc4ywlh,
int *kvowz9ht, int *npjlv3mr, int *fbd5yktj, int *ldk, int *algpft4y,
@@ -130,9 +137,10 @@ void F77_NAME(vdqrsl)(double*, int*, int*, int*, double*, double*, double*,
void F77_NAME(vqrdca)(double*, int*, int*, int*, double*, int*, double*,
int*, double*);
-void Free_fapc0tnbvsplin(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9,
- double *wkumc9idbtwy, double *wkumc9idwk0, double *wkumc9idbk3ymcih,
- int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7);
+void Free_fapc0tnbyee_spline(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9,
+ double *wkumc9idbtwy, double *wkumc9idwk0,
+ double *wkumc9idbk3ymcih,
+ int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7);
void Free_fapc0tnbewg7qruh(double *wkumc9idWrk1,
int *wkumc9idges1xpkr,
double *wkumc9idbeta, double *wkumc9idfasrkub3,
@@ -143,6 +151,24 @@ void Free_fapc0tnbewg7qruh(double *wkumc9idWrk1,
double *wkumc9ideshvo2ic, double *wkumc9idonxjvw8u,
double *wkumc9idwk4);
+void F77_NAME(vdigami)(double*, double*, double*,
+ double*, double*, double*,
+ double*, double*, double*,
+ double*, int*);
+
+void VGAM_C_vdigami(double d[], double x[], double p[],
+ double gplog[], double gp1log[], double psip[],
+ double psip1[], double psidp[], double psidp1[],
+ int *ifault, double *tmax,
+ int *f8yswcat);
+
+
+void n5aioudkgt9iulbf(double sjwyig9t[], double ghz9vuba[], double po8rwsmy[],
+ double gkdx5jal[], int *rvy1fpli, int *kuzxj1lo, double zyupcmk6[],
+ double zvau2lct[], double f6lsuzax[], double fvh2rwtc[], double dcfir2no[]);
+
+
+
extern
void n5aioudkdnaoqj0l(double *pjb6wfoq, double *xs, double *ys,
@@ -188,6 +214,38 @@ void fvlmz9iyC_mxrbkut0(double wpuarq2m[], double he7mqnvy[], int *wy1vqfzu, int
+
+void VGAM_C_vdigami(double d[], double x[], double p[],
+ double gplog[], double gp1log[], double psip[],
+ double psip1[], double psidp[], double psidp1[],
+ int *ifault, double *tmax,
+ int *f8yswcat) {
+
+
+ int ayfnwr1v;
+
+
+
+ for (ayfnwr1v = 0; ayfnwr1v < *f8yswcat; ayfnwr1v++) {
+ F77_CALL(vdigami)(d, x, p,
+ gplog, gp1log, psip,
+ psip1, psidp, psidp1,
+ tmax, ifault);
+ d += 6;
+ x++;
+ p++;
+ gplog++;
+ gp1log++;
+ psip++;
+ psip1++;
+ psidp++;
+ psidp1++;
+ ifault++;
+ }
+}
+
+
+
void Yee_vbvs(int *f8yswcat, double gkdx5jal[], double rpyis2kc[],
double sjwyig9t[], double kispwgx3[],
int *acpios9q, int *order, int *wy1vqfzu) {
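
The new VGAM_C_vdigami wrapper above vectorises the Fortran vdigami routine by calling it once per observation and stepping every per-observation pointer argument (the d argument by six doubles at a time, the others by one element). Below is a minimal, self-contained sketch of that pointer-striding pattern; scalar_kernel() and all names are hypothetical stand-ins for the F77_CALL(vdigami) call, not part of the package.

/* Sketch of the pointer-striding wrapper pattern: a scalar routine is
 * applied to each of n observations by advancing every per-observation
 * argument pointer; a 6-column argument is advanced by its width.
 * scalar_kernel() is a made-up stand-in for F77_CALL(vdigami).        */
#include <stdio.h>

static void scalar_kernel(const double d[6], const double *x,
                          double *out, int *fault) {
  *out = d[0] + *x;            /* placeholder computation             */
  *fault = 0;
}

static void vectorised_wrapper(double d[], double x[], double out[],
                               int fault[], int n) {
  int i;
  for (i = 0; i < n; i++) {
    scalar_kernel(d, x, out, fault);
    d += 6;                    /* next observation's 6-vector          */
    x++; out++; fault++;       /* next scalar slots                    */
  }
}

int main(void) {
  double d[12] = {1, 0, 0, 0, 0, 0,  2, 0, 0, 0, 0, 0};
  double x[2]  = {0.5, 1.5}, out[2];
  int fault[2];
  vectorised_wrapper(d, x, out, fault, 2);
  printf("%g %g\n", out[0], out[1]);   /* prints 1.5 3.5 */
  return 0;
}
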
@@ -299,15 +357,19 @@ void fapc0tnbybnagt8k(int *iii, int *cz8qdfyj, int *tesdm5kv,
}
-void Free_fapc0tnbvsplin(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9,
- double *wkumc9idbtwy, double *wkumc9idwk0, double *wkumc9idbk3ymcih,
- int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7) {
+void Free_fapc0tnbyee_spline(double *wkumc9idosiz4fxy, double *wkumc9idenaqpzk9,
+ double *wkumc9idbtwy, double *wkumc9idwk0,
+ double *wkumc9idbk3ymcih,
+ int *wkumc9idtgiyxdw1, int *wkumc9iddufozmt7) {
Free(wkumc9idosiz4fxy); Free(wkumc9idenaqpzk9);
Free(wkumc9idbtwy); Free(wkumc9idwk0); Free(wkumc9idbk3ymcih);
Free(wkumc9idtgiyxdw1); Free(wkumc9iddufozmt7);
}
+
+
+
void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gkdx5jal,
int *lqsahu0r, int *acpios9q, int *ldk, int *wy1vqfzu, int *kvowz9ht,
double wbkq9zyi[], double lamvec[],
@@ -322,8 +384,15 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
double tdcb8ilk[] // Added 20100313
) {
+
+
+
int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z,
dqlr5bse, pqzfxw4i, wep0oibc;
+ int have_setup_sg = 0; /* == 1 if sg[0123] have been initialized */
+
+
+
int junkicrit = -1,
@@ -347,6 +416,14 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
kpftdm0jeps = tdcb8ilk[3];
+ double svdbx3tk_tt1, svdbx3tk_tt2 = 0.0, svdbx3tk_g2dnwteb = -1.0;
+ double *wkumc9idzvau2lct, *wkumc9idf6lsuzax, *wkumc9idfvh2rwtc, *wkumc9iddcfir2no;
+ double *wkumc9idxwy;
+ double *fpdlcqk9ifys6woa;
+
+
+
+
wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int);
wkumc9iddufozmt7 = Calloc(imk5wjxg, int);
fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1, wkumc9iddufozmt7, wy1vqfzu);
@@ -361,104 +438,186 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
+
+
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- if (wbkq9zyi[yq6lorbx-1] == 0.0) {
- pn9eowxc = 0;
- } else {
- pn9eowxc = 1;
+
+
+
+
+
+
+
+
+ if (wbkq9zyi[yq6lorbx-1] == 0.0) {
+ pn9eowxc = 0;
+
+ } else { /// vvv
+
+
+ pn9eowxc = 1;
+
+
+
+ if (have_setup_sg == 0) {
+ have_setup_sg = 1; // Need only be done once
+
+ n5aioudkzosq7hub(xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs, gkdx5jal, acpios9q);
+
+ for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) {
+ svdbx3tk_tt2 += xecbg0pf[ayfnwr1v-1];
+ }
+ }
+
+
+ wkumc9idxwy = Calloc(*acpios9q, double);
+ wkumc9idzvau2lct = Calloc(*acpios9q, double);
+ wkumc9idf6lsuzax = Calloc(*acpios9q, double);
+ wkumc9idfvh2rwtc = Calloc(*acpios9q, double);
+ wkumc9iddcfir2no = Calloc(*acpios9q, double);
+ n5aioudkgt9iulbf(sjwyig9t,
+ tlgduey8 + (yq6lorbx-1) * *lqsahu0r, // bhcji9gl
+ rbne6ouj + (yq6lorbx-1) * *lqsahu0r, // po8rwsmy,
+ gkdx5jal, lqsahu0r, acpios9q, wkumc9idxwy, // lqsahu0r === kuzxj1lo
+ wkumc9idzvau2lct, wkumc9idf6lsuzax, wkumc9idfvh2rwtc, wkumc9iddcfir2no);
+
+ svdbx3tk_tt1 = 0.0;
+ for (ayfnwr1v = 3; ayfnwr1v <= (*acpios9q - 3); ayfnwr1v++) {
+ svdbx3tk_tt1 += wkumc9idzvau2lct[ayfnwr1v-1];
}
+ Free(wkumc9idxwy);
+ Free(wkumc9idzvau2lct);
+ Free(wkumc9idf6lsuzax);
+ Free(wkumc9idfvh2rwtc);
+ Free(wkumc9iddcfir2no);
+
+ svdbx3tk_g2dnwteb = svdbx3tk_tt1 / svdbx3tk_tt2;
+ lamvec[yq6lorbx-1] =
+ svdbx3tk_g2dnwteb * pow(16.0, wbkq9zyi[yq6lorbx-1] * 6.0 - 2.0);
+
+
+
+
+ } /// vvv
+
+
+
+
+
+
+
if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu || pn9eowxc == 0) { // ggg
- wep0oibc = 1;
- l3zpbstu[0] = junkicrit;
- l3zpbstu[1] = pn9eowxc;
- l3zpbstu[2] = itdcb8ilk[0];
- jstx4uwe[0] = kpftdm0jmynl7uaq; // Prior to 20100313: was waiez6nt;
- jstx4uwe[1] = kpftdm0jzustx4fw; // Prior to 20100313: was fp6nozvx;
- jstx4uwe[2] = kpftdm0jtol; // Prior to 20100313: was Toler_df;
- jstx4uwe[3] = kpftdm0jeps; // Introduced as an arg, 20100313
+ wep0oibc = 1;
- if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) { // hhh
- for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] /=
- rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
- }
+ l3zpbstu[0] = junkicrit;
+ l3zpbstu[1] = pn9eowxc;
+ l3zpbstu[2] = itdcb8ilk[0];
+ jstx4uwe[0] = kpftdm0jmynl7uaq; // Prior to 20100313: was waiez6nt;
+ jstx4uwe[1] = kpftdm0jzustx4fw; // Prior to 20100313: was fp6nozvx;
+ jstx4uwe[2] = kpftdm0jtol; // Prior to 20100313: was Toler_df;
+ jstx4uwe[3] = kpftdm0jeps; // Introduced as an arg, 20100313
- n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1,
- sjwyig9t, tlgduey8 + (yq6lorbx-1) * *lqsahu0r,
- rbne6ouj + (yq6lorbx-1) * *lqsahu0r,
- lqsahu0r, acpios9q,
- gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q,
- t8hwvalr + (yq6lorbx-1) * *lqsahu0r,
- ifys6woa + (yq6lorbx-1) * *ftnjamu2,
- wbkq9zyi + yq6lorbx-1, jstx4uwe,
- xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
- tt2,
- cvnjhg2u, l3zpbstu,
- &xtov9rbf, &wep0oibc, fbd5yktj);
- lamvec[yq6lorbx-1] = jstx4uwe[0];
-
- if (*fbd5yktj) {
- Rprintf("Error in n5aioudkdnaoqj0l; inside Yee_spline\n");
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- return;
- }
+ if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) { // hhh
+ for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+ tlgduey8[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] /=
+ rbne6ouj[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
+ }
+
+
+ have_setup_sg = 1;
+ n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1,
+ sjwyig9t, tlgduey8 + (yq6lorbx-1) * *lqsahu0r,
+ rbne6ouj + (yq6lorbx-1) * *lqsahu0r,
+ lqsahu0r, acpios9q,
+ gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q,
+ t8hwvalr + (yq6lorbx-1) * *lqsahu0r,
+ ifys6woa + (yq6lorbx-1) * *lqsahu0r, // *ftnjamu2,
+ wbkq9zyi + yq6lorbx-1, jstx4uwe,
+ xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
+ tt2,
+ cvnjhg2u, l3zpbstu,
+ &xtov9rbf, &wep0oibc, fbd5yktj);
+ lamvec[yq6lorbx-1] = jstx4uwe[0];
+
+ if (*fbd5yktj) {
+ Rprintf("Error in n5aioudkdnaoqj0l; inside Yee_spline\n");
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ return;
+ }
- if (*yzoe1rsp) {
- for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- gp1jxzuh = ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2;
- bpvaqm5z = ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r;
- ui8ysltq[gp1jxzuh] = ifys6woa[gp1jxzuh] / rbne6ouj[bpvaqm5z];
- }
+ if (*yzoe1rsp) {
+ for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+ gp1jxzuh = ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2;
+ bpvaqm5z = ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r;
+ ui8ysltq[gp1jxzuh] = ifys6woa[bpvaqm5z] / rbne6ouj[bpvaqm5z];
}
+ }
} else { // hhh and uuu
- n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1,
- sjwyig9t, wkumc9idbk3ymcih, rbne6ouj + (yq6lorbx-1) * *lqsahu0r,
- lqsahu0r, acpios9q,
- gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q,
- t8hwvalr + (yq6lorbx-1) * *lqsahu0r,
- ifys6woa + (yq6lorbx-1) * *ftnjamu2,
- wbkq9zyi + yq6lorbx-1, jstx4uwe,
- xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
- tt2,
- cvnjhg2u, l3zpbstu,
- &xtov9rbf, &wep0oibc, fbd5yktj);
-
- lamvec[yq6lorbx-1] = jstx4uwe[0];
-
- if (*fbd5yktj) {
- Rprintf("Error in Rgam_dnaoqj0l; inside Yee_spline\n");
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
+ have_setup_sg = 1;
+ n5aioudkdnaoqj0l(hdnw2fts + yq6lorbx-1,
+ sjwyig9t, wkumc9idbk3ymcih, rbne6ouj + (yq6lorbx-1) * *lqsahu0r,
+ lqsahu0r, acpios9q,
+ gkdx5jal, rpyis2kc + (yq6lorbx-1) * *acpios9q,
+ t8hwvalr + (yq6lorbx-1) * *lqsahu0r,
+ ifys6woa + (yq6lorbx-1) * *lqsahu0r, // 20130427
+
+
+ wbkq9zyi + yq6lorbx-1, jstx4uwe,
+ xecbg0pf, z4grbpiq, d7glzhbj, v2eydbxs,
+ tt2,
+ cvnjhg2u, l3zpbstu,
+ &xtov9rbf, &wep0oibc, fbd5yktj);
+
+ lamvec[yq6lorbx-1] = jstx4uwe[0];
+
+ if (*fbd5yktj) {
+ Rprintf("Error in Rgam_dnaoqj0l; inside Yee_spline\n");
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- return;
- }
- } // uuu
-
- if (*fbd5yktj) {
- Rprintf("Error in n5aioudkdnaoqj0l: fbd5yktj = %3d.\n", *fbd5yktj);
- Rprintf("Called within Yee_spline.\n");
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
return;
- }
+ }
+ } // uuu
+
+ if (*fbd5yktj) {
+ Rprintf("Error in n5aioudkdnaoqj0l: fbd5yktj = %3d.\n", *fbd5yktj);
+ Rprintf("Called within Yee_spline.\n");
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ return;
+ }
} // ggg
}
+
+
if (*wy1vqfzu == 1 || *kvowz9ht == *wy1vqfzu) {
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+
+
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ hdnw2fts[yq6lorbx-1] -= 1.0; // Decrement it.
+ }
+
+
+
return;
}
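
Inside the per-response loop above, the added have_setup_sg flag guards the call to n5aioudkzosq7hub (and the accumulation of svdbx3tk_tt2) so that this shared setup runs at most once, however many responses take the wbkq9zyi[j] != 0 branch. A small sketch of this compute-once-inside-a-loop idiom follows; all names and numbers are illustrative only.

/* Sketch of the compute-once guard: shared setup is triggered lazily by
 * the first loop iteration that needs it and skipped thereafter.        */
#include <stdio.h>

static double shared_sum;            /* plays the role of svdbx3tk_tt2   */

static void expensive_setup(void) {
  int i;
  shared_sum = 0.0;
  for (i = 0; i < 1000; i++)         /* stand-in for the spline setup    */
    shared_sum += 1.0 / (i + 1.0);
}

int main(void) {
  int j, have_setup = 0;             /* == 1 once setup has been done    */
  double smoothing[3] = {0.0, 0.7, 0.3};

  for (j = 0; j < 3; j++) {
    if (smoothing[j] == 0.0)
      continue;                      /* this response needs no setup     */
    if (!have_setup) {               /* first response that needs it     */
      have_setup = 1;
      expensive_setup();
    }
    printf("response %d uses shared_sum = %g\n", j + 1, shared_sum);
  }
  return 0;
}
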
@@ -467,98 +626,115 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
+
+
+
+
+
+
for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- arm0lkbg1 = *acpios9q + 1;
- F77_CALL(vinterv)(gkdx5jal, &arm0lkbg1, sjwyig9t + ayfnwr1v-1,
- &dqlr5bse, &pqzfxw4i);
+ arm0lkbg1 = *acpios9q + 1;
+ F77_CALL(vinterv)(gkdx5jal, &arm0lkbg1, sjwyig9t + ayfnwr1v-1,
+ &dqlr5bse, &pqzfxw4i);
- if (pqzfxw4i == 1) {
- if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) {
- dqlr5bse--;
- } else {
- Rprintf("Freeing memory in Yee_spline and returning.\n");
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- return;
- }
+ if (pqzfxw4i == 1) {
+ if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jal[dqlr5bse-1] + qaltf0nz)) {
+ dqlr5bse--;
+ } else {
+ Rprintf("Freeing memory in Yee_spline and returning.\n");
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ return;
}
+ }
+
+ F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v-1, &dqlr5bse, ms0qypiw,
+ g9fvdrbw, &pqneb2ra);
+
+ yq6lorbx = dqlr5bse - 4 + 1;
+ fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
+ for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+ *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[0];
+ fpdlcqk9btwy++;
+ }
+
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &pqneb2ra, &pqneb2ra, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &pqneb2ra, &qhzja4ny, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &pqneb2ra, &bvsquk3z, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &bvsquk3z,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &pqneb2ra, &h2dpsbkr, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+
+ yq6lorbx = dqlr5bse - 4 + 2;
+ fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
+ for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+ *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[1];
+ fpdlcqk9btwy++;
+ }
+
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &qhzja4ny, &qhzja4ny, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &qhzja4ny, &bvsquk3z, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &qhzja4ny, &h2dpsbkr, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+
+ yq6lorbx = dqlr5bse - 4 + 3;
+ fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
+ for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+ *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[2];
+ fpdlcqk9btwy++;
+ }
+
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &bvsquk3z, &bvsquk3z, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &bvsquk3z, &h2dpsbkr, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+
+ yq6lorbx = dqlr5bse - 4 + 4;
+ fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
+ for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
+ *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[3];
+ fpdlcqk9btwy++;
+ }
+
+ fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
+ g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
+ &h2dpsbkr, &h2dpsbkr, wy1vqfzu, ldk,
+ kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ }
+
+
+
+
- F77_CALL(vbsplvd)(gkdx5jal, &h2dpsbkr, sjwyig9t + ayfnwr1v-1, &dqlr5bse, ms0qypiw,
- g9fvdrbw, &pqneb2ra);
- yq6lorbx= dqlr5bse - 4 + 1;
- fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
- for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
- *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[0];
- fpdlcqk9btwy++;
- }
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &pqneb2ra, &pqneb2ra, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &pqneb2ra, &qhzja4ny, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &pqneb2ra, &bvsquk3z, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &bvsquk3z,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &pqneb2ra, &h2dpsbkr, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
-
- yq6lorbx = dqlr5bse - 4 + 2;
- fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
- for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
- *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[1];
- fpdlcqk9btwy++;
- }
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &qhzja4ny, &qhzja4ny, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &qhzja4ny, &bvsquk3z, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &qhzja4ny,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &qhzja4ny, &h2dpsbkr, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
-
- yq6lorbx = dqlr5bse - 4 + 3;
- fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
- for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
- *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[2];
- fpdlcqk9btwy++;
- }
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &bvsquk3z, &bvsquk3z, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &pqneb2ra,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &bvsquk3z, &h2dpsbkr, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
-
- yq6lorbx = dqlr5bse - 4 + 4;
- fpdlcqk9btwy = wkumc9idbtwy + (yq6lorbx-1) * *wy1vqfzu;
- for (urohxe6t = 1; urohxe6t <= *wy1vqfzu; urohxe6t++) {
- *fpdlcqk9btwy += tlgduey8[ayfnwr1v-1 + (urohxe6t-1) * *lqsahu0r] * g9fvdrbw[3];
- fpdlcqk9btwy++;
- }
- fapc0tnbybnagt8k(&ayfnwr1v, &yq6lorbx, &yu6izdrc,
- g9fvdrbw, wkumc9idosiz4fxy, rbne6ouj,
- &h2dpsbkr, &h2dpsbkr, wy1vqfzu, ldk,
- kvowz9ht, lqsahu0r, wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- }
@@ -572,12 +748,13 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
if (*aalgpft4y) {
- Rprintf("Error in subroutine vdpbfa7; inside Yee_spline.\n");
- Rprintf("*aalgpft4y = %3d\n", *aalgpft4y);
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
- return;
+ Rprintf("Error in subroutine vdpbfa7; inside Yee_spline.\n");
+ Rprintf("*aalgpft4y = %3d\n", *aalgpft4y);
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+ return;
}
arm0lkbg1 = *acpios9q * *wy1vqfzu;
@@ -588,9 +765,9 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
fpdlcqk9btwy = wkumc9idbtwy;
for (ayfnwr1v = 1; ayfnwr1v <= *acpios9q; ayfnwr1v++) {
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- rpyis2kc[ ayfnwr1v-1 + (yq6lorbx-1) * *acpios9q] = *fpdlcqk9btwy++;
- }
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ rpyis2kc[ ayfnwr1v-1 + (yq6lorbx-1) * *acpios9q] = *fpdlcqk9btwy++;
+ }
}
fapc0tnbcn8kzpab(gkdx5jal, sjwyig9t, rpyis2kc,
@@ -598,23 +775,52 @@ void Yee_spline(double *sjwyig9t, double *tlgduey8, double *rbne6ouj, double *gk
+
+
arm0lkbg1 = *acpios9q * *wy1vqfzu;
arm0lkbg2 = *ldk - 1;
fapc0tnbvicb2(wkumc9idenaqpzk9, wkumc9idosiz4fxy, wkumc9idwk0,
&arm0lkbg2, &arm0lkbg1);
+
fapc0tnbicpd0omv(wkumc9idenaqpzk9, sjwyig9t, gkdx5jal, ui8ysltq,
ldk, lqsahu0r, acpios9q, wy1vqfzu, yzoe1rsp,
rbne6ouj, ifys6woa, kvowz9ht, ftnjamu2);
- Free_fapc0tnbvsplin(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
- wkumc9idbtwy, wkumc9idwk0, wkumc9idbk3ymcih,
- wkumc9idtgiyxdw1, wkumc9iddufozmt7);
+
+
+
+
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ hdnw2fts[yq6lorbx-1] = -1.0; // Initialize; subtract the linear part
+ }
+ fpdlcqk9ifys6woa = ifys6woa;
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+ hdnw2fts[yq6lorbx-1] += *fpdlcqk9ifys6woa++;
+ }
+ }
+
+
+
+
+
+
+
+ Free_fapc0tnbyee_spline(wkumc9idosiz4fxy, wkumc9idenaqpzk9,
+ wkumc9idbtwy, wkumc9idwk0,
+ wkumc9idbk3ymcih,
+ wkumc9idtgiyxdw1, wkumc9iddufozmt7);
}
+
+
+
+
+
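
At the end of Yee_spline above, the added loop over ifys6woa accumulates, for each of the wy1vqfzu components, the sum of its leverage (hat-diagonal) values and then subtracts one: the usual nonlinear-degrees-of-freedom bookkeeping for a smoother once its linear part is removed. A tiny numerical sketch of that accounting, with made-up leverage values:

/* Sketch of the df bookkeeping: df[j] = (sum of component j's leverage
 * values) - 1, the -1 removing the linear part.  Leverages are made up. */
#include <stdio.h>

int main(void) {
  enum { M = 2, N = 4 };                 /* components, fitted points    */
  double lev[M][N] = { {0.9, 0.6, 0.4, 0.6},
                       {0.5, 0.5, 0.5, 0.5} };
  double df[M];
  int j, i;

  for (j = 0; j < M; j++) {
    df[j] = -1.0;                        /* subtract the linear part     */
    for (i = 0; i < N; i++)
      df[j] += lev[j][i];
    printf("component %d: df = %g\n", j + 1, df[j]);  /* 1.5 and 1.0    */
  }
  return 0;
}
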
void fapc0tnbcn8kzpab(double gkdx5jals[], double sjwyig9t[], double rpyis2kc[],
int *lqsahu0r, int *acpios9q, int *wy1vqfzu, double t8hwvalr[]) {
@@ -643,8 +849,8 @@ void Free_fapc0tnbvsuff9(double *wkumc9idwk1a, double *wkumc9idwk1b,
Free(wkumc9idwk1a); Free(wkumc9idwk1b);
Free(wkumc9idwk2a); Free(wkumc9idwk2b);
if (! *iz2nbfjc) {
- Free(wkumc9ideshvo2ic);
- Free(wkumc9idonxjvw8u);
+ Free(wkumc9ideshvo2ic);
+ Free(wkumc9idonxjvw8u);
}
Free(wkumc9idtgiyxdw11); Free(wkumc9iddufozmt71);
Free(wkumc9idtgiyxdw12); Free(wkumc9iddufozmt72);
@@ -692,34 +898,34 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
wkumc9iddufozmt72 = Calloc(n3colb , int);
if (*iz2nbfjc) {
- if (*npjlv3mr < *kvowz9ht || *kgwmz4ip != *wy1vqfzu) {
- Rprintf("Error in fapc0tnbvsuff9: ");
- Rprintf("must have npjlv3mr >= kvowz9ht & kgwmz4ip = M\n");
- Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
- wkumc9idwk2a, wkumc9idwk2b,
- wkumc9ideshvo2ic, wkumc9idonxjvw8u,
- wkumc9idtgiyxdw11, wkumc9iddufozmt71,
- wkumc9idtgiyxdw12, wkumc9iddufozmt72,
- iz2nbfjc);
- *dvhw1ulq = 0;
- return;
- }
+ if (*npjlv3mr < *kvowz9ht || *kgwmz4ip != *wy1vqfzu) {
+ Rprintf("Error in fapc0tnbvsuff9: ");
+ Rprintf("must have npjlv3mr >= kvowz9ht & kgwmz4ip = M\n");
+ Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
+ wkumc9idwk2a, wkumc9idwk2b,
+ wkumc9ideshvo2ic, wkumc9idonxjvw8u,
+ wkumc9idtgiyxdw11, wkumc9iddufozmt71,
+ wkumc9idtgiyxdw12, wkumc9iddufozmt72,
+ iz2nbfjc);
+ *dvhw1ulq = 0;
+ return;
+ }
} else {
- if (*npjlv3mreshvo2ic < n3colb || *dim2eshvo2ic < n3colb) {
- Rprintf("Error in fapc0tnbvsuff9 with nontrivial constraints:\n");
- Rprintf("must have npjlv3mreshvo2ic and dim2eshvo2ic both >= n3colb\n");
- Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
- wkumc9idwk2a, wkumc9idwk2b,
- wkumc9ideshvo2ic, wkumc9idonxjvw8u,
- wkumc9idtgiyxdw11, wkumc9iddufozmt71,
- wkumc9idtgiyxdw12, wkumc9iddufozmt72,
- iz2nbfjc);
- *dvhw1ulq = 0;
- return;
- }
+ if (*npjlv3mreshvo2ic < n3colb || *dim2eshvo2ic < n3colb) {
+ Rprintf("Error in fapc0tnbvsuff9 with nontrivial constraints:\n");
+ Rprintf("must have npjlv3mreshvo2ic and dim2eshvo2ic both >= n3colb\n");
+ Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
+ wkumc9idwk2a, wkumc9idwk2b,
+ wkumc9ideshvo2ic, wkumc9idonxjvw8u,
+ wkumc9idtgiyxdw11, wkumc9iddufozmt71,
+ wkumc9idtgiyxdw12, wkumc9iddufozmt72,
+ iz2nbfjc);
+ *dvhw1ulq = 0;
+ return;
+ }
- wkumc9ideshvo2ic = Calloc(*lqsahu0r * zyojx5hw , double);
- wkumc9idonxjvw8u = Calloc(*lqsahu0r * *wy1vqfzu , double);
+ wkumc9ideshvo2ic = Calloc(*lqsahu0r * zyojx5hw , double);
+ wkumc9idonxjvw8u = Calloc(*lqsahu0r * *wy1vqfzu , double);
}
@@ -789,44 +995,44 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
fvlmz9iyjdbomp0g(wkumc9idwk1a, wkumc9idwk1b, wy1vqfzu, dvhw1ulq, &pqneb2ra);
if (*dvhw1ulq != 1) {
- Rprintf("*dvhw1ulq != 1 after fvlmz9iyjdbomp0g in vsuff9.\n");
- Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
- wkumc9idwk2a, wkumc9idwk2b,
- wkumc9ideshvo2ic, wkumc9idonxjvw8u,
- wkumc9idtgiyxdw11, wkumc9iddufozmt71,
- wkumc9idtgiyxdw12, wkumc9iddufozmt72,
- iz2nbfjc);
- return;
+ Rprintf("*dvhw1ulq != 1 after fvlmz9iyjdbomp0g in vsuff9.\n");
+ Free_fapc0tnbvsuff9(wkumc9idwk1a, wkumc9idwk1b,
+ wkumc9idwk2a, wkumc9idwk2b,
+ wkumc9ideshvo2ic, wkumc9idonxjvw8u,
+ wkumc9idtgiyxdw11, wkumc9iddufozmt71,
+ wkumc9idtgiyxdw12, wkumc9iddufozmt72,
+ iz2nbfjc);
+ return;
}
if (*wueshvo2ic) {
- for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
- ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
- wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
- (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu];
- }
+ for (yq6lorbx = 1; yq6lorbx <= *npjlv3mreshvo2ic; yq6lorbx++) {
+ ueshvo2ic[yq6lorbx-1 + (ayfnwr1v-1) * *npjlv3mreshvo2ic] =
+ wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
+ (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu];
+ }
}
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk1b[yq6lorbx-1];
+ pasjmo8g[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r] = wkumc9idwk1b[yq6lorbx-1];
}
}
} else {
qnwamo0e = wkumc9idwk1a;
for (yq6lorbx = 1; yq6lorbx <= zyojx5hw; yq6lorbx++) {
- *qnwamo0e++ = 0.0e0;
+ *qnwamo0e++ = 0.0e0;
}
for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
- wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
- (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] =
- wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 +
- (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] =
- wkumc9ideshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
- }
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- wkumc9idwk1b[yq6lorbx-1] = wkumc9idonxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
- }
+ for (yq6lorbx = 1; yq6lorbx <= *kvowz9ht; yq6lorbx++) {
+ wkumc9idwk1a[wkumc9idtgiyxdw11[yq6lorbx-1]-1 +
+ (wkumc9iddufozmt71[yq6lorbx-1]-1) * *wy1vqfzu] =
+ wkumc9idwk1a[wkumc9iddufozmt71[yq6lorbx-1]-1 +
+ (wkumc9idtgiyxdw11[yq6lorbx-1]-1) * *wy1vqfzu] =
+ wkumc9ideshvo2ic[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
+ }
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ wkumc9idwk1b[yq6lorbx-1] = wkumc9idonxjvw8u[ayfnwr1v-1 + (yq6lorbx-1) * *lqsahu0r];
+ }
for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
for (gp1jxzuh = yq6lorbx; gp1jxzuh <= *kgwmz4ip; gp1jxzuh++) {
@@ -895,10 +1101,14 @@ void vsuff9(int *ftnjamu2, int *lqsahu0r, int ezlgm2up[],
}
+
+
+
void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jals[],
- double grmuyvx9[],
- int *ldk, int *lqsahu0r, int *acpios9q, int *wy1vqfzu, int *jzwsy6tp,
- double rbne6ouj[], double ifys6woa[], int *kvowz9ht, int *ftnjamu2) {
+ double grmuyvx9[],
+ int *ldk, int *lqsahu0r, int *acpios9q, int *wy1vqfzu, int *jzwsy6tp,
+ double rbne6ouj[], double ifys6woa[],
+ int *kvowz9ht, int *ftnjamu2) {
int ayfnwr1v, yq6lorbx, gp1jxzuh, urohxe6t, bpvaqm5z, dqlr5bse, pqzfxw4i;
@@ -910,12 +1120,13 @@ void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jals[],
int *wkumc9idtgiyxdw1_, *wkumc9iddufozmt7_,
imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2,
zyojx5hw = *wy1vqfzu * *wy1vqfzu;
+
wkumc9idtgiyxdw1_ = Calloc(imk5wjxg, int);
wkumc9iddufozmt7_ = Calloc(imk5wjxg, int);
fvlmz9iyC_qpsedg8x(wkumc9idtgiyxdw1_, wkumc9iddufozmt7_, wy1vqfzu);
ptri1 = wkumc9idtgiyxdw1_; ptri2 = wkumc9iddufozmt7_;
for (ayfnwr1v = 0; ayfnwr1v < imk5wjxg; ayfnwr1v++) {
- (*ptri1++)--; (*ptri2++)--;
+ (*ptri1++)--; (*ptri2++)--;
}
wkumc9idwrk = Calloc(zyojx5hw, double);
@@ -926,81 +1137,102 @@ void fapc0tnbicpd0omv(double enaqpzk9[], double sjwyig9t[], double gkdx5jals[],
+
+
+
+
if (*jzwsy6tp) {
- qnwamo0e = grmuyvx9;
- for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
- for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- *qnwamo0e++ = 0.0e0;
- }
+ qnwamo0e = grmuyvx9;
+ for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
+ for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+ *qnwamo0e++ = 0.0e0;
}
+ }
}
+
+
+
+
+
for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- qnwamo0e = wkumc9idbmb;
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
- *qnwamo0e++ = 0.0e0;
- }
+ qnwamo0e = wkumc9idbmb;
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ for (gp1jxzuh = 1; gp1jxzuh <= *wy1vqfzu; gp1jxzuh++) {
+ *qnwamo0e++ = 0.0e0;
}
+ }
- arm0lkbg1 = *acpios9q + 1;
- F77_CALL(vinterv)(gkdx5jals, &arm0lkbg1, sjwyig9t + ayfnwr1v-1,
- &dqlr5bse, &pqzfxw4i);
+ arm0lkbg1 = *acpios9q + 1;
+ F77_CALL(vinterv)(gkdx5jals, &arm0lkbg1, sjwyig9t + ayfnwr1v-1,
+ &dqlr5bse, &pqzfxw4i);
- if (pqzfxw4i == 1) {
- if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jals[dqlr5bse-1] + qaltf0nz)) {
- dqlr5bse--;
- } else {
- Rprintf("pqzfxw4i!=1 after vinterv called in fapc0tnbicpd0omv\n");
- Free(wkumc9idtgiyxdw1_); Free(wkumc9iddufozmt7_);
- Free(wkumc9idwrk);
- return;
- }
+ if (pqzfxw4i == 1) {
+ if (sjwyig9t[ayfnwr1v-1] <= (gkdx5jals[dqlr5bse-1] + qaltf0nz)) {
+ dqlr5bse--;
+ } else {
+ Rprintf("pqzfxw4i!=1 after vinterv called in fapc0tnbicpd0omv\n");
+ Free(wkumc9idtgiyxdw1_); Free(wkumc9iddufozmt7_);
+ Free(wkumc9idwrk);
+ return;
}
- arm0lkbg1 = 1;
- arm0lkbg4 = 4;
- F77_CALL(vbsplvd)(gkdx5jals, &arm0lkbg4, sjwyig9t + ayfnwr1v-1, &dqlr5bse,
- ms0qypiw, g9fvdrbw, &arm0lkbg1);
+ }
+ arm0lkbg1 = 1;
+ arm0lkbg4 = 4;
+ F77_CALL(vbsplvd)(gkdx5jals, &arm0lkbg4, sjwyig9t + ayfnwr1v-1, &dqlr5bse,
+ ms0qypiw, g9fvdrbw, &arm0lkbg1);
- yq6lorbx = dqlr5bse - 4 + 1;
+ yq6lorbx = dqlr5bse - 4 + 1;
- for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx + 3); urohxe6t++) {
- fapc0tnbvsel(&urohxe6t, &urohxe6t, wy1vqfzu, ldk,
- enaqpzk9, wkumc9idwrk);
+ for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx + 3); urohxe6t++) {
+ fapc0tnbvsel(&urohxe6t, &urohxe6t, wy1vqfzu, ldk,
+ enaqpzk9, wkumc9idwrk);
- tmp_var4 = pow(g9fvdrbw[urohxe6t-yq6lorbx], (double) 2.0);
- fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var4, wkumc9idwrk, wkumc9idbmb);
- }
+ tmp_var4 = pow(g9fvdrbw[urohxe6t-yq6lorbx], (double) 2.0);
+ fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var4, wkumc9idwrk, wkumc9idbmb);
+ }
- for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx+3); urohxe6t++) {
- for (bpvaqm5z = urohxe6t+1; bpvaqm5z <= (yq6lorbx+3); bpvaqm5z++) {
- fapc0tnbvsel(&urohxe6t, &bpvaqm5z, wy1vqfzu, ldk,
- enaqpzk9, wkumc9idwrk);
- tmp_var5 = 2.0 * g9fvdrbw[urohxe6t-yq6lorbx] * g9fvdrbw[bpvaqm5z-yq6lorbx];
- fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var5, wkumc9idwrk, wkumc9idbmb);
- }
+ for (urohxe6t = yq6lorbx; urohxe6t <= (yq6lorbx+3); urohxe6t++) {
+ for (bpvaqm5z = urohxe6t+1; bpvaqm5z <= (yq6lorbx+3); bpvaqm5z++) {
+ fapc0tnbvsel(&urohxe6t, &bpvaqm5z, wy1vqfzu, ldk,
+ enaqpzk9, wkumc9idwrk);
+ tmp_var5 = 2.0 * g9fvdrbw[urohxe6t-yq6lorbx] * g9fvdrbw[bpvaqm5z-yq6lorbx];
+ fapc0tnbo0xlszqr(wy1vqfzu, &tmp_var5, wkumc9idwrk, wkumc9idbmb);
}
+ }
- if (*jzwsy6tp) {
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- grmuyvx9[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] =
- wkumc9idbmb[yq6lorbx-1 + (yq6lorbx-1) * *wy1vqfzu];
- }
+ if (*jzwsy6tp) {
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ grmuyvx9[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2] =
+ wkumc9idbmb[yq6lorbx-1 + (yq6lorbx-1) * *wy1vqfzu];
}
+ }
+
+
+
+ fapc0tnbovjnsmt2(wkumc9idbmb, rbne6ouj, ifys6woa,
+ wy1vqfzu, lqsahu0r, kvowz9ht, &ayfnwr1v,
+ wkumc9idtgiyxdw1_, wkumc9iddufozmt7_);
- fapc0tnbovjnsmt2(wkumc9idbmb, rbne6ouj, ifys6woa,
- wy1vqfzu, lqsahu0r, kvowz9ht, &ayfnwr1v,
- wkumc9idtgiyxdw1_, wkumc9iddufozmt7_);
}
+
+
+
+
+
+
Free(wkumc9idtgiyxdw1_); Free(wkumc9iddufozmt7_);
Free(wkumc9idwrk);
Free(wkumc9idbmb);
}
+
+
+
void fapc0tnbo0xlszqr(int *wy1vqfzu, double *g9fvdrbw, double *quc6khaf, double *bmb) {
@@ -1083,24 +1315,25 @@ void fapc0tnbovjnsmt2(double bmb[], double rbne6ouj[],
int zyojx5hw = *wy1vqfzu * *wy1vqfzu;
double *wkumc9idwrk;
+
wkumc9idwrk = Calloc(zyojx5hw, double);
for (bpvaqm5z = 1; bpvaqm5z <= *wy1vqfzu; bpvaqm5z++) {
- for (urohxe6t = 1; urohxe6t <= *kvowz9ht; urohxe6t++) {
- yq6lorbx = tgiyxdw1_[urohxe6t-1] + (dufozmt7_[urohxe6t-1] ) * *wy1vqfzu;
- gp1jxzuh = dufozmt7_[urohxe6t-1] + (tgiyxdw1_[urohxe6t-1] ) * *wy1vqfzu;
-
- wkumc9idwrk[yq6lorbx] =
- wkumc9idwrk[gp1jxzuh] = rbne6ouj[*iii-1 + (urohxe6t-1) * *lqsahu0r];
- }
-
- q6zdcwxk = 0.0e0;
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- q6zdcwxk += bmb[bpvaqm5z-1 + (yq6lorbx-1) * *wy1vqfzu] *
- wkumc9idwrk[yq6lorbx-1 + (bpvaqm5z-1) * *wy1vqfzu];
- }
- ifys6woa[*iii-1 + (bpvaqm5z-1) * *lqsahu0r] = q6zdcwxk;
+ for (urohxe6t = 1; urohxe6t <= *kvowz9ht; urohxe6t++) {
+ yq6lorbx = tgiyxdw1_[urohxe6t-1] + (dufozmt7_[urohxe6t-1] ) * *wy1vqfzu;
+ gp1jxzuh = dufozmt7_[urohxe6t-1] + (tgiyxdw1_[urohxe6t-1] ) * *wy1vqfzu;
+
+ wkumc9idwrk[yq6lorbx] =
+ wkumc9idwrk[gp1jxzuh] = rbne6ouj[*iii-1 + (urohxe6t-1) * *lqsahu0r];
+ }
+
+ q6zdcwxk = 0.0e0;
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ q6zdcwxk += bmb[bpvaqm5z-1 + (yq6lorbx-1) * *wy1vqfzu] *
+ wkumc9idwrk[yq6lorbx-1 + (bpvaqm5z-1) * *wy1vqfzu];
+ }
+ ifys6woa[*iii-1 + (bpvaqm5z-1) * *lqsahu0r] = q6zdcwxk;
}
Free(wkumc9idwrk);
@@ -1210,6 +1443,8 @@ void Free_fapc0tnbewg7qruh(double *wkumc9idWrk1,
}
+
+
void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int *lqsahu0r,
double wbkq9zyi[], double lamvec[], double hdnw2fts[],
@@ -1226,6 +1461,10 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
double tdcb8ilk[]) {
+
+
+
+
int ayfnwr1v, yq6lorbx, gp1jxzuh, qemj9asg, dvhw1ulq, infoqr_svdbx3tk,
rutyk8mg = *lqsahu0r * *kgwmz4ip;
int pqneb2ra = 1, ybnsqgo9 = 101;
@@ -1267,14 +1506,15 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
- vsuff9(ftnjamu2, lqsahu0r, ezlgm2up,
- ci1oyxas, tlgduey8, rbne6ouj,
- wkumc9idpygsw6ko, wkumc9idpasjmo8g, wkumc9ideshvo2ic,
- wkumc9idueshvo2ic, wkumc9idonxjvw8u, &dvhw1ulq,
- wy1vqfzu, kvowz9ht, npjlv3mr,
- conmat, kgwmz4ip,
- iz2nbfjc, &pqneb2ra,
- &npjlv3mreshvo2ic, &dim2eshvo2ic);
+
+ vsuff9(ftnjamu2, lqsahu0r, ezlgm2up,
+ ci1oyxas, tlgduey8, rbne6ouj,
+ wkumc9idpygsw6ko, wkumc9idpasjmo8g, wkumc9ideshvo2ic,
+ wkumc9idueshvo2ic, wkumc9idonxjvw8u, &dvhw1ulq,
+ wy1vqfzu, kvowz9ht, npjlv3mr,
+ conmat, kgwmz4ip,
+ iz2nbfjc, &pqneb2ra,
+ &npjlv3mreshvo2ic, &dim2eshvo2ic);
if (dvhw1ulq != 1) {
Rprintf("Error in fapc0tnbewg7qruh after calling vsuff9.\n");
@@ -1293,7 +1533,7 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
xmin = wkumc9idpygsw6ko[0];
xrange = wkumc9idpygsw6ko[*lqsahu0r-1] - wkumc9idpygsw6ko[0];
for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- wkumc9idpygsw6ko[ayfnwr1v-1] = (wkumc9idpygsw6ko[ayfnwr1v-1] - xmin) / xrange;
+ wkumc9idpygsw6ko[ayfnwr1v-1] = (wkumc9idpygsw6ko[ayfnwr1v-1] - xmin) / xrange;
}
*ldk = 4 * *kgwmz4ip;
@@ -1302,11 +1542,10 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
*fbd5yktj = 0;
- for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
- if (wbkq9zyi[yq6lorbx-1] == 0.0e0) {
- hdnw2fts[yq6lorbx-1] += 1.0e0;
- }
- }
+
+
+
+
Yee_spline(wkumc9idpygsw6ko, wkumc9idonxjvw8u, wkumc9ideshvo2ic, gkdx5jals,
lqsahu0r, acpios9q, ldk, kgwmz4ip, &dim2eshvo2ic,
@@ -1319,17 +1558,36 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
tdcb8ilk);
+ for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+ }
+
+
+
+
+
+
+
+
+
+ if (1) { // Do not execute this code block
fpdlcqk9hdnw2fts = hdnw2fts;
+ fpdlcqk9ifys6woa = ifys6woa;
for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
- *fpdlcqk9hdnw2fts = -1.0e0;
- fpdlcqk9ifys6woa = ifys6woa + (yq6lorbx-1) * *ftnjamu2;
- for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- *fpdlcqk9hdnw2fts += *fpdlcqk9ifys6woa++;
- }
- fpdlcqk9hdnw2fts++;
+ *fpdlcqk9hdnw2fts = 0.0e0;
+ *fpdlcqk9hdnw2fts = -1.0e0;
+ for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+ *fpdlcqk9hdnw2fts += *fpdlcqk9ifys6woa++;
+ }
+ fpdlcqk9hdnw2fts++;
+ }
}
+
+
+
+
+
if (*kgwmz4ip >= 1) {
fapc0tnbx6kanjdh(wkumc9idpygsw6ko, wkumc9idwk4, lqsahu0r, kgwmz4ip);
@@ -1357,36 +1615,36 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
if (*yzoe1rsp) {
- fvlmz9iyC_lkhnw9yq(wkumc9idwk4, wkumc9ids0, &rutyk8mg, &xjc4ywlh, &dvhw1ulq);
-
- if (dvhw1ulq != 1) {
- Rprintf("Error in fapc0tnbewg7qruh calling fvlmz9iyC_lkhnw9yq.\n");
- Free_fapc0tnbewg7qruh(wkumc9idWrk1,
- wkumc9idges1xpkr,
- wkumc9idbeta, wkumc9idfasrkub3,
- wkumc9idsout, wkumc9idr0oydcxb,
- wkumc9idub4xioar, wkumc9ideffect,
- wkumc9idueshvo2ic, wkumc9ids0,
- wkumc9idpygsw6ko, wkumc9idpasjmo8g,
- wkumc9ideshvo2ic, wkumc9idonxjvw8u,
- wkumc9idwk4);
- return;
- }
-
- for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
- dtad5vhsu = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 ) * kgwmz4ip2];
- do3jyipdf = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2];
- dpq0hfucn = wkumc9ids0[yq6lorbx-1 + *kgwmz4ip +
- (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2];
- fpdlcqk9ui8ysltq = ui8ysltq + (yq6lorbx-1) * *ftnjamu2;
- fpdlcqk9pygsw6ko = wkumc9idpygsw6ko;
- for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
- *fpdlcqk9ui8ysltq -= dtad5vhsu + *fpdlcqk9pygsw6ko *
- (2.0 * do3jyipdf + *fpdlcqk9pygsw6ko * dpq0hfucn);
- fpdlcqk9ui8ysltq++;
- fpdlcqk9pygsw6ko++;
- }
+ fvlmz9iyC_lkhnw9yq(wkumc9idwk4, wkumc9ids0, &rutyk8mg, &xjc4ywlh, &dvhw1ulq);
+
+ if (dvhw1ulq != 1) {
+ Rprintf("Error in fapc0tnbewg7qruh calling fvlmz9iyC_lkhnw9yq.\n");
+ Free_fapc0tnbewg7qruh(wkumc9idWrk1,
+ wkumc9idges1xpkr,
+ wkumc9idbeta, wkumc9idfasrkub3,
+ wkumc9idsout, wkumc9idr0oydcxb,
+ wkumc9idub4xioar, wkumc9ideffect,
+ wkumc9idueshvo2ic, wkumc9ids0,
+ wkumc9idpygsw6ko, wkumc9idpasjmo8g,
+ wkumc9ideshvo2ic, wkumc9idonxjvw8u,
+ wkumc9idwk4);
+ return;
+ }
+
+ for (yq6lorbx = 1; yq6lorbx <= *kgwmz4ip; yq6lorbx++) {
+ dtad5vhsu = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 ) * kgwmz4ip2];
+ do3jyipdf = wkumc9ids0[yq6lorbx-1 + (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2];
+ dpq0hfucn = wkumc9ids0[yq6lorbx-1 + *kgwmz4ip +
+ (yq6lorbx-1 + *kgwmz4ip) * kgwmz4ip2];
+ fpdlcqk9ui8ysltq = ui8ysltq + (yq6lorbx-1) * *ftnjamu2;
+ fpdlcqk9pygsw6ko = wkumc9idpygsw6ko;
+ for (ayfnwr1v = 1; ayfnwr1v <= *lqsahu0r; ayfnwr1v++) {
+ *fpdlcqk9ui8ysltq -= dtad5vhsu + *fpdlcqk9pygsw6ko *
+ (2.0 * do3jyipdf + *fpdlcqk9pygsw6ko * dpq0hfucn);
+ fpdlcqk9ui8ysltq++;
+ fpdlcqk9pygsw6ko++;
}
+ }
}
} else {
@@ -1421,8 +1679,9 @@ void fapc0tnbewg7qruh(double ci1oyxas[], double tlgduey8[], double rbne6ouj[],
}
-void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgduey8[],
- double rbne6ouj[], double hdnw2fts[], double lamvec[], double wbkq9zyi[],
+void Yee_vbfa(int psdvgce3[], double *fjcasv7g,
+ double he7mqnvy[], double tlgduey8[], double rbne6ouj[],
+ double hdnw2fts[], double lamvec[], double wbkq9zyi[],
int ezlgm2up[], int lqsahu0r[], int which[],
double kispwgx3[], double m0ibglfx[],
double zshtfg8c[], double ui8ysltq[],
@@ -1431,7 +1690,8 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[],
double ifys6woa[],
double rpyis2kc[], double gkdx5jals[],
- int nbzjkpi3[], int acpios9q[], int jwbkl9fp[]) {
+ int nbzjkpi3[], int lindex[],
+ int acpios9q[], int jwbkl9fp[]) {
double *ghdetj8v, *zpcqv3uj;
@@ -1451,11 +1711,12 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
int itdcb8ilk[1];
double tdcb8ilk[4];
- itdcb8ilk[0] = psdvgce3[15];
- tdcb8ilk[0] = fjcasv7g[2];
- tdcb8ilk[1] = fjcasv7g[3];
- tdcb8ilk[2] = fjcasv7g[4];
- tdcb8ilk[3] = fjcasv7g[5];
+
+ itdcb8ilk[0] = psdvgce3[15]; /* contr.sp$c5aesxku in s.vam() */
+ tdcb8ilk[0] = fjcasv7g[2]; /* contr.sp$low in s.vam() */
+ tdcb8ilk[1] = fjcasv7g[3]; /* contr.sp$high in s.vam() */
+ tdcb8ilk[2] = fjcasv7g[4]; /* contr.sp$tol in s.vam() */
+ tdcb8ilk[3] = fjcasv7g[5]; /* contr.sp$eps in s.vam() */
wy1vqfzu = psdvgce3 + 7;
ftnjamu2 = psdvgce3;
@@ -1470,8 +1731,8 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
npjlv3mr = psdvgce3[12];
ldk = psdvgce3[14];
- zpcqv3uj = fjcasv7g + 0;;
- ghdetj8v = fjcasv7g + 1;
+ zpcqv3uj = fjcasv7g + 0; /* bf.qaltf0nz */
+ ghdetj8v = fjcasv7g + 1; /* ghdetj8v */
fapc0tnbvbfa1(ftnjamu2, wy1vqfzu, ezlgm2up, lqsahu0r, which,
he7mqnvy, tlgduey8, rbne6ouj,
@@ -1482,7 +1743,8 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
ulm3dvzg, hnpt1zym, iz2nbfjc,
ifys6woa,
rpyis2kc, gkdx5jals, ghdetj8v,
- nbzjkpi3, acpios9q, jwbkl9fp,
+ nbzjkpi3, lindex,
+ acpios9q, jwbkl9fp,
&nhja0izq, &lyzoe1rsp, &ueb8hndv, &gtrlbz3e,
&rutyk8mg, &xjc4ywlh,
&kvowz9ht, &npjlv3mr, &fbd5yktj, &ldk, &algpft4y,
@@ -1506,7 +1768,8 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
int ulm3dvzg[], int hnpt1zym[], int iz2nbfjc[],
double ifys6woa[],
double rpyis2kc[], double gkdx5jals[], double *ghdetj8v,
- int nbzjkpi3[], int acpios9q[], int jwbkl9fp[],
+ int nbzjkpi3[], int lindex[],
+ int acpios9q[], int jwbkl9fp[],
int *nhja0izq, int *yzoe1rsp, int *ueb8hndv, int *gtrlbz3e,
int *rutyk8mg, int *xjc4ywlh,
int *kvowz9ht, int *npjlv3mr, int *fbd5yktj, int *ldk, int *algpft4y,
@@ -1679,7 +1942,12 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
kvowz9ht, fbd5yktj, ldk, algpft4y, yzoe1rsp,
rpyis2kc + nbzjkpi3[gp1jxzuh-1]-1,
gkdx5jals + jwbkl9fp[gp1jxzuh-1]-1,
- ifys6woa + (hnpt1zym[gp1jxzuh-1]-1) * *ftnjamu2,
+
+
+
+ ifys6woa + lindex[gp1jxzuh-1]-1,
+
+
hjm2ktyr + (hnpt1zym[gp1jxzuh-1]-1) * *wy1vqfzu,
wkumc9idall_xecbg0pf + jwbkl9fp[gp1jxzuh-1]-1,
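
In the call above, each smooth term's block of ifys6woa is now located through the new 1-based offset vector lindex[] rather than by a fixed (hnpt1zym[k]-1) * *ftnjamu2 column stride, so blocks of unequal length can be packed end to end. A small sketch of that addressing scheme, with hypothetical names and sizes:

/* Sketch of 1-based cumulative-offset addressing into one packed array,
 * mirroring ifys6woa + lindex[k]-1 above.  Names are illustrative only. */
#include <stdio.h>

int main(void) {
  int    blocklen[3] = {2, 3, 4};   /* three blocks of unequal length    */
  int    lindex[3];                 /* 1-based start of each block       */
  double packed[2 + 3 + 4];
  int    k, i, offset = 1;

  for (k = 0; k < 3; k++) {         /* build the offset vector           */
    lindex[k] = offset;
    offset   += blocklen[k];
  }
  for (i = 0; i < 9; i++) packed[i] = (double) i;

  for (k = 0; k < 3; k++) {         /* address block k as a sub-array    */
    double *block = packed + lindex[k] - 1;
    printf("block %d starts at %g, length %d\n",
           k + 1, block[0], blocklen[k]);
  }
  return 0;
}
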
@@ -1696,24 +1964,24 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
tdcb8ilk);
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- if (iz2nbfjc[gp1jxzuh-1] == 1) {
- for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] +=
- kispwgx3[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+yq6lorbx-2) * *ftnjamu2];
- }
- } else {
- for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
- for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- m0ibglfx[yq6lorbx-1+ (ayfnwr1v-1) * *wy1vqfzu] +=
- hjm2ktyr[yq6lorbx-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *wy1vqfzu] *
- kispwgx3[ayfnwr1v-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2];
- }
- }
- }
+ if (iz2nbfjc[gp1jxzuh-1] == 1) {
for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] -=
- wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2];
+ m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] +=
+ kispwgx3[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+yq6lorbx-2) * *ftnjamu2];
}
+ } else {
+ for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
+ for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+ m0ibglfx[yq6lorbx-1+ (ayfnwr1v-1) * *wy1vqfzu] +=
+ hjm2ktyr[yq6lorbx-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *wy1vqfzu] *
+ kispwgx3[ayfnwr1v-1+ (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2];
+ }
+ }
+ }
+ for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+ m0ibglfx[yq6lorbx-1 + (ayfnwr1v-1) * *wy1vqfzu] -=
+ wkumc9idoldmat[ayfnwr1v-1 + (yq6lorbx-1) * *ftnjamu2];
+ }
}
for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
@@ -1770,52 +2038,56 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
}
if (*ueb8hndv == 1) {
- g2dnwteb = 1.0e0;
+ g2dnwteb = 1.0e0;
}
}
+
for (yq6lorbx = 1; yq6lorbx <= *xjc4ywlh; yq6lorbx++) {
- wkumc9idwk9[yq6lorbx-1] = zshtfg8c[yq6lorbx-1];
+ wkumc9idwk9[yq6lorbx-1] = zshtfg8c[yq6lorbx-1];
}
for (yq6lorbx = 1; yq6lorbx <= *xjc4ywlh; yq6lorbx++) {
- zshtfg8c[ges1xpkr[yq6lorbx-1]-1] = wkumc9idwk9[yq6lorbx-1];
+ zshtfg8c[ges1xpkr[yq6lorbx-1]-1] = wkumc9idwk9[yq6lorbx-1];
}
fpdlcqk9m0ibglfx = m0ibglfx; fpdlcqk9ub4xioar = wkumc9idub4xioar;
for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
- *fpdlcqk9m0ibglfx += *fpdlcqk9ub4xioar++;
- fpdlcqk9m0ibglfx++;
- }
+ for (yq6lorbx = 1; yq6lorbx <= *wy1vqfzu; yq6lorbx++) {
+ *fpdlcqk9m0ibglfx += *fpdlcqk9ub4xioar++;
+ fpdlcqk9m0ibglfx++;
+ }
}
if (*yzoe1rsp && (*nhja0izq > 0)) {
- for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) {
- for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
- fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */
- ezlgm2up + (gp1jxzuh-1) * *ftnjamu2,
- ui8ysltq + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2,
- wkumc9idoldmat);
- for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- ui8ysltq[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
- wkumc9idoldmat[ayfnwr1v-1];
- }
- }
+ for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) {
+ for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
+
+ fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */
+ ezlgm2up + (gp1jxzuh-1) * *ftnjamu2,
+ ui8ysltq + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2,
+ wkumc9idoldmat);
+ for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+ ui8ysltq[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
+ wkumc9idoldmat[ayfnwr1v-1];
+ }
}
-
- for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) {
- for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
- fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */
- ezlgm2up + (gp1jxzuh-1) * *ftnjamu2,
- ifys6woa + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2,
- wkumc9idoldmat);
- for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
- ifys6woa[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
- wkumc9idoldmat[ayfnwr1v-1];
- }
-
- }
+ }
+
+
+ if (0) {
+ for (gp1jxzuh = 1; gp1jxzuh <= *nhja0izq; gp1jxzuh++) {
+ for (wg1xifdy = 1; wg1xifdy <= ulm3dvzg[gp1jxzuh-1]; wg1xifdy++) {
+ fapc0tnbshm8ynte(ftnjamu2, /* lqsahu0r + gp1jxzuh-1, */
+ ezlgm2up + (gp1jxzuh-1) * *ftnjamu2,
+ ifys6woa + (hnpt1zym[ gp1jxzuh-1] + wg1xifdy-2) * *ftnjamu2,
+ wkumc9idoldmat);
+ for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
+ ifys6woa[ayfnwr1v-1 + (hnpt1zym[gp1jxzuh-1]+wg1xifdy-2) * *ftnjamu2] =
+ wkumc9idoldmat[ayfnwr1v-1];
+ }
}
+ }
+ }
}
Free(wkumc9idwkbzmd6ftv); Free(wkumc9idwk9);
@@ -1828,6 +2100,8 @@ void fapc0tnbvbfa1(int *ftnjamu2, int *wy1vqfzu, int ezlgm2up[], int lqsahu0r[],
}
+
+
void fapc0tnbx6kanjdh(double sjwyig9t[], double xout[], int *f8yswcat, int *wy1vqfzu) {
@@ -1929,6 +2203,8 @@ void fapc0tnbdsrt0gem(int *f8yswcat, double sjwyig9t[], double po8rwsmy[], doubl
}
+
+
void fapc0tnbshm8ynte(int *ftnjamu2,
int ezlgm2up[], double pygsw6ko[], double sjwyig9t[]) {
@@ -1937,7 +2213,7 @@ void fapc0tnbshm8ynte(int *ftnjamu2,
for (ayfnwr1v = 0; ayfnwr1v < *ftnjamu2; ayfnwr1v++) {
- *sjwyig9t++ = pygsw6ko[*ezlgm2up++ -1];
+ *sjwyig9t++ = pygsw6ko[*ezlgm2up++ -1];
}
}
diff --git a/src/vlinpack1.f b/src/vlinpack1.f
index 127ab8e..5bc3be7 100644
--- a/src/vlinpack1.f
+++ b/src/vlinpack1.f
@@ -4,9 +4,9 @@
double precision dsign, dabs, dmax1, dsqrt
integer min0
integer ldx,n,p,xwdf5ltg
- integer jpvt(1)
+ integer jpvt(*)
integer j,jj,jp,l,lup,curpvt
- double precision x(ldx,p),fasrkub3(p),work(1),eps
+ double precision x(ldx,p),fasrkub3(p),work(*),eps
double precision vdnrm2,tt
double precision ddot8,nrmxl,t
do 23000 j=1,p
diff --git a/src/vlinpack2.f b/src/vlinpack2.f
index ecd0ce5..9dbf0ca 100644
--- a/src/vlinpack2.f
+++ b/src/vlinpack2.f
@@ -160,7 +160,14 @@ c Works
subroutine vdpbfa7(abd,lda,n,m,info,d)
integer lda,n,m,info
- double precision abd(lda,1), d(n)
+ double precision abd(lda,*), d(n)
+c
+c
+c
+c 20130419: orig.:
+c double precision abd(lda,1), d(n)
+c
+c
c
c vdpbfa7 is dpbfa8 but uses Rational Cholesky instead of ordinary
c Cholesky
@@ -240,7 +247,14 @@ c
subroutine vdpbsl7(abd,lda,n,m,b,d)
integer lda,n,m
- double precision abd(lda,1),b(1),d(1)
+ double precision abd(lda,*),b(*),d(*)
+c
+c
+c
+c 20130419: orig:
+c double precision abd(lda,1),b(1),d(1)
+c
+c
c
c vdpbsl7 is dpbsl8 but uses Rational Cholesky instead of ordinary
c Cholesky
diff --git a/src/vlinpack3.f b/src/vlinpack3.f
index fd03704..91261ba 100644
--- a/src/vlinpack3.f
+++ b/src/vlinpack3.f
@@ -19,7 +19,15 @@ c constant times a vector plus a vector.
c uses unrolled loops for increments equal to one.
c jack dongarra, linpack, 3/11/78.
c
- double precision dx(1),dy(1),da
+c
+c
+c 20130419: orig.:
+c double precision dx(1),dy(1),da
+c
+c
+c
+c
+ double precision dx(*),dy(*),da
integer i,incx,incy,m,mp1,n
c Undeclared, so added by T.Yee
@@ -74,7 +82,7 @@ c copies a vector, x, to a vector, y.
c uses unrolled loops for increments equal to one.
c jack dongarra, linpack, 3/11/78.
c
- double precision dx(1),dy(1)
+ double precision dx(*),dy(*)
integer i,incx,incy,ix,iy,m,mp1,n
c
if(n.le.0)return
@@ -134,7 +142,7 @@ c forms the dot product of two vectors.
c uses unrolled loops for increments equal to one.
c jack dongarra, linpack, 3/11/78.
c
- double precision dx(1),dy(1),dtemp
+ double precision dx(*),dy(*),dtemp
integer i,incx,incy,ix,iy,m,mp1,n
c
ddot8 = 0.0d0
@@ -326,7 +334,7 @@ c scales a vector by a constant.
c uses unrolled loops for increment equal to one.
c jack dongarra, linpack, 3/11/78.
c
- double precision da,dx(1)
+ double precision da,dx(*)
integer i,incx,m,mp1,n,nincx
c
if(n.le.0)return
@@ -386,8 +394,16 @@ c
subroutine vdqrsl(x,ldx,n,k,qraux,y,qy,qty,b,rsd,xb,job,info)
implicit logical (a-z)
integer ldx,n,k,job,info
- double precision x(ldx,1),qraux(1),y(1),qy(1),qty(1),b(1),rsd(1),
- * xb(1)
+ double precision x(ldx,*),qraux(*),y(*),qy(*),qty(*),b(*),rsd(*),
+ * xb(*)
+c
+c
+c
+c 20130419: orig.:
+c double precision x(ldx,1),qraux(1),y(1),qy(1),qty(1),b(1),rsd(1),
+c * xb(1)
+c
+c
c
c dqrsl applies the output of dqrdc to compute coordinate
c transformations, projections, and least squares solutions.
diff --git a/src/vmux.f b/src/vmux.f
index 31d6072..07b270e 100644
--- a/src/vmux.f
+++ b/src/vmux.f
@@ -1,6 +1,6 @@
subroutine qpsedg8xf(tgiyxdw1, dufozmt7, wy1vqfzu)
implicit logical (a-z)
- integer wy1vqfzu, tgiyxdw1(1), dufozmt7(1)
+ integer wy1vqfzu, tgiyxdw1(*), dufozmt7(*)
integer urohxe6t, bpvaqm5z, ayfnwr1v
ayfnwr1v = 1
urohxe6t = wy1vqfzu
@@ -23,7 +23,7 @@
end
integer function viamf(cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1,
&dufozmt7)
- integer cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1(1), dufozmt7(1)
+ integer cz8qdfyj, rvy1fpli, wy1vqfzu, tgiyxdw1(*), dufozmt7(*)
integer urohxe6t, imk5wjxg
imk5wjxg = wy1vqfzu*(wy1vqfzu+1)/2
do 23009 urohxe6t=1,imk5wjxg
@@ -78,7 +78,7 @@
subroutine nudh6szqf(wpuarq2m, tlgduey8, lfu2qhid, dimu, tgiyxdw1,
& dufozmt7, kuzxj1lo, wy1vqfzu, wk1200)
implicit logical (a-z)
- integer dimu, tgiyxdw1(1), dufozmt7(1), kuzxj1lo, wy1vqfzu
+ integer dimu, tgiyxdw1(*), dufozmt7(*), kuzxj1lo, wy1vqfzu
double precision wpuarq2m(dimu,kuzxj1lo), tlgduey8(kuzxj1lo,
&wy1vqfzu), lfu2qhid(wy1vqfzu,kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
double precision q6zdcwxk
@@ -111,7 +111,7 @@
subroutine vbksf(wpuarq2m, bvecto, wy1vqfzu, kuzxj1lo, wk1200,
&tgiyxdw1, dufozmt7, dimu)
implicit logical (a-z)
- integer wy1vqfzu, kuzxj1lo, tgiyxdw1(1), dufozmt7(1), dimu
+ integer wy1vqfzu, kuzxj1lo, tgiyxdw1(*), dufozmt7(*), dimu
double precision wpuarq2m(dimu,kuzxj1lo), bvecto(wy1vqfzu,
&kuzxj1lo), wk1200(wy1vqfzu,wy1vqfzu)
double precision q6zdcwxk
@@ -204,8 +204,8 @@
subroutine mxrbkut0f(wpuarq2m, he7mqnvy, wy1vqfzu, xjc4ywlh,
&kuzxj1lo, wk1200, wk3400, tgiyxdw1, dufozmt7, dimu, rutyk8mg)
implicit logical (a-z)
- integer dimu, wy1vqfzu, xjc4ywlh, kuzxj1lo, tgiyxdw1(1), dufozmt7(
- &1), rutyk8mg
+ integer dimu, wy1vqfzu, xjc4ywlh, kuzxj1lo, tgiyxdw1(*), dufozmt7(
+ &*), rutyk8mg
double precision wpuarq2m(dimu,kuzxj1lo), he7mqnvy(rutyk8mg,
&xjc4ywlh), wk1200(wy1vqfzu,wy1vqfzu), wk3400(wy1vqfzu,xjc4ywlh)
double precision q6zdcwxk
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git