[r-cran-vgam] 51/63: Import Upstream version 1.0-2
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:40 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 672311024a155ba7054ef3d4817abdc4a95b2ff1
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:17:06 2017 +0100
Import Upstream version 1.0-2
---
BUGS | 16 +-
DESCRIPTION | 13 +-
MD5 | 424 +--
NAMESPACE | 52 +-
NEWS | 58 +
R/Links.R | 4 +-
R/aamethods.q | 9 +-
R/add1.vglm.q | 7 -
R/attrassign.R | 36 +-
R/bAIC.q | 2 +-
R/build.terms.vlm.q | 8 +-
R/calibrate.q | 4 +-
R/cao.R | 12 +-
R/cao.fit.q | 124 +-
R/coef.vlm.q | 4 +-
R/confint.vlm.R | 20 +-
R/cqo.R | 8 +-
R/cqo.fit.q | 39 +-
R/deviance.vlm.q | 4 +-
R/effects.vglm.q | 2 +-
R/family.actuary.R | 1219 ++++---
R/family.aunivariate.R | 1064 +++++-
R/family.basics.R | 182 +-
R/family.binomial.R | 145 +-
R/family.bivariate.R | 182 +-
R/family.categorical.R | 196 +-
R/family.censored.R | 54 +-
R/family.circular.R | 31 +-
R/family.exp.R | 10 +-
R/family.extremes.R | 1444 ++++----
R/family.functions.R | 3 +-
R/family.genetic.R | 45 +-
R/family.glmgam.R | 67 +-
R/family.loglin.R | 4 +-
R/family.math.R | 10 +-
R/family.mixture.R | 26 +-
R/family.nonlinear.R | 12 +-
R/family.normal.R | 311 +-
R/family.others.R | 463 ++-
R/family.positive.R | 383 ++-
R/family.qreg.R | 376 +-
R/family.rcim.R | 26 +-
R/family.rcqo.R | 16 +-
R/family.robust.R | 18 +-
R/family.rrr.R | 172 +-
R/family.sur.R | 8 +-
R/family.survival.R | 24 +-
R/family.ts.R | 1051 +++---
R/family.univariate.R | 1585 +++------
R/family.vglm.R | 4 +-
R/family.zeroinf.R | 5695 +++++++++++++++++++------------
R/fittedvlm.R | 2 +-
R/formula.vlm.q | 2 +-
R/generic.q | 2 +-
R/links.q | 24 +-
R/logLik.vlm.q | 2 +-
R/lrwaldtest.R | 8 +-
R/model.matrix.vglm.q | 184 +-
R/mux.q | 14 +-
R/nobs.R | 2 +-
R/penvps.R | 139 +
R/{plot.vglm.q => plot.vgam.R} | 120 +-
R/plot.vglm.R | 106 +
R/predict.vgam.q | 7 +-
R/predict.vglm.q | 8 +-
R/predict.vlm.q | 20 +-
R/print.vglm.q | 8 +-
R/print.vlm.q | 2 +-
R/psfun.R | 183 +
R/psv2magic.R | 97 +
R/qrrvglm.control.q | 13 +-
R/qtplot.q | 27 +-
R/residuals.vlm.q | 8 +-
R/rrvglm.R | 13 +-
R/rrvglm.control.q | 2 +-
R/rrvglm.fit.q | 17 +-
R/s.q | 2 +-
R/s.vam.q | 14 +-
R/simulate.vglm.R | 17 +-
R/smart.R | 8 +-
R/step.vglm.q | 17 -
R/summary.vgam.q | 4 +-
R/summary.vglm.q | 2 +-
R/summary.vlm.q | 4 +-
R/vgam.R | 133 +-
R/vgam.control.q | 10 +-
R/vgam.fit.q | 131 +-
R/vgam.match.q | 8 +-
R/vglm.R | 17 +-
R/vglm.control.q | 2 +-
R/vglm.fit.q | 273 +-
R/vlm.R | 4 +-
R/vlm.wfit.q | 138 +-
R/vsmooth.spline.q | 40 +-
build/vignette.rds | Bin 480 -> 478 bytes
data/Huggins89.t1.rda | Bin 443 -> 442 bytes
data/Huggins89table1.rda | Bin 445 -> 444 bytes
data/alclevels.rda | Bin 551 -> 549 bytes
data/alcoff.rda | Bin 548 -> 546 bytes
data/auuc.rda | Bin 246 -> 244 bytes
data/backPain.rda | Bin 474 -> 480 bytes
data/beggs.rda | Bin 198 -> 196 bytes
data/car.all.rda | Bin 6968 -> 6959 bytes
data/cfibrosis.rda | Bin 264 -> 263 bytes
data/corbet.rda | Bin 244 -> 236 bytes
data/crashbc.rda | Bin 374 -> 374 bytes
data/crashf.rda | Bin 341 -> 340 bytes
data/crashi.rda | Bin 491 -> 489 bytes
data/crashmc.rda | Bin 385 -> 385 bytes
data/crashp.rda | Bin 376 -> 375 bytes
data/crashtr.rda | Bin 361 -> 360 bytes
data/deermice.rda | Bin 393 -> 393 bytes
data/ducklings.rda | Bin 561 -> 560 bytes
data/finney44.rda | Bin 210 -> 209 bytes
data/flourbeetle.rda | Bin 344 -> 342 bytes
data/hspider.rda | Bin 1344 -> 1343 bytes
data/lakeO.rda | Bin 335 -> 333 bytes
data/leukemia.rda | Bin 329 -> 328 bytes
data/marital.nz.rda | Bin 10432 -> 10492 bytes
data/melbmaxtemp.rda | Bin 4263 -> 4257 bytes
data/pneumo.rda | Bin 267 -> 266 bytes
data/prinia.rda | Bin 1229 -> 1228 bytes
data/ruge.rda | Bin 258 -> 254 bytes
data/toxop.rda | Bin 473 -> 472 bytes
data/venice.rda | Bin 981 -> 983 bytes
data/venice90.rda | Bin 8000 -> 8036 bytes
data/wine.rda | Bin 270 -> 270 bytes
inst/doc/categoricalVGAM.pdf | Bin 645909 -> 646125 bytes
inst/doc/crVGAM.pdf | Bin 421544 -> 421526 bytes
man/AR1.Rd | 255 +-
man/AR1EIM.Rd | 303 ++
man/AR1UC.Rd | 14 +-
man/CommonVGAMffArguments.Rd | 22 +-
man/Select.Rd | 3 +-
man/VGAM-package.Rd | 19 +-
man/betaII.Rd | 3 +-
man/betaR.Rd | 1 +
man/betabinomUC.Rd | 45 +-
man/betaff.Rd | 3 +-
man/binom2.orUC.Rd | 18 +-
man/binormal.Rd | 2 +-
man/binormalUC.Rd | 2 +-
man/calibrate.qrrvglm.control.Rd | 2 +-
man/cens.gumbel.Rd | 2 +-
man/clo.Rd | 2 +-
man/cumulative.Rd | 3 +-
man/dagum.Rd | 4 +-
man/depvar.Rd | 2 +-
man/dirichlet.Rd | 9 +-
man/double.expbinomial.Rd | 2 +-
man/ducklings.Rd | 2 +-
man/fisk.Rd | 3 +-
man/gaussianff.Rd | 2 +-
man/gev.Rd | 119 +-
man/gevUC.Rd | 62 +-
man/gpd.Rd | 5 +-
man/gpdUC.Rd | 34 +-
man/gumbel.Rd | 25 +-
man/gumbelUC.Rd | 2 +-
man/guplot.Rd | 2 +-
man/huber.Rd | 2 +-
man/huberUC.Rd | 2 +-
man/identitylink.Rd | 2 +-
man/inv.paralogistic.Rd | 2 +-
man/linkfun.Rd | 2 +-
man/logc.Rd | 4 +-
man/loge.Rd | 2 +-
man/logoff.Rd | 2 +-
man/lomax.Rd | 3 +-
man/marital.nz.Rd | 2 +-
man/melbmaxtemp.Rd | 2 +-
man/multilogit.Rd | 2 +-
man/multinomial.Rd | 21 +-
man/nbcanlink.Rd | 2 +-
man/negbinomial.Rd | 110 +-
man/negbinomial.size.Rd | 4 +-
man/normal.vcm.Rd | 17 +-
man/notdocumentedyet.Rd | 35 +-
man/oiposbinomUC.Rd | 121 +
man/oipospoisUC.Rd | 116 +
man/oipospoisson.Rd | 91 +
man/oxtemp.Rd | 2 +-
man/paralogistic.Rd | 3 +-
man/plotrcim0.Rd | 9 +-
man/plotvgam.Rd | 2 +-
man/plotvglm.Rd | 99 +-
man/poissonff.Rd | 2 +-
man/posbinomUC.Rd | 6 +-
man/posgeomUC.Rd | 2 +-
man/posnegbinUC.Rd | 9 +
man/posnegbinomial.Rd | 17 +-
man/pospoisUC.Rd | 7 +-
man/pospoisson.Rd | 4 +-
man/ps.Rd | 267 ++
man/qrrvglm.control.Rd | 6 +-
man/qtplot.gumbel.Rd | 7 +-
man/reciprocal.Rd | 2 +-
man/{rlplot.egev.Rd => rlplot.gevff.Rd} | 16 +-
man/rrvglm.control.Rd | 2 +-
man/rrvglm.optim.control.Rd | 2 +-
man/s.Rd | 17 +-
man/sinmad.Rd | 4 +-
man/trplot.qrrvglm.Rd | 6 +-
man/undocumented-methods.Rd | 2 +
man/vgam.Rd | 18 +-
man/vgam.control.Rd | 10 +-
man/vglm.Rd | 1 +
man/vglm.control.Rd | 4 +-
man/zanegbinomial.Rd | 47 +-
man/zapoisson.Rd | 4 +-
man/zetaff.Rd | 9 +-
man/zibinomUC.Rd | 17 +-
man/zibinomial.Rd | 4 +-
man/zinegbinomial.Rd | 35 +-
man/zipebcom.Rd | 9 +-
man/zipoisUC.Rd | 5 +-
man/zipoisson.Rd | 40 +-
man/zoabetaR.Rd | 107 +
man/{ozibetaUC.Rd => zoabetaUC.Rd} | 75 +-
219 files changed, 12206 insertions(+), 7694 deletions(-)
diff --git a/BUGS b/BUGS
index d80b37c..3d65463 100755
--- a/BUGS
+++ b/BUGS
@@ -1,6 +1,13 @@
Here is a list of known bugs.
+2016-05
+
+rcim() with alaplace2() may fail.
+
+
+
+
2014-02
The subset argument of vgam() may not work, especially with
@@ -18,8 +25,6 @@ t(cmat) %*% cmat is diagonal.
-
-
2013-07
quasipoisson()'s scale parameter estimate does not handle
@@ -28,7 +33,6 @@ prior weights correctly.
-
2012-09
@@ -64,8 +68,6 @@ from before. But cao() is still working... getting it going soon hopefully.
-
-
2009/07/13
cqo() fails... I think it is due to initial values being faulty.
Hope to look into it soon.
@@ -73,10 +75,6 @@ Hope to look into it soon.
-
-
-
-
2009/06/18
For a given VGAM family function,
diff --git a/DESCRIPTION b/DESCRIPTION
index 81d0c3d..f7e0605 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,16 +1,17 @@
Package: VGAM
-Version: 1.0-1
-Date: 2016-03-15
+Version: 1.0-2
+Date: 2016-05-26
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 3.0.0), methods, stats, stats4, splines
-Suggests: VGAMdata, MASS
+Depends: R (>= 3.1.0), methods, stats, stats4, splines
+Suggests: VGAMdata, MASS, mgcv
Description: An implementation of about 6 major classes of
statistical regression models. At the heart of it are the
vector generalized linear and additive model (VGLM/VGAM)
classes, and the book "Vector Generalized Linear and
Additive Models: With an Implementation in R" (Yee, 2015)
+ <DOI:10.1007/978-1-4939-2818-7>
gives details of the statistical framework and VGAM package.
Currently only fixed-effects models are implemented,
i.e., no random-effects models. Many (150+) models and
@@ -30,6 +31,6 @@ NeedsCompilation: yes
BuildVignettes: yes
LazyLoad: yes
LazyData: yes
-Packaged: 2016-03-15 08:53:23 UTC; tyee001
+Packaged: 2016-05-26 13:22:19 UTC; tyee001
Repository: CRAN
-Date/Publication: 2016-03-15 10:51:21
+Date/Publication: 2016-05-27 18:08:24
diff --git a/MD5 b/MD5
index d046b43..14f9067 100644
--- a/MD5
+++ b/MD5
@@ -1,146 +1,148 @@
-66414b6ed296192426033f4ac29a6af2 *BUGS
+68f03c3d43f72a0d24b94d4af893dd36 *BUGS
7ee5b2dc375f5ec613dffed100ca7b3d *ChangeLog
-18f48164383f8841c382cd707f6eb1f6 *DESCRIPTION
+dfb0d44f06250aa6eaa381521a1ed880 *DESCRIPTION
e640665d8993539374917f850992ddc7 *LICENCE.note
-570ed3c2ae75b34ac517bb91a1cda92e *NAMESPACE
-a91a54eee4f44ad484b53f569aa4b0f0 *NEWS
-31e60bca4249bc261445355bd6496609 *R/Links.R
-ed47c2f7a4154dfa299eaaebc78b31ab *R/aamethods.q
-4ffc1530ca8113d2f2d8b0d5cc1db282 *R/add1.vglm.q
-29b192ec0239f8f013e99ef759823732 *R/attrassign.R
-19fd9a65f33bfc01a56d0ee1f4752159 *R/bAIC.q
-f96b47c7279f6b68a3946245deff4429 *R/build.terms.vlm.q
-f86d6550ffdd1b3ca9f06252dcf7ba50 *R/calibrate.q
-c575b259c9dd77477ec86e3df977b00c *R/cao.R
-34c0e75f6712d52bfb75cdcc5a62395e *R/cao.fit.q
-6280eb07b90ccb8852ad8706fd0cb13b *R/coef.vlm.q
-523b3faf78519c00346b1843bd5db02d *R/confint.vlm.R
-58e1930c4422d41f5c118f39459be314 *R/cqo.R
-9a4e3479392194fbe0c6e55cacb03f62 *R/cqo.fit.q
-d411a1bf3bfbe7057b4211255c33ba53 *R/deviance.vlm.q
-54b928344dc9efab031bf3e83d04f21f *R/effects.vglm.q
-928ecb3851ae3ff62ceab17baa7c8992 *R/family.actuary.R
-3cd382a09e1312f28ef0f3b9bf21fca4 *R/family.aunivariate.R
-1bd1dacbe2b70799e91a49b33f6c7afc *R/family.basics.R
-7fe4f2801d52575344a7520ed828a8cf *R/family.binomial.R
-2fcda4f92db74fd1af970029218ddb38 *R/family.bivariate.R
-79e723214af3efeaee67fc7b287455d7 *R/family.categorical.R
-092006fb107b2a1864ce284854e8ed0f *R/family.censored.R
-c9be4e7bfd15babad00c1872694f0f1f *R/family.circular.R
-adc8b490a49386c200b94980987d8fc3 *R/family.exp.R
-57d70a513f9e8041807a24d13354696e *R/family.extremes.R
-251b551aaf906c754d9e75342e7ea1af *R/family.functions.R
-8e8f859b7d6fcfc7bc0217f1c55283f8 *R/family.genetic.R
-3ac000dc4c07a19b8fb7a6c5496db839 *R/family.glmgam.R
-0f7af1b8f7a3ebb16ceac11221cab219 *R/family.loglin.R
-a8ae0b3b7507b40def24bedad2696097 *R/family.math.R
-9781662e0e6a85930eda8f0b470dbb9c *R/family.mixture.R
-9fcb7721324170b016e5d05e4762ecf0 *R/family.nonlinear.R
-fac1e304db31c1df631388c2f5c98afa *R/family.normal.R
-3e81dcdfbf1108dc27ad849424b5ed06 *R/family.others.R
-170f18f3a46c734b5fc42f3e137284e3 *R/family.positive.R
-44a39dac95371e0a9214632a4daefce2 *R/family.qreg.R
-9f8a2d77c4b80645472c258c609596cc *R/family.rcim.R
-ce2d185ba2afa3e7611f80c267280039 *R/family.rcqo.R
-028ae9e8c53a0dd3033e01f62b8c1a39 *R/family.robust.R
-81bcc233613aa077ae6fd75bc5a8eaaf *R/family.rrr.R
-ca8dfa29ba9b7733213147f573058f18 *R/family.sur.R
-9ceab9c4a48c8ab4124208896da02a43 *R/family.survival.R
-d35aa31fb99b813eb649ccfd0650126b *R/family.ts.R
-212f797fad47aa701641a0d11c18d886 *R/family.univariate.R
-8d7d5df3e005750453f8ed0977c0c4f6 *R/family.vglm.R
-8b7b2aa7b6b35f6d7bf84d5637d57d93 *R/family.zeroinf.R
-e5a738b6ba3f59a3962eb089e56e5786 *R/fittedvlm.R
-27dae12416e0840c1f75f4f18e0146f0 *R/formula.vlm.q
-1c7d28893d43c88a934731219098fd5c *R/generic.q
-aa6ec9eb642c7b79e0c05c54c13e7d35 *R/links.q
-06929b2f0a102fcca301a9f265279e04 *R/logLik.vlm.q
-7538bff4855fdeeb147da4822187ddf3 *R/lrwaldtest.R
-3c2bc6b07e880eb2f6ae5bfc3ee8f55e *R/model.matrix.vglm.q
-0a2fe5c2fef0512b723c10dbf8980914 *R/mux.q
-ec00a9fdace1922ca78877ac43605737 *R/nobs.R
-2b915559877c7bce5d0893ab740e7842 *R/plot.vglm.q
-a2547eed9a5570094efec6573e6f9f9b *R/predict.vgam.q
-2742099a9c2a856a3789ab8c6751bd7b *R/predict.vglm.q
-43a7efd329b4711111bb2e9d883b8717 *R/predict.vlm.q
-f0cee50fe2d92666ebd71d021c117352 *R/print.vglm.q
-74f7393a57eec9a96cc7d04a569037ca *R/print.vlm.q
-c431e12369752358904f6704832decd5 *R/qrrvglm.control.q
-186c1f01e378f2e03a6b3dd994551622 *R/qtplot.q
-a08653bfeb60aa7bc249535920889972 *R/residuals.vlm.q
-9d5826ad08d66734f7403d17fcbba5f6 *R/rrvglm.R
-e278dec435eddcc0345a59bd9dd56f6d *R/rrvglm.control.q
-54ad4c9dd8dd789a5d15cfb150c1f384 *R/rrvglm.fit.q
-cf62bdb183fe009cd47316eacbe3b14e *R/s.q
-156f02ed65d0d90c17241cbada6d0c00 *R/s.vam.q
-400c72e71f4820b58f292523029c6245 *R/simulate.vglm.R
-366887aff30fbfac5afb77ed10597005 *R/smart.R
-89968d39bff60306bab87cc1e3ebdca1 *R/step.vglm.q
-ea860d4429fbcfb1c8e494a198e72adc *R/summary.vgam.q
-77bacb6a3a26f41d314c7398229afe84 *R/summary.vglm.q
-242bb02f33630d22c6cd0219d375326e *R/summary.vlm.q
-f53cc75eb61ade505b6ee2d55f0ac377 *R/vgam.R
-4ccd8af1480a81325e5863775a59f597 *R/vgam.control.q
-f6da05ed223f0cac5b7731c8f5da2095 *R/vgam.fit.q
-c7836fc6514f090c9852ef7427b68a95 *R/vgam.match.q
-1501c23247071b2f853288145f80ac89 *R/vglm.R
-ce25aa40db5b384a7f1ca04ed8acb3fb *R/vglm.control.q
-49a5d53b383a87546d9cfc081c4c05f1 *R/vglm.fit.q
-d3c11b3c2876d98a37ea6f4a5658a4a6 *R/vlm.R
-b6d7090a5d83d261dc018d72d87f0ed1 *R/vlm.wfit.q
-8923fb07fdb84505792742d2a25b793d *R/vsmooth.spline.q
-c50b7cc72caa37922a7210113c736f47 *build/vignette.rds
-6f925d3a68c25b310fc7f2e95d780e7b *data/Huggins89.t1.rda
-3247124812b7363afd2ba89f3d51b6be *data/Huggins89table1.rda
+e9304ea877f43e4e34c4ce1b3d27a48d *NAMESPACE
+80eb5bf0880da3735541f850ea0cb1bc *NEWS
+1cf8c58b1b8f8f512152ad0fef77f9b3 *R/Links.R
+21f10bcb0180979e9306f79521eaeaf3 *R/aamethods.q
+9694330a9429ca317d230d95b4f28439 *R/attrassign.R
+56f4d63ba58a8118751745ee1d114c2f *R/bAIC.q
+ddc20f03a63a70292d06634a2c33fdb2 *R/build.terms.vlm.q
+e18970ef384fe48b670fd3e39da3e773 *R/calibrate.q
+cfefa5d3109605043519fcb00f2e973c *R/cao.R
+206e6fdff874d039ba7b0783aba59e5e *R/cao.fit.q
+712ba0e03427dfc76a251d04b564cbcd *R/coef.vlm.q
+0caeff46be684ed015c8797312fb584a *R/confint.vlm.R
+f7b3a3390696387b3160d50fab2f1e26 *R/cqo.R
+37a34a9474c4015cfff888efe0e1df58 *R/cqo.fit.q
+d7411f35192232f4b396b2b9a256c048 *R/deviance.vlm.q
+1d9d007e875a5298f466a67da0a5355b *R/effects.vglm.q
+0cdac42854e20d508ba4eda8fcf905b5 *R/family.actuary.R
+bbcd730bf607107b7901bf6ed0bbeb0c *R/family.aunivariate.R
+8a903ec2a3167664db6d781a446e2e0f *R/family.basics.R
+d865febbab3728ad23ab1d29c5a1aefc *R/family.binomial.R
+8cc88aa0238f0c24873e43b96248069f *R/family.bivariate.R
+9ab9ab5bb3408b7b8c96d0b27044ffd3 *R/family.categorical.R
+a1854051e36e942db41075b38d01da14 *R/family.censored.R
+53c98dae9e0fca770deb579158c3649a *R/family.circular.R
+4c12b580d3d15be2f7393731223e6483 *R/family.exp.R
+4c642fd4f02fe56de2ada12775ff545e *R/family.extremes.R
+436edded73dcb29d8a6eaa208a3d616d *R/family.functions.R
+d3cd45b76a23ce50c5d55487dfb2bea6 *R/family.genetic.R
+e6214ab432a3c59169a1d38f5d7321d5 *R/family.glmgam.R
+4ee41145327bb60d68e61636d742bfc4 *R/family.loglin.R
+9c7b3a42d0c04f9a6244180303ae52a4 *R/family.math.R
+38dd9066ca231d11fe68838ab1e8c236 *R/family.mixture.R
+4a9235c693f664c02d3f0028746456c8 *R/family.nonlinear.R
+610a746d203f5d80dfe3f9235201113d *R/family.normal.R
+3c8ccd291b82a4dd473f05b71dc33de0 *R/family.others.R
+165a865e6f3d4a689abf0df987dbe4c5 *R/family.positive.R
+160ef5bb3ade2a010f7fb89e0626a277 *R/family.qreg.R
+09e1e6f7b83739b27d34ae1355564a80 *R/family.rcim.R
+56bc66c989b520e3dab7995fbfb52b49 *R/family.rcqo.R
+b64fb946c724db1151be8c10a918609b *R/family.robust.R
+1fd817f411b76ffa57ef99b3266dbce5 *R/family.rrr.R
+ff3ab815d988febc776287b3230eec59 *R/family.sur.R
+533ca6d202400a48588a5f707aa4770e *R/family.survival.R
+2294aa48ef55115a5a985a4cc5b372d1 *R/family.ts.R
+4de93f7061f23fa901713236597676fa *R/family.univariate.R
+8c4fe096ac15f024a37038ae7e222662 *R/family.vglm.R
+f151351322dcd21ffe0a0adc093caed4 *R/family.zeroinf.R
+2e0feaff930416a2be667b96674f0f99 *R/fittedvlm.R
+562a5bfc621b8a1c0e1e897ee96ef27a *R/formula.vlm.q
+a9c48afe02653db5b368ddbbc3a98bae *R/generic.q
+8ccbb2c06885c2421b6079be28b1e0a9 *R/links.q
+b275831b8ad5e1c70f5e45849a663e6f *R/logLik.vlm.q
+2e5531d6158191fd1f1fc5851a514386 *R/lrwaldtest.R
+f7ce594fdeb677de275fe3935426902f *R/model.matrix.vglm.q
+308858105b4bd4eb35d15a90e7a06645 *R/mux.q
+21c6f98334d6f745adb68bf1a9b0de4d *R/nobs.R
+709894eed9cee3ab89ea651b52a5086b *R/penvps.R
+87144da24f6ccee1e83973a812733961 *R/plot.vgam.R
+d19041afde6928b79454d697e7ef7b77 *R/plot.vglm.R
+0b826112c20b62aa84de56995df0dfd2 *R/predict.vgam.q
+2b8ee0e63edd4225f6c654c574527d8c *R/predict.vglm.q
+54ea08fbdc77b5bc32281bb92e48a49f *R/predict.vlm.q
+b744f88aae3e4d79313f688b3edcfbd6 *R/print.vglm.q
+c1398324a29defd22e16a4bfc12eeb0c *R/print.vlm.q
+d8eb45f7f8cde512bfb81cd6ba73c852 *R/psfun.R
+e84add7acb90669f496564778fa82f2b *R/psv2magic.R
+b04deb6f14c8561e40d4c7477794426d *R/qrrvglm.control.q
+0a368c9c8cb0065eda12801f306c82c5 *R/qtplot.q
+f995162b96b7d38b0c388d818281a8c5 *R/residuals.vlm.q
+e660aa5a0acfe92c37c6d1a898bebe9a *R/rrvglm.R
+a765ef813001a447a28afd390a95c463 *R/rrvglm.control.q
+550ee5bdebaa23c93551a743bea80cdd *R/rrvglm.fit.q
+eab2d3dcd2d47eccc0917d44b2328834 *R/s.q
+09e35797e609474bf5586830b72f61b6 *R/s.vam.q
+a3611170442f07a7c5f6c5b80a4c88a7 *R/simulate.vglm.R
+6002fdbd315202fe3c809d0d4bad738d *R/smart.R
+dfb248166c9030652294440d3434114d *R/summary.vgam.q
+0d3ef350650f66ca4332aa74d8682419 *R/summary.vglm.q
+7b868586600cd09af0b0d962beb4f9ef *R/summary.vlm.q
+eae97f190da8dc8305aa3bb942c8d693 *R/vgam.R
+57cf03d0771a8787e73a631577b00aa4 *R/vgam.control.q
+7028939a835af468c0c18e185d479f13 *R/vgam.fit.q
+a6816380f5d7d20cc618d48e947aa920 *R/vgam.match.q
+de045db8370a4e1c0bdb69415cd77404 *R/vglm.R
+605a747fdc64cefa58bc6ec1e2f07512 *R/vglm.control.q
+f30de8d3512608317be4c535515fd5a2 *R/vglm.fit.q
+619a7ce3a35b93df64ae6eda4b64f681 *R/vlm.R
+ce7085fc18245d111bb2c9cb62db4052 *R/vlm.wfit.q
+77ed62698e44dc3768c6f325d6029323 *R/vsmooth.spline.q
+a07ba9fab9b037499cd4d53fe5dfdcbe *build/vignette.rds
+a7c4647f1fb9a84fb6b3e4939a1bdab2 *data/Huggins89.t1.rda
+4b7a96145fe191aae6d4000119fca953 *data/Huggins89table1.rda
d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
-be3b7d8c4d48c5d6dd28ae75b92d7f10 *data/alclevels.rda
-4a7c49e2c65c91646dfc1b0b9f010898 *data/alcoff.rda
-ff5853d8b50a88855ec1d31c550699bb *data/auuc.rda
-40caae8677f57d3e0316d671d30f016e *data/backPain.rda
+002750b117ad85abddddd7f3a9202329 *data/alclevels.rda
+f0273a4099e893b5c0a75026832353e6 *data/alcoff.rda
+88eff02445bfc854cb291e09358dfe77 *data/auuc.rda
+0d4f8bc5415cb9135d55dd15317d4522 *data/backPain.rda
4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
-eec4e857ffb9c0888b4cfa59b3db915d *data/beggs.rda
+767e369fcbe38c25a6d25e3ca4766e5a *data/beggs.rda
e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-15e0d982e815b30ad197b083f6524975 *data/car.all.rda
-0c536d00a3a92028e6867817b5ee4f78 *data/cfibrosis.rda
+6e979612433ab8f64ecb0777552fbeaf *data/car.all.rda
+be2a9693f80356b34c6ab906f3ba279e *data/cfibrosis.rda
b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-690479e10cc5260d1e357a17c3c4007e *data/corbet.rda
-a0f47c847f1ea403e53f30073ef34942 *data/crashbc.rda
-aed7745292af4d98dce60b28772ad463 *data/crashf.rda
-af5cf05f5abbf40942009d3c2b875bf2 *data/crashi.rda
-32a0bbc55cf912483109718084f093a0 *data/crashmc.rda
-7ad8337140f5d172a4c0c6e0083a921a *data/crashp.rda
-b2af6eda780da9cc80ce4a2256687129 *data/crashtr.rda
-43963d3f939a513c3e95c4dd331efbc6 *data/deermice.rda
-297c62e906af91f0d8fd64592a1e8736 *data/ducklings.rda
+feca377e8acb42e16c9e7d5095e0ee8e *data/corbet.rda
+f619df7f64b33a8a86a8824fb7d8986b *data/crashbc.rda
+bf8193fdc83a46d6499d47ee1eeb0ee7 *data/crashf.rda
+314e993b8f2deff5002c9e3ae3ea7c77 *data/crashi.rda
+2fb69d2b29f97023857bbcb400ab53c7 *data/crashmc.rda
+8998af71ee12daff74ba3ccb852a5649 *data/crashp.rda
+bf110e2aca39cf5a7b24a618db4f8f3f *data/crashtr.rda
+9965ac837e615904002246ee7239064e *data/deermice.rda
+389b381253dfcaffca2855d6a07bfff1 *data/ducklings.rda
08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-2f9f1cc67454cd0022e4b4d9b6e20152 *data/finney44.rda
-0947d375a8dac8b8f2df6c29d2a1e356 *data/flourbeetle.rda
+29a903c0cb10f4eaf0476b5e0b4b1b39 *data/finney44.rda
+311be1c751377403e48723c468169b25 *data/flourbeetle.rda
3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-3fba8d7e31acb3936d3db511f2aa9d2a *data/hspider.rda
+a07b5ef2e95ab88ea73243f6622f2ca7 *data/hspider.rda
dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-bc0b0413c6641d1e4ddc260b96bd2eba *data/lakeO.rda
-f02e8aab9f481aff0e3399357305d259 *data/leukemia.rda
+36be2f45f357277bb790005a90dba695 *data/lakeO.rda
+425bfc030a5addd98be30f3e2406d367 *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
-e4d66877901e8e6a093ba6ab74a425a6 *data/marital.nz.rda
-08492303e0f51013202d0c921108d9f2 *data/melbmaxtemp.rda
+d7541dc77a4e696004b02e8c1a824068 *data/marital.nz.rda
+06d0845d4f0034e789d3b92c235812ec *data/melbmaxtemp.rda
56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-ffa511438e40e3c06118a2e6a06b6783 *data/pneumo.rda
+64ac94ed85c27aeeeb69ed4334a43a9f *data/pneumo.rda
0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-9e83d8a32f482fee9f3a9136818e8bd9 *data/prinia.rda
-e2121052c9b7de83f077605eb9b5e19f *data/ruge.rda
-51a6bfdb4caf035ad470cb730e3fd917 *data/toxop.rda
+0f719ec09b48e431b183a0a5dcbef0cd *data/prinia.rda
+ddd5cd78e8ba35f28add47f3d75bf690 *data/ruge.rda
+772105106bdec180786e942d0d43d68b *data/toxop.rda
1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-adea95cad99f0e03f86c1d56f2926fa7 *data/venice.rda
-8c11f6736ada0e413b40673c38e7e459 *data/venice90.rda
+011b2583a58ab3fabcc204537ae4fa0e *data/venice.rda
+322ad7d5d65dc2208f410dfcd157fbbf *data/venice90.rda
e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-d570c2c7cfa9467f8b273b9902021116 *data/wine.rda
+fb0c5a27e2a6b8e1fd3b5243a6e51e55 *data/wine.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
@@ -151,17 +153,18 @@ ab8081763fe2144558be25f3a154327b *demo/vgam.R
d2fcbc6a325172d058671fd977d0b5e5 *inst/CITATION
4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
-3746d48d12209b86c2ab7665ed0e6fd2 *inst/doc/categoricalVGAM.pdf
+95283b44f1ada2e41b1637309b80bdaf *inst/doc/categoricalVGAM.pdf
2f57d2a0610fd514e05aae8ea94d8ebc *inst/doc/crVGAM.R
8e489008d8b8b8f769e5e93e351c9c42 *inst/doc/crVGAM.Rnw
-84efd2c0c9082cd8a48ead5b91f0c4e7 *inst/doc/crVGAM.pdf
+e3818408b067f73bc46ecb851b9e5123 *inst/doc/crVGAM.pdf
9b97006cdc82d3a0c0ace3d43c9758de *man/A1A2A3.Rd
4bc543c785c8a213c46693e2e37f5f00 *man/AA.Aa.aa.Rd
26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
c6c2a703e0f76c8b0f9e0a7d36f13386 *man/ABO.Rd
38647708600610216a454c61450810ff *man/AICvlm.Rd
-17a911f0784d0ecd30d53f9eeafd522f *man/AR1.Rd
-e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
+3891efed1727773933c6289b0147601a *man/AR1.Rd
+5867eaa6a561af2007aad087c87a7c91 *man/AR1EIM.Rd
+144c95d558d807bf195bb8b297bae332 *man/AR1UC.Rd
0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
7b7ad4188c687ac8361fa1176697ce88 *man/Coef.qrrvglm-class.Rd
@@ -169,7 +172,7 @@ e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-5b55112125b3f2bdf8dec0219570950d *man/CommonVGAMffArguments.Rd
+827e574dfaf7a6b73ca967f05f2469be *man/CommonVGAMffArguments.Rd
098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
b9505b66dea5b1311aa8d2700d3d6a34 *man/Links.Rd
@@ -180,13 +183,13 @@ d315bc4396e206c1ec3c5219e4efc677 *man/ParetoUC.Rd
f84dea8ac6b2c1e857d25faaceb706d2 *man/QvarUC.Rd
bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
d39629f7598851d50262b1075321525a *man/SURff.Rd
-685985b08b4668ae66206e9d72170b45 *man/Select.Rd
+aaf085cd16f13eefc77d2f920ae840f0 *man/Select.Rd
20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
eeed63e131219077a163410c683fd32e *man/UtilitiesVGAM.Rd
6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-3656d1dde004b1de74846eaf813a2f69 *man/VGAM-package.Rd
+8d0c54826f9afc4662dd38c89f1264f8 *man/VGAM-package.Rd
ce8d4266cb5eeb30fbe40e28ff554f5e *man/acat.Rd
d2407fe64af0c4369d18ff4cc5f58a34 *man/alaplace3.Rd
8c0d8e4d9e634a0c2539e3a052afa9cc *man/alaplaceUC.Rd
@@ -201,12 +204,12 @@ bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
65a5426c021e0a6c90731c14786a3395 *man/benfUC.Rd
afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
12d28242eea600b3e6f52db5d71d871f *man/beniniUC.Rd
-c22880cb87b5d3fcc1b394e5c4d0cfc4 *man/betaII.Rd
-55e5c7726717a7dca9e4785cc9871801 *man/betaR.Rd
-6a57403bd9855568e232f74234bf7681 *man/betabinomUC.Rd
+4711e53251972b32ac49ca46c65dd4e4 *man/betaII.Rd
+9ea8b2b59f58e287682c7a7ebdfeb705 *man/betaR.Rd
+33d5779c489976a0c1b94cfc590d24cc *man/betabinomUC.Rd
3dc23022db723ea07649cac674dd0e2f *man/betabinomial.Rd
5049c8cf22a2f1e637db29a40ad3c3b1 *man/betabinomialff.Rd
-98dd26e554dcebe5b9de6dfab6ffdeb4 *man/betaff.Rd
+d4f5ab83c26ee7c3ffcccff3b149797e *man/betaff.Rd
4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
725a8c9d8b4a9facb0c3cb815d75266b *man/betageometric.Rd
7553029f69c2be7dbb20c864b97102e5 *man/betanormUC.Rd
@@ -225,12 +228,12 @@ faeb492060203a0d89d5cf4f40b0e4c4 *man/bifgmcopUC.Rd
ffcbfc72f334094f6dfd4842ab522e96 *man/bilogisUC.Rd
df5c6274584e9a5b961b253c498c0580 *man/bilogistic.Rd
1e3bfb0dc5eb125518194b131c78ecc3 *man/binom2.or.Rd
-129f6be1cf1a039f137e5ef3da503fca *man/binom2.orUC.Rd
+84dc66c2c5d0321ace962180382c7c59 *man/binom2.orUC.Rd
3da84a2c9a4148aa7f062129c7b40c8d *man/binom2.rho.Rd
20cb304b16a9073488621b104549e361 *man/binom2.rhoUC.Rd
29a9e5aa565832fad506a6a45c7b2897 *man/binomialff.Rd
-2bb4acbcb6e81694a0eee8c794932afe *man/binormal.Rd
-3e2bebdf7d5db7a0c7960d6b6f1597b5 *man/binormalUC.Rd
+1fb31f47df7a9bcbf36b8ee148577384 *man/binormal.Rd
+262cfd215344aee98efb116c7c1151b8 *man/binormalUC.Rd
ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
1d943aad478481e7bf4c4b1a9540706c *man/biplackettcop.Rd
@@ -249,7 +252,7 @@ c7322bedb2b3d8ba4e7c0a19a2098ced *man/bratt.Rd
f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
22f73cce0070ea9bb785567af837e14f *man/calibrate.qrrvglm.Rd
-22e9a881f2f077f7e01e1dde9043dc7d *man/calibrate.qrrvglm.control.Rd
+3d6361a83221391b92f5d29a52117c29 *man/calibrate.qrrvglm.control.Rd
afbb7b695f652a4bccfb0e6cb80a8739 *man/cao.Rd
4005c8bdb2b1a2e7d0ff5f1a800f4224 *man/cao.control.Rd
10f72289cb33f5f734d39826893a280b *man/cardUC.Rd
@@ -257,7 +260,7 @@ afbb7b695f652a4bccfb0e6cb80a8739 *man/cao.Rd
a458bca3e32bdc653cd924dd564ee58d *man/cauchit.Rd
957dd50f814f492806ec05aa4c046569 *man/cauchy.Rd
4973007c9a18278e2130994b68a2e47d *man/cdf.lmscreg.Rd
-345accaaab82cc5d1f08b8d25c1432c4 *man/cens.gumbel.Rd
+d7acde63c3dccc32fc8d4932f6a5e314 *man/cens.gumbel.Rd
49787b380cee2941b0b8d04b602ebadb *man/cens.normal.Rd
72901f13efe7d772fc5ed78bd6c58cea *man/cens.poisson.Rd
94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
@@ -265,7 +268,7 @@ a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
-aff05a422130d8ced689190eec1b09dd *man/clo.Rd
+b52839c08d0e405351b90b44ecef37ef *man/clo.Rd
e35c0ce37b72050ab56a340fa1d4f375 *man/cloglog.Rd
b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
eb8ba8eea01187377705b5cb7d682947 *man/coefvgam.Rd
@@ -278,19 +281,19 @@ e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
5314268c4257680ac10edf26e9222944 *man/cqo.Rd
8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
b7742b0b5c630d48f1834fb5fefc0835 *man/cratio.Rd
-002568187283dd7faf83534553674e94 *man/cumulative.Rd
-99f24227c802897e75bce7f82ba99a7d *man/dagum.Rd
+7297008d99a1c3882555652d6a5d7441 *man/cumulative.Rd
+acd3af26cffe6abaf21889775de0aae2 *man/dagum.Rd
12192f19751804a540e6d0852e29726c *man/dagumUC.Rd
d5439d37875ba50990406c5c5f8595eb *man/deermice.Rd
dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
-0e0f2e7368fa906e837d8432bb3cfb36 *man/depvar.Rd
+72353428e1c837e182bac272b2b75def *man/depvar.Rd
bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
-276aebb1ed4a71af9f9096e9f9c4515d *man/dirichlet.Rd
+7736a01002f56a8d42e24d984fee9a2e *man/dirichlet.Rd
17afdbe28f8a8d93725e2747c2daa303 *man/dirmul.old.Rd
7a63063be35f8510ea5198556bf1c192 *man/dirmultinomial.Rd
7c78ad345e44a5b81963f0cfc744f701 *man/double.cens.normal.Rd
-99e58209c99f594f80fc7da1524cfa53 *man/double.expbinomial.Rd
-1da4d63047f620bd38bc5fadf56ebfaf *man/ducklings.Rd
+7e76fa91f062dc857b0d628b6617b480 *man/double.expbinomial.Rd
+2b74d3ee310b347a7112ce9c0b0ccb34 *man/ducklings.Rd
90481ad7be6cb76a82e99694a2a8e016 *man/eexpUC.Rd
92007c408a76e89f46e756eba4724a44 *man/enormUC.Rd
ca3e766bd344902d3b8bf05c65d6c12b *man/enzyme.Rd
@@ -314,7 +317,7 @@ c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
9d679a175cfe7165b89906441e5efebc *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
460448c26c4268e7870bbff5f9d2fb66 *man/fisherz.Rd
-6d12a492e19a8f452b575c9f4473ded8 *man/fisk.Rd
+c987dc987a36b7df68a94e3309b96096 *man/fisk.Rd
5966dbc9e396bd3cbb15b2650d885177 *man/fiskUC.Rd
97bcdcc90669435272c5d940f0b6d967 *man/fittedvlm.Rd
742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
@@ -330,7 +333,7 @@ c4aea59df1932e36cd6fb2ec38110e6d *man/gamma1.Rd
969c6650372ab79d1751a733754f0dac *man/gammaR.Rd
3558584dfba54663dc4de34e21cc9aa9 *man/gammahyperbola.Rd
edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
-e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
+1994c9a780c42db46cebfbcf23716be5 *man/gaussianff.Rd
6bdfa23e246b5ec65b369e4e746574e9 *man/genbetaII.Rd
45999add2a92fc243422b25bfc8f8198 *man/genbetaIIUC.Rd
69a758aeab4a968d9e9f74d96a43fa17 *man/gengamma.Rd
@@ -342,39 +345,39 @@ e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
ac050e093931cbc8b783c56728350b69 *man/geometric.Rd
ea16a72ebd8739cd2133e91fd9c92662 *man/get.smart.Rd
d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
-a793d458ea8847106a2f0ade265a6a1b *man/gev.Rd
-0496867739918b68919e42a4018a338c *man/gevUC.Rd
+737ed71da7e1d3d9f50d2ecd573d043d *man/gev.Rd
+1517975a704bc6f715924c10ae148d14 *man/gevUC.Rd
fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
7ac66cc25e3d13cc7fed08bb6b85e1db *man/golf.Rd
9a635d01c2a0f08b71517df675b20a92 *man/gompertz.Rd
8170cb9545cf35f1768db069b13a893e *man/gompertzUC.Rd
-59edbd8559281a0c9f3ed748d67ec12e *man/gpd.Rd
-9cbfd18331d52c4fb66f0221d76be01f *man/gpdUC.Rd
+d5cc5cbc038ca4b11365bd44b3867b25 *man/gpd.Rd
+b87cedd5e790170299c3b2f9590475f2 *man/gpdUC.Rd
7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
6e28498b6d44f47f2663a6be72f68529 *man/grc.Rd
-62e50cb71aa52e64f6395a83e13b23e5 *man/gumbel.Rd
+586ed95a4487db2fcd9e6b90c92efcf3 *man/gumbel.Rd
f4c347dbfde0cbe8013496d5f8ef175a *man/gumbelII.Rd
5099d1835eebc1b4610481e77463a50c *man/gumbelIIUC.Rd
-6a66a220a209ae6d1c7eb0bf57f59671 *man/gumbelUC.Rd
-fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
+ff165cb5cc91cc4574c32dc88c6218b8 *man/gumbelUC.Rd
+f2c855e1a1291a70604b70b0f0040963 *man/guplot.Rd
2c4e81cce3a305291fc9493d3f128b07 *man/has.intercept.Rd
d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
2be497a8d77472f00279d19f735863b5 *man/hormone.Rd
93557c7aca25514dc023773bdd045d76 *man/hspider.Rd
-ff68401c69a2da4605086cb24fb7944e *man/huber.Rd
-bddbb4682e3ee5c97f116acfc15d3f3f *man/huberUC.Rd
+fda38a6b1fd8ed628b52c6c2021dbdf3 *man/huber.Rd
+c3f293eac3d5ae8362329d28c5f2be17 *man/huberUC.Rd
d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
e3a9765eba431e1f55e2fdc11ff52b4b *man/hypersecant.Rd
2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
04198bb4e2bf6a230e17b4e84251887f *man/hzetaUC.Rd
7f0e64784914835bb11c6f43643aae15 *man/iam.Rd
-c978905e9ad1554330e74b3088faa909 *man/identitylink.Rd
+ca465d4fce5e11d024c52dd64b1d0b0e *man/identitylink.Rd
857cbf6f8c5970a18867fe560f275f6f *man/inv.binomial.Rd
3e5254faf43189942b98ee8dafaaa06f *man/inv.gaussianff.Rd
a78ed6bfc5949e6586975bf781ece433 *man/inv.lomax.Rd
4492e4a4f91d5fe7d4ec75a128bf4e07 *man/inv.lomaxUC.Rd
-84c75096c0dd15930a3d6df360fb0967 *man/inv.paralogistic.Rd
+c6a4f615abfb46e7961add90565f2c6f *man/inv.paralogistic.Rd
6f740a890a174ff4ff3879fa8719ec58 *man/inv.paralogisticUC.Rd
b2ce02b5af6709a1b2d294fcf254d393 *man/is.buggy.Rd
a501c3d3de4a744a0e0cdbc0673b543d *man/is.parallel.Rd
@@ -396,7 +399,7 @@ d3fb68f03d6cc946da6b48772bea3297 *man/lgammaUC.Rd
d3d35561bb39104a648833365e13bb26 *man/lgammaff.Rd
1bb4af539f983579a19c180c3ab29aec *man/lindUC.Rd
271536a592dedaff73d9cde20c844d76 *man/lindley.Rd
-53b900fd7a3bc5a1f4ff6a9b9353d4e9 *man/linkfun.Rd
+a271c63ae1172ebdabb67a1a25a18d17 *man/linkfun.Rd
79a20f167d06958b953c5a7a8dfe16f0 *man/linkfun.vglm.Rd
c6df85746e6410c593e22489045a88e5 *man/lino.Rd
f56802c0fe3ec1b61cd313c370b9ff58 *man/linoUC.Rd
@@ -409,8 +412,8 @@ b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
236716ee0347bd21a08aec9fec2a810b *man/logUC.Rd
-34497f2200a115323b8be4c181dc5b09 *man/logc.Rd
-1e7009d720bba4d0201441cd02be84d7 *man/loge.Rd
+a319edb070badcf389045ee076ff0e41 *man/logc.Rd
+a3f3250120073a8ed5d34d7958234b5c *man/loge.Rd
20cc0c73ee555790179879533cb526f7 *man/logff.Rd
227fe95675d683b575accc2d9390755c *man/logistic.Rd
c65e7936494787bc6fa0c31d931d8f6b *man/logit.Rd
@@ -421,8 +424,8 @@ bc4fdb6ecc0913ebadab7deb1a95efed *man/loglinb2.Rd
4290a696c9eedd140e5d64489b6f29be *man/loglinb3.Rd
f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
7495135db74b6b1eb9646755218e7020 *man/lognormal.Rd
-e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
-e23c05c9f84263ac83055c5f03eb7d30 *man/lomax.Rd
+25116483cd0f23a8c31bc80f91a1685f *man/logoff.Rd
+77db7395c6e627f5183464aa77a56835 *man/lomax.Rd
dbc62e15528097b42fb64d49be5f22f3 *man/lomaxUC.Rd
ac49f1d5575295a237328c2de3cbab10 *man/lqnorm.Rd
fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
@@ -433,11 +436,11 @@ c5760c3960748f906230ded119478271 *man/machinists.Rd
4df8393312f1b7ff81d4dab3d18984cd *man/makeham.Rd
7785dc7e94e63e94e688d9553a9c7b2a *man/makehamUC.Rd
b830a21e53610a5abfbfa7466ae0f3c3 *man/margeff.Rd
-b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
+3ddf15a17c9065fcb8a97c4d4dca724d *man/marital.nz.Rd
b2f1aa9cecaec318a14cc5d4fbb20d67 *man/maxwell.Rd
c7fcbd341df77f76494a92836715789a *man/maxwellUC.Rd
665ee56b876aac685d2e35853f8712b8 *man/mccullagh89.Rd
-c007d94fac5c46a26baae899a04aaf9d *man/melbmaxtemp.Rd
+a98728733448bc9c8943a2fb8f3f66bc *man/melbmaxtemp.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
2bcfc226edb08c7257783853ff52d87b *man/micmen.Rd
09a21e6a1a75e5a2e0e30079a1cbdee1 *man/mix2exp.Rd
@@ -446,22 +449,24 @@ ac6dffa8b08d6cba20464169d19e8439 *man/mix2normal.Rd
131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
-a725287719f6c4119913108ee4824ddb *man/multilogit.Rd
-44c03a67d9ec459f64af85542064beab *man/multinomial.Rd
+00846c716aa6e89baea6f08be433fe9f *man/multilogit.Rd
+c903155fb0adb75baf311a4e1b2df6b3 *man/multinomial.Rd
c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
61319d756fcb8509696cc1aa55ae4ed2 *man/nakagamiUC.Rd
-170f52d48791fca14c83e19e00fab025 *man/nbcanlink.Rd
+879a1566439b7bec4f48b6ed57ac118c *man/nbcanlink.Rd
0c0ef87d1221196cdc7fc0d156ac150a *man/nbolf.Rd
-a9f0d86d35628b552c87595b20573ea5 *man/negbinomial.Rd
-7621ea96a711ce85182ef8c5ed6ed1a7 *man/negbinomial.size.Rd
-61d58f624b00429804e5d1cfbc60e82e *man/normal.vcm.Rd
-e50087c6bac80011e9f401f4f1e6b81a *man/notdocumentedyet.Rd
+045a7642ca57ea99a0196a36f077399d *man/negbinomial.Rd
+8bac5c2532f5af0a4bedf8afa437171f *man/negbinomial.size.Rd
+d2fea6d91944fd5842f5f7655f9cf278 *man/normal.vcm.Rd
+110d21511561a735015f63b126f79e95 *man/notdocumentedyet.Rd
5e590acdda3ff0a9e2df0db8d233f848 *man/nparamvglm.Rd
+8489561ee3f0890992e047759c6cca94 *man/oiposbinomUC.Rd
+2902dd89d64f467803e677121e19b2c3 *man/oipospoisUC.Rd
+467d7104cd536f3f682d5c6d97a318fe *man/oipospoisson.Rd
98b83e406ea1968ba3e8b17d0933b2cf *man/olym.Rd
858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
-025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
-97d58f1d0875eca9da52f607aa6a4c01 *man/ozibetaUC.Rd
-3c217a91527fb169737d67244e8572f4 *man/paralogistic.Rd
+5d9093c6c1297fb988ed0695d88ffeb2 *man/oxtemp.Rd
+e1f36164728a10785808cb359a7807e9 *man/paralogistic.Rd
383805a5130a512c207a6a30c28553d3 *man/paralogisticUC.Rd
b8a1bd0580460ec6155b7c7bb2dae503 *man/paretoIV.Rd
9e30cad5872ffef80576a429e37cdaca *man/paretoIVUC.Rd
@@ -474,29 +479,29 @@ e4ea396d024de674ff4bfdda6975bb72 *man/pgamma.deriv.Rd
2c3491351af8d4eb4618723f612c4f26 *man/plotdeplot.lmscreg.Rd
cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
24a05d0db169fb74f603b21f0b8dd7b8 *man/plotqtplot.lmscreg.Rd
-3e689a8ffae086e45cbe82fcd5255042 *man/plotrcim0.Rd
-8c391f9ad83a6afeab6446044f22b16d *man/plotvgam.Rd
+725d095cfee76e0b05a1448738d7e853 *man/plotrcim0.Rd
+0f3e17d0b1877bd81a9ae8431cd6beb7 *man/plotvgam.Rd
72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
-6196fac00cd0044ba818ec0a794a031a *man/plotvglm.Rd
+823905758e645eb93ac7292316cb47fc *man/plotvglm.Rd
40f1661d2f26cb11f54c9140c767c61b *man/pneumo.Rd
606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
-27ff99e8ac98ded3af8e4f94e6560b33 *man/poissonff.Rd
+e4e2fc2618efd51a0bc97ab8671ed82b *man/poissonff.Rd
83497c4069d8c74dc15f0308de0dac89 *man/polf.Rd
696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
04f6169b69f25ad72e088a51ef9e99b7 *man/posbernoulli.t.Rd
12ee5b18104f163749da385de04fa175 *man/posbernoulli.tb.Rd
-c2c82f9a71f8a7d20e991dee48a9c734 *man/posbinomUC.Rd
+04eefb1ff1ad4d9af313b5dec284d91e *man/posbinomUC.Rd
aab909e407aa248772db0235e64890dd *man/posbinomial.Rd
-dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
-2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
-2411fe14cfe5fa2f30f25546fb3ed2a0 *man/posnegbinomial.Rd
+3dc1e01b8fe96fe13467d80aab3d0465 *man/posgeomUC.Rd
+8b10cb766f37fa372b6f50632752d9a7 *man/posnegbinUC.Rd
+7f73ccbe96ab4a1d68184e0b525dae86 *man/posnegbinomial.Rd
45b528182d1c01bc352dea7b84fd7671 *man/posnormUC.Rd
9061c33c9a5d44acc0c5c4fd1eeec22f *man/posnormal.Rd
-137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
-15a13299e9a4052bfe951d8a962e555b *man/pospoisson.Rd
+b2a2f2ec4eff7fb9c96b053a099f93d3 *man/pospoisUC.Rd
+2fdd9216b21961f62931871dc165375a *man/pospoisson.Rd
cc06ad7f82789c3703e4977cc39828ed *man/powerlink.Rd
66bad6a1a2012e256b483e1727aca7e9 *man/prats.Rd
ee31e58dfd33c2c3b0d51eac95b553ad *man/predictqrrvglm.Rd
@@ -506,9 +511,10 @@ cb6a8c644c31d6ec5e8977ea7b1198df *man/predictvglm.Rd
889d24cbaa36abd8df4c54fbf88609e2 *man/probit.Rd
0dc0ebdd8538489ac38a624176612691 *man/propodds.Rd
241402d089ef4159f01fb4cd2c72b9a3 *man/prplot.Rd
+3a0eab7a9e21bac43d738f9ab9681f80 *man/ps.Rd
ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
-8f4e6ebea74037334377e346c5b476f6 *man/qrrvglm.control.Rd
-0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
+86482135c7096ece69019577ca79a2a1 *man/qrrvglm.control.Rd
+4d9e77b96958342af0ab14eb7efe6ed3 *man/qtplot.gumbel.Rd
b10bad72776d283be77901e730593f2e *man/qtplot.lmscreg.Rd
6c60658fef3dc7aa5d53d1d954a65e96 *man/quasibinomialff.Rd
06c7ef40ac06f97042d785a04e81989e *man/quasipoissonff.Rd
@@ -519,19 +525,19 @@ a95c0df100dedc0b4e80be0659858441 *man/rayleighUC.Rd
97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
dbfea987d2d41c45477fa82bd978ab5e *man/rec.normal.Rd
-49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
+1787d0e69981aab74ae789bac092722e *man/reciprocal.Rd
8e6ffaeea6e88d46925e60f343364a0d *man/rhobit.Rd
d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
4d5fb32666631b97e65f8a2324f42bcb *man/riceff.Rd
9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
-0e12c48578228c300e8c04ab3b08c04a *man/rlplot.egev.Rd
+81b8b316257fea4c5fdd2f83a251f80b *man/rlplot.gevff.Rd
3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
330d39b23f38eea93d453f07fcb7574b *man/rrvglm-class.Rd
6c28407aa99813ab33175602570fbd3b *man/rrvglm.Rd
-71e3f19a37b6f429458eb9060f5e2ef4 *man/rrvglm.control.Rd
-eb0e4a0a8b0c63cd0c17120e9ca8df53 *man/rrvglm.optim.control.Rd
+7b5b0475883ebb8e5845dad778e39be9 *man/rrvglm.control.Rd
+7d300d5a2ba96f87a47dc8a73df5eaa8 *man/rrvglm.optim.control.Rd
ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
-21a97af245ddc566ddd8935381f6ea22 *man/s.Rd
+6eb25df526a146532bed84abda730335 *man/s.Rd
c66939737b4a412d7057eaf0da8f67d9 *man/sc.studentt2.Rd
114f55f02750721179c9fc78d93f686c *man/sc.t2UC.Rd
c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
@@ -539,7 +545,7 @@ c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
056aa6efa43e4cd79f5e07769a0c6fd9 *man/simplex.Rd
f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
41af17badd0ef1b17cee591a35d46a12 *man/simulate.vlm.Rd
-5e675d926504dee487751a5a8d26ba47 *man/sinmad.Rd
+dad32c56e791762f4f795cd5d1fc38dc *man/sinmad.Rd
95cbc5903a187d325c52c3d9d07ee252 *man/sinmadUC.Rd
c5839042eff769ac461463b8a7a49428 *man/skellam.Rd
2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
@@ -561,20 +567,20 @@ b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
59e040af3616943e93946ddf0ba96aba *man/triangle.Rd
4b120eb41d1983a4afbe2b45793dc11e *man/triangleUC.Rd
1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
-c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
+deae0b3d6157ae23411419f0f64b2ef6 *man/trplot.qrrvglm.Rd
aeaf42ac6e475f1dc3f180450d56c2ee *man/truncparetoUC.Rd
1658b0820ef97964c22fa4f3a18d13e6 *man/truncweibull.Rd
50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-f026eb5b7a1fba0724603f185abbe7d0 *man/undocumented-methods.Rd
+1ef31771939fc4f99d962a282252abf5 *man/undocumented-methods.Rd
395bf20844e881303e4f76da27a693cd *man/uninormal.Rd
6a60d8e09c890e47042be1203aee9547 *man/vcovvlm.Rd
f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
8ab09ea32a3839db780ac641218c322e *man/vgam-class.Rd
-77ac046fef4733e8dc8a26ecb61d201d *man/vgam.Rd
-ea3fe248b860921783367037c8302c49 *man/vgam.control.Rd
+8a51ee9d7f3c6960f4475e3dc76563b1 *man/vgam.Rd
+5bdd6fc54f66b79bdd33317fc6741e7f *man/vgam.control.Rd
126b55b4567a63cf2edb04a8b6d91506 *man/vglm-class.Rd
-71c4c86e48be338c410905722e51afb8 *man/vglm.Rd
-0fb3b6b60182efdce44c9d225bcf0a64 *man/vglm.control.Rd
+bc4f239adfeb3eba830c3738badad2d1 *man/vglm.Rd
+123a3db22c339481529926292883ec46 *man/vglm.control.Rd
33ea80f5f411700dff4b19371517c743 *man/vglmff-class.Rd
3c3444f49659331d0b0da1c4e28ea9c8 *man/vonmises.Rd
25b2ef45238e3f61e82dcf52f3d17090 *man/vsmooth.spline.Rd
@@ -594,24 +600,26 @@ cb21430df0f12962f6abf34d9d0e51ce *man/zabinomial.Rd
7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
8c0f4c29525dab1b9715b9f7fe40facc *man/zageometric.Rd
78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
-285850a216064c3c2395c91b38ae222a *man/zanegbinomial.Rd
+ce96ae4bbda9d9e1c0cbcf4b9852a3eb *man/zanegbinomial.Rd
b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
-11ebb5c9786781ef6eceaf18a5373ec4 *man/zapoisson.Rd
+d5a378daf17ca29279ffce104fe40cb1 *man/zapoisson.Rd
426432d39c7a2b0975e6cf9fc3ce520d *man/zero.Rd
2364749f0041ab1fc22b6469bef31fe4 *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-ffdfc9ccb4ade0814af72eded433db03 *man/zetaff.Rd
-bce8783175ca63f89475e705b2fb1709 *man/zibinomUC.Rd
-2b2cdf14b7faa05c066e45e35a6af0bb *man/zibinomial.Rd
+02dcb7552bb55e8e8b37aa55cde4a9b3 *man/zetaff.Rd
+a2a94ba506f78263c96f423740e3270f *man/zibinomUC.Rd
+6e0a43313870e96f898452981365bf90 *man/zibinomial.Rd
7b1d2ee37f339b9a218f1db4abb30cdd *man/zigeomUC.Rd
75b757f1586dba0d8837bc4bc682da73 *man/zigeometric.Rd
025dd2763701ec5b6880bcd6f4a9f35a *man/zinegbinUC.Rd
-fd3fbee62f3373263e83acfc09023734 *man/zinegbinomial.Rd
-0d842051c2750e57aa0b794f2f4640fe *man/zipebcom.Rd
+e638747f021ad82af94a1938bf33aaa0 *man/zinegbinomial.Rd
+75f01804be352529f2935da74770c4b1 *man/zipebcom.Rd
abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
fd2adf6acc7093de70cb3c16d3819f23 *man/zipfUC.Rd
-0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
-c92c30581138442d15678d61eb9ef483 *man/zipoisson.Rd
+2751243dfcbd74edff2f05db0d841afc *man/zipoisUC.Rd
+e98f562d17c2bebb37d9937695458519 *man/zipoisson.Rd
+ca15a5a4e923e77169ed88e86877ab09 *man/zoabetaR.Rd
+bc8275fcc85b5884fcfe7a70e49e6b5f *man/zoabetaUC.Rd
f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
diff --git a/NAMESPACE b/NAMESPACE
index 87e063b..c56d112 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,6 +7,31 @@
useDynLib(VGAM)
+export(doiposbinom, poiposbinom, qoiposbinom, roiposbinom)
+export(doipospois, poipospois, qoipospois, roipospois, oipospoisson)
+export(deflat.limit.oipospois)
+export(zoabetaR)
+
+
+
+
+export(ps, Pen.psv, psv2magic)
+
+export(checkwz)
+export(process.constraints)
+export(mux22, mux111)
+importFrom("splines", "splineDesign")
+
+
+
+
+
+
+
+export(AR1EIM)
+export(AR1.gammas)
+importFrom("stats", "cov")
+
export(as.char.expression)
@@ -15,14 +40,14 @@ export(predictvglmS4VGAM)
export(EIM.posNB.speciald, EIM.NB.speciald,
EIM.posNB.specialp, EIM.NB.specialp)
export(.min.criterion.VGAM)
-export(pozibetabinom, pozibetabinom.ab,
- rozibetabinom, rozibetabinom.ab,
- dozibetabinom, dozibetabinom.ab,
+export(pzoibetabinom, pzoibetabinom.ab,
+ rzoibetabinom, rzoibetabinom.ab,
+ dzoibetabinom, dzoibetabinom.ab,
Init.mu)
export(log1mexp)
-export(dozibeta, pozibeta, qozibeta, rozibeta)
+export(dzoabeta, pzoabeta, qzoabeta, rzoabeta)
export(logitoffsetlink)
export(showvglmS4VGAM)
export(showvgamS4VGAM)
@@ -87,7 +112,6 @@ exportMethods(confint) # For S4, not S3
export(dgenpois)
export(AR1)
export(dAR1)
-export(AR1.control)
export(param.names)
export(is.buggy.vlm)
exportMethods(is.buggy)
@@ -115,7 +139,7 @@ export(case.names,
weights)
-export(expected.betabin.ab, grid.search)
+export(expected.betabin.ab, grid.search, grid.search2, grid.search3, grid.search4)
exportMethods(QR.Q, QR.R)
export(QR.Q, QR.R)
@@ -252,6 +276,7 @@ interleave.VGAM, interleave.cmat,
procVec,
ResSS.vgam,
valt.control,
+vforsub, vbacksub, vchol,
vcontrol.expression,
vplot, vplot.default, vplot.factor, vplot.list,
vplot.matrix, vplot.numeric, vvplot.factor)
@@ -335,7 +360,7 @@ paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV,
paretoIII, dparetoIII, qparetoIII, rparetoIII, pparetoIII,
paretoII, dparetoII, qparetoII, rparetoII, pparetoII,
dparetoI, qparetoI, rparetoI, pparetoI,
-cens.gumbel, egumbel, gumbel,
+cens.gumbel, gumbelff, gumbel,
dgumbel, pgumbel, qgumbel, rgumbel,
foldnormal, dfoldnorm, pfoldnorm, qfoldnorm, rfoldnorm,
cennormal,
@@ -480,11 +505,11 @@ show.vsmooth.spline,
process.binomial2.data.VGAM, process.categorical.data.VGAM,
negzero.expression.VGAM,
qtplot,
-qtplot.default, qtplot.gumbel, qtplot.lms.bcg,
+qtplot.default, qtplot.gumbel, qtplot.gumbelff, qtplot.lms.bcg,
qtplot.lms.bcn, qtplot.lms.yjn, qtplot.lms.yjn2, qtplot.vextremes, qtplot.vglm,
explot.lms.bcn,
rlplot,
-rlplot.egev, rlplot.gev,
+rlplot.gevff, rlplot.gev,
rlplot.vextremes, rlplot.vglm,
rlplot, rlplot.vglm, rrar.control)
@@ -585,7 +610,7 @@ export(dsinmad, psinmad, qsinmad, rsinmad, sinmad)
export(lognormal)
export(dpolono, ppolono, rpolono)
export(dgpd, pgpd, qgpd, rgpd, gpd)
-export(dgev, pgev, qgev, rgev, gev, egev)
+export(dgev, pgev, qgev, rgev, gev, gevff)
export(dlaplace, plaplace, qlaplace, rlaplace, laplace)
export(dalap, palap, qalap, ralap,
alaplace1.control, alaplace2.control, alaplace3.control,
@@ -638,7 +663,8 @@ export(dgengamma.stacy, pgengamma.stacy, qgengamma.stacy, rgengamma.stacy)
export(
dbenf, pbenf, qbenf, rbenf,
-genbetaII, dgenbetaII, genpoisson,
+genbetaII.Loglikfun4, genbetaII, dgenbetaII,
+genpoisson,
geometric, truncgeometric,
dlino, plino, qlino, rlino, lino,
grc,
@@ -678,6 +704,8 @@ deexp, peexp, qeexp, reexp)
export(
meplot, meplot.default, meplot.vlm,
guplot, guplot.default, guplot.vlm,
+posNBD.Loglikfun2,
+NBD.Loglikfun2,
negbinomial, negbinomial.size, polya, polyaR,
uninormal, SURff, normal.vcm,
nbcanlink,
diff --git a/NEWS b/NEWS
index c4d6780..c986eff 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,64 @@
+ CHANGES IN VGAM VERSION 1.0-2
+
+NEW FEATURES
+
+ o vglm.fit() has been simplified and handles half-stepping better.
+ o AR1() implements the EIM of Porat and Friedlander (1986); this is
+ the work of Victor Miranda. It is specified by type.EIM = "exact"
+ (the default).
+ o Function gevff() replaces egev(). It handles multiple responses
+ like any other ordinary VGAM family function.
+  o A rudimentary plotvglm() plots the Pearson residuals, firstly
+ versus the predicted values, and secondly, against the hat values.
+ o The 'refLevel' argument of multinomial() accepts a character string,
+ e.g., multinomial(refLevel = "European") for xs.nz$ethnicity as a
+ response.
+  o New family functions: oipospoisson(dpqr), zoabetaR().
+  o New functions: grid.search[234](), [dpqr]oiposbinom().
+ o is.buggy() is called by vgam() immediately after estimation;
+ it gives a warning if any constraint matrix corresponding
+ to an s() term is not orthogonal.
+
+
+
+BUG FIXES and CHANGES
+
+ o vglm.fit() did not handle half-stepping very well.
+ o Some families for counts (i.e., [pos,z[ai]]negbinomial[ff]())
+ have been "validparams"-enabled in order to make estimation
+ near the boundary of the parameter space more stable,
+ especially when a Poisson approximation is suitable.
+ o Other families that have been "validparams"-enabled:
+ gev(), gpd().
+ o Actuarial or statistical size distributions families have
+ been modified with respect to initial values, e.g.,
+ sinmad, dagum, [inv.]lomax, [inv.]paralogistic, [gen]betaII().
+ o rep_len() replaces rep() where possible.
+ o Function gev() has been changed internally and
+ arguments such as 'gshape' have changed.
+ o Function rzipois() may not have handled 0-deflation properly
+ but it does so now.
+ o Function plotvgam() had a bug testing for variable names when
+ the xij facility was used.
+ o multinomial() and multilogit() use "(Last)" to signify
+ the last level of a factor; it used to be "last".
+ o qposbinom() returned 0 (incorrect), and not 1 (correct), for p = 0.
+ o zipoisson() and zipoissonff() no longer store fitted values such
+ as pstr0 in the misc slot. They can be obtained by, e.g.,
+ fitted(fit, type.fitted = "pstr0").
+ o Renamed functions:
+ egumbel() is now called gumbelff().
+ [dqpr]ozibeta() is now called [dqpr]zoabeta().
+ o Renamed parameter names: zetaff() uses 'shape', not 'p'.
+ o qzibinom() did not handle arguments lower.tail and log.p correctly.
+ o Tested okay on R 3.3.0. This package now requires R 3.1.0 or higher
+ (not R 3.0.0 as before).
+
+
+
CHANGES IN VGAM VERSION 1.0-1
NEW FEATURES
diff --git a/R/Links.R b/R/Links.R
index b83e5c1..89f7d5e 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -245,7 +245,7 @@ link2list <- function(link
t.index <- pmatch(names(ans[-1]), names(big.list))
t.index
- if (any(is.na(t.index)))
+ if (anyNA(t.index))
stop("in '", fun.name, "' could not match argument(s) ",
paste('"', names(ans[-1])[is.na(t.index)], '"', sep = "",
collapse = ", "))
diff --git a/R/aamethods.q b/R/aamethods.q
index b2afadd..fbdc436 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -105,7 +105,7 @@ show.vglmff <- function(object) {
cat("Informal classes:", paste(f, collapse = ", "), "\n")
cat("\n")
- for (ii in 1:length(nn))
+ for (ii in seq_along(nn))
cat(nn[ii])
cat("\n")
@@ -199,6 +199,11 @@ setClass("vgam", representation(
contains = "vglm")
+setClass("psvgam", representation(
+ "psslot" = "list"),
+ contains = "vglm")
+
+
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
deleted file mode 100644
index 2bbe725..0000000
--- a/R/add1.vglm.q
+++ /dev/null
@@ -1,7 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-
diff --git a/R/attrassign.R b/R/attrassign.R
index c1a0f24..7191109 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,29 +8,33 @@
+
attrassignlm <- function(lmobj) {
- attrassign(model.matrix(lmobj),terms(lmobj))
+ attrassign(model.matrix(lmobj), terms(lmobj))
}
-attrassigndefault <- function(mmat,tt) {
- if (!inherits(tt,"terms"))
- stop("need terms object")
- aa<-attr(mmat,"assign")
- if (is.null(aa))
- stop("argument is not really a model matrix")
- ll<-attr(tt,"term.labels")
- if (attr(tt,"intercept")>0)
- ll<-c("(Intercept)",ll)
- aaa<-factor(aa,labels=ll)
- split(order(aa),aaa)
+
+
+attrassigndefault <- function(mmat, tt) {
+ if (!inherits(tt, "terms"))
+ stop("need terms object")
+ aa <- attr(mmat, "assign")
+ if (is.null(aa))
+ stop("argument is not really a model matrix")
+ ll <- attr(tt, "term.labels")
+ if (attr(tt, "intercept") > 0)
+ ll <- c("(Intercept)", ll)
+ aaa <- factor(aa, labels = ll)
+ split(order(aa), aaa)
}
+
if (!isGeneric("attrassign"))
- setGeneric("attrassign", function(object, ...)
- standardGeneric("attrassign"))
+ setGeneric("attrassign", function(object, ...)
+ standardGeneric("attrassign"))
-setMethod("attrassign", "lm",
+setMethod("attrassign", "lm",
function(object, ...)
attrassignlm(object, ...))
diff --git a/R/bAIC.q b/R/bAIC.q
index 4742990..98b556d 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 509b40a..3bf3a1a 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -60,7 +60,7 @@ Build.terms.vlm <-
if (M == 1)
fit <- c(fit)
if (cov.true) {
- var <- ((x %*% cov) * x) %*% rep(1, length(coefs))
+ var <- ((x %*% cov) * x) %*% rep_len(1, length(coefs))
list(fitted.values = fit,
se.fit = if (M == 1) c(sqrt(var)) else
matrix(sqrt(var), ncol = M,
@@ -88,7 +88,7 @@ Build.terms.vlm <-
complex <- (TL > 1)
if (any(simple)) {
asss <- unlist(assign[simple])
- ones <- rep(1, nrow(x))
+ ones <- rep_len(1, nrow(x))
fit[, simple] <- x[, asss] * outer(ones, coefs[asss])
if (cov.true)
se[, simple] <- abs(x[, asss]) * outer(ones, sqrt(diag(cov))[asss])
@@ -101,7 +101,7 @@ Build.terms.vlm <-
fit[, term] <- xt %*% coefs[TT]
if (cov.true)
se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) * xt) %*%
- rep(1, length(TT))))
+ rep_len(1, length(TT))))
}
}
attr(fit, "constant") <- constant
diff --git a/R/calibrate.q b/R/calibrate.q
index b6370e2..c8c2daf 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -183,7 +183,7 @@ calibrate.qrrvglm <-
BestOFpar <- rbind(BestOFpar, OFpar[index, ])
BestOFvalues <- c(BestOFvalues, OFvalues[index])
} else {
- BestOFpar <- rbind(BestOFpar, rep(NA_real_, len = Rank))
+ BestOFpar <- rbind(BestOFpar, rep_len(NA_real_, Rank))
BestOFvalues <- c(BestOFvalues, NA)
}
}
diff --git a/R/cao.R b/R/cao.R
index d6e2023..85a5ff4 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -59,11 +59,11 @@ cao <- function(formula,
attr(x, "assign") <- attrassigndefault(x, mt)
offset <- model.offset(mf)
if (is.null(offset))
- offset <- 0 # yyy ???
+ offset <- 0 # yyy ???
w <- model.weights(mf)
- if (!length(w))
- w <- rep(1, nrow(mf))
- else if (ncol(as.matrix(w)) == 1 && any(w < 0))
+ if (!length(w)) {
+ w <- rep_len(1, nrow(mf))
+ } else if (ncol(as.matrix(w)) == 1 && any(w < 0))
stop("negative weights not allowed")
if (is.character(family))
@@ -83,7 +83,7 @@ cao <- function(formula,
cao.fitter <- get(method)
- deviance.Bestof <- rep(NA_real_, len = control$Bestof)
+ deviance.Bestof <- rep_len(NA_real_, control$Bestof)
for (tries in 1:control$Bestof) {
if (control$trace && (control$Bestof > 1)) {
cat(paste("\n========================= Fitting model",
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 6929b94..e6bd882 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,8 +9,12 @@
+
+
+
+
cao.fit <-
- function(x, y, w = rep(1, length(x[, 1])),
+ function(x, y, w = rep_len(1, length(x[, 1])),
etastart = NULL, mustart = NULL, coefstart = NULL,
offset = 0, family,
control = cao.control(...), criterion = "coefficients",
@@ -177,8 +181,8 @@ cao.fit <-
p1, p2 = p2, imethod = control$imethod, bchat = 0)
othdbl <- c(small = control$SmallNo, fseps = control$epsilon,
.Machine$double.eps,
- iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS),
+ iKvector = rep_len(control$iKvector, NOS),
+ iShape = rep_len(control$iShape, NOS),
resss = 0, bfeps = control$bf.epsilon, hstep = 0.1)
for (iter in 1:optim.maxit) {
@@ -465,7 +469,7 @@ cao.control <- function(Rank = 1,
Cinit = Cinit,
ConstrainedO = TRUE, # A constant, not a control parameter
criterion = criterion,
- Crow1positive = as.logical(rep(Crow1positive, len = Rank)),
+ Crow1positive = as.logical(rep_len(Crow1positive, Rank)),
epsilon = epsilon,
Etamat.colmax = Etamat.colmax,
FastAlgorithm = TRUE, # A constant, not a control parameter
@@ -585,7 +589,7 @@ callcaoc <- function(cmatrix,
getfromVGAMenv("etamat", prefix = ".VGAM.CAO.") else
t(etamat)
- if (any(is.na(usethiseta))) {
+ if (anyNA(usethiseta)) {
usethiseta <- t(etamat) # So that dim(usethiseta) == c(M,n)
rmfromVGAMenv("etamat", prefix = ".VGAM.CAO.")
}
@@ -620,7 +624,7 @@ callcaoc <- function(cmatrix,
all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
- ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 11/7/02
+ ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 20020711
dimw. <- M. # Smoothing one spp. at a time
dim1U. <- M.
@@ -629,12 +633,12 @@ callcaoc <- function(cmatrix,
stop("something wrong here")
Hlist.[[1]] <- NULL
- trivc <- rep(2 - M. , len = queue)
+ trivc <- rep_len(2 - M. , queue)
ncbvec <- ncolHlist.[nwhich]
ncolb <- max(ncbvec)
qbig. <- NOS * qbig # == NOS * Rank; holds all the smooths
- if (!all.equal(as.vector(ncbvec), rep(1, len = queue)))
+ if (!all.equal(as.vector(ncbvec), rep_len(1, queue)))
stop("'ncbvec' not right---should be a queue-vector of ones")
pbig <- pstar. #
@@ -733,7 +737,7 @@ flush.console()
names(Bspline) <- nwhich
ind9 <- 0 # moving index
for (sppno in 1:NOS) {
- for (ii in 1:length(nwhich)) {
+ for (ii in seq_along(nwhich)) {
ind7 <- (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
ans <- ans1$bcoeff[ind9+ind7]
ans <- matrix(ans, ncol = ncolHlist[nwhich[ii]])
@@ -831,7 +835,7 @@ calldcaoc <- function(cmatrix,
temp.smooth.frame <- vector("list", 1+Rank) # Temporary makeshift frame
mynames5 <- if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
names(temp.smooth.frame) <- c("(Intercept)", mynames5)
- temp.smooth.frame[[1]] <- rep(1, len = n)
+ temp.smooth.frame[[1]] <- rep_len(1, n)
for (uu in 1:Rank) {
temp.smooth.frame[[uu+1]] <- numat[, uu]
}
@@ -894,8 +898,8 @@ calldcaoc <- function(cmatrix,
all.knots = control$all.knots, nk = NULL,
sf.only = TRUE)
- ldk <- 4 * max(ncolHlist.[nwhich]) # was M; # Prior to 11/7/02
- ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 11/7/02
+ ldk <- 4 * max(ncolHlist.[nwhich]) # was M; # Prior to 20020711
+ ldk <- 3 * max(ncolHlist.[nwhich]) + 1 # 20020711
@@ -913,7 +917,7 @@ calldcaoc <- function(cmatrix,
Hlist.[[1]] <- NULL
- trivc <- rep(2 - M. , len = queue)
+ trivc <- rep_len(2 - M. , queue)
ncbvec <- ncolHlist.[nwhich]
ncolb <- max(ncbvec)
@@ -921,10 +925,10 @@ calldcaoc <- function(cmatrix,
qbig. <- NOS * qbig # == NOS * Rank
pbig <- pstar. # Not sure
if (FALSE) {
- df1.nl <- rep(control$df1.nl, len = NOS) # This is used
- df2.nl <- rep(control$df2.nl, len = NOS) # This is used
- spar1 <- rep(control$spar1, len = NOS) # This is used
- spar2 <- rep(control$spar2, len = NOS) # This is used
+ df1.nl <- rep_len(control$df1.nl, NOS) # This is used
+ df2.nl <- rep_len(control$df2.nl, NOS) # This is used
+ spar1 <- rep_len(control$spar1, NOS) # This is used
+ spar2 <- rep_len(control$spar2, NOS) # This is used
} else {
df1.nl <- procVec(control$df1.nl, yn = yn , Default = control$DF1)
df2.nl <- df1.nl # 20100417; stopgap
@@ -1030,7 +1034,7 @@ warning("20100405; this is new:")
names(Bspline) <- nwhich
ind9 <- 0 # moving index
for (jay in 1:NOS) {
- for (ii in 1:length(nwhich)) {
+ for (ii in seq_along(nwhich)) {
ind9 <- ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
ans <- ans1$bcoeff[ind9]
ans <- matrix(ans, ncol = ncolHlist[nwhich[ii]])
@@ -1045,7 +1049,7 @@ warning("20100405; this is new:")
}
qrank <- npetc[7] # Assume all species have the same qrank value
- dim(ans1$etamat) <- c(M,n) # bug: was c(n,M) prior to 22/8/06
+ dim(ans1$etamat) <- c(M,n) # bug: was c(n,M) prior to 20060822
list(deviance = ans1$deviance[1],
alldeviance = ans1$deviance[-1],
bcoefficients = ans1$bcoefficients,
@@ -1126,7 +1130,7 @@ Coef.rrvgam <- function(object,
ocontrol <- object at control
if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2")
- gridlen <- rep(gridlen, length = Rank)
+ gridlen <- rep_len(gridlen, Rank)
M <- if (any(slotNames(object) == "predictors") &&
is.matrix(object at predictors))
ncol(object at predictors) else
@@ -1163,7 +1167,7 @@ Coef.rrvgam <- function(object,
dimnames = list(latvar.names, ynames))
extents <- apply(latvar.mat, 2, range) # 2 by R
- maximum <- rep(NA_real_, len = NOS)
+ maximum <- rep_len(NA_real_, NOS)
which.species <- 1:NOS # Do it for all species
if (Rank == 1) {
@@ -1175,7 +1179,7 @@ Coef.rrvgam <- function(object,
eta2matrix <- matrix(0, NOS, 1)
}
gridd.orig <- gridd
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
gridd <- gridd.orig
gridres1 <- gridd[2, 1] - gridd[1, 1]
gridres2 <- if (Rank == 2) gridd[2, 2] - gridd[1, 2] else 0
@@ -1203,7 +1207,7 @@ Coef.rrvgam <- function(object,
if (length(index) != 1)
warning("could not find a single maximum")
if (Rank == 2) {
- initvalue <- rep(xvals[index,], length = Rank) # for optim()
+ initvalue <- rep_len(xvals[index,], Rank) # for optim()
if (abs(initvalue[1] - extents[1, 1]) < smallno)
initvalue[1] <- extents[1, 1] + smallno
if (abs(initvalue[1] - extents[2, 1]) < smallno)
@@ -1424,18 +1428,18 @@ lvplot.rrvgam <- function(object,
}
- pch <- rep(pch, length = length(which.species))
- pcol <- rep(pcol, length = length(which.species))
- pcex <- rep(pcex, length = length(which.species))
- llty <- rep(llty, length = length(which.species))
- lcol <- rep(lcol, length = length(which.species))
- llwd <- rep(llwd, length = length(which.species))
- adj.arg <- rep(adj.arg, length = length(which.species))
+ pch <- rep_len(pch, length(which.species))
+ pcol <- rep_len(pcol, length(which.species))
+ pcex <- rep_len(pcex, length(which.species))
+ llty <- rep_len(llty, length(which.species))
+ lcol <- rep_len(lcol, length(which.species))
+ llwd <- rep_len(llwd, length(which.species))
+ adj.arg <- rep_len(adj.arg, length(which.species))
sppnames <- if (type == "predictors") dimnames(r.curves)[[2]] else
dimnames(object at y)[[2]]
if (Rank == 1) {
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
indexSpecies <- if (is.character(which.species))
match(which.species[sppno], sppnames) else which.species[sppno]
@@ -1460,10 +1464,10 @@ lvplot.rrvgam <- function(object,
if (sites) {
text(latvarmat[,1], latvarmat[,2], adj = 0.5,
labels = if (is.null(spch)) dimnames(latvarmat)[[1]] else
- rep(spch, length = nrow(latvarmat)),
+ rep_len(spch, nrow(latvarmat)),
col = scol, cex = scex, font=sfont)
}
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
indexSpecies <- if (is.character(which.species))
match(which.species[sppno], sppnames) else
@@ -1475,7 +1479,7 @@ lvplot.rrvgam <- function(object,
col = pcol[sppno], cex = pcex[sppno], pch = pch[sppno])
}
if (label.arg) {
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
indexSpecies <- if (is.character(which.species))
match(which.species[sppno], sppnames) else
@@ -1532,7 +1536,7 @@ predict.rrvgam <- function (object, newdata = NULL,
setup.smart("read", smart.prediction = object at smart.prediction)
}
- tt <- terms(object) # 11/8/03; object at terms$terms
+ tt <- terms(object) # 20030811; object at terms$terms
X <- model.matrix(delete.response(tt), newdata,
contrasts = if (length(object at contrasts))
object at contrasts else NULL,
@@ -1540,7 +1544,7 @@ predict.rrvgam <- function (object, newdata = NULL,
if (nice21 && nrow(X) != nrow(newdata)) {
as.save <- attr(X, "assign")
- X <- X[rep(1, nrow(newdata)),, drop = FALSE]
+ X <- X[rep_len(1, nrow(newdata)),, drop = FALSE]
dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") <- as.save # Restored
}
@@ -1572,13 +1576,13 @@ predict.rrvgam <- function (object, newdata = NULL,
MSratio <- M / NOS # First value is g(mean) = quadratic form in latvar
if (type == "terms") {
terms.mat <- matrix(0, nrow(X), Rank*NOS) # 1st R cols for spp.1, etc.
- interceptvector <- rep(0, len = NOS)
+ interceptvector <- rep_len(0, NOS)
} else {
etamat <- matrix(0, nrow(X), M) # Could contain derivatives
}
ind8 <- 1:Rank
which.species <- 1:NOS # Do it all for all species
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
indexSpecies <- if (is.character(which.species))
match(which.species[sppno], sppnames) else which.species[sppno]
@@ -1634,6 +1638,8 @@ setMethod("predict", "rrvgam", function(object, ...)
predict.rrvgam(object, ...))
+
+
predictrrvgam <- function(object, grid, sppno, Rank = 1,
deriv = 0, MSratio = 1, type = "link") {
if (type != "link" && type != "terms")
@@ -1725,19 +1731,19 @@ plot.rrvgam <- function(x,
if (all((MSratio <- M / NOS) != c(1,2)))
stop("bad value for 'MSratio'")
- pcol <- rep(pcol, length = Rank*NOS)
- pcex <- rep(pcex, length = Rank*NOS)
- pch <- rep(pch, length = Rank*NOS)
- lcol <- rep(lcol, length = Rank*NOS)
- lwd <- rep(lwd, length = Rank*NOS)
- lty <- rep(lty, length = Rank*NOS)
- xlab <- rep(xlab, length = Rank)
+ pcol <- rep_len(pcol, Rank*NOS)
+ pcex <- rep_len(pcex, Rank*NOS)
+ pch <- rep_len(pch, Rank*NOS)
+ lcol <- rep_len(lcol, Rank*NOS)
+ lwd <- rep_len(lwd, Rank*NOS)
+ lty <- rep_len(lty, Rank*NOS)
+ xlab <- rep_len(xlab, Rank)
if (!length(which.species)) which.species <- 1:NOS
if (length(ylab))
- ylab <- rep(ylab, len = length(which.species)) # Too long if overlay
+ ylab <- rep_len(ylab, length(which.species)) # Too long if overlay
if (length(main))
- main <- rep(main, len = length(which.species)) # Too long if overlay
+ main <- rep_len(main, length(which.species)) # Too long if overlay
latvarmat <- latvar(x)
nice21 <- length(x at control$colx1.index) == 1 &&
names(x at control$colx1.index) == "(Intercept)"
@@ -1745,7 +1751,7 @@ plot.rrvgam <- function(x,
stop("can only handle intercept-only models")
counter <- 0
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
indexSpecies <- if (is.character(which.species))
match(which.species[sppno], sppnames) else which.species[sppno]
@@ -1836,9 +1842,9 @@ persp.rrvgam <-
c(0, max(fvmat)*stretch) else
range(coefobj at latvar[,2])
}
- xlim <- rep(xlim, length = 2)
- ylim <- rep(ylim, length = 2)
- gridlength <- rep(gridlength, length = Rank)
+ xlim <- rep_len(xlim, 2)
+ ylim <- rep_len(ylim, 2)
+ gridlength <- rep_len(gridlength, Rank)
latvar1 <- seq(xlim[1], xlim[2], length = gridlength[1])
latvar2 <- if (Rank == 2)
seq(ylim[1], ylim[2], len = gridlength[2]) else
@@ -1875,13 +1881,13 @@ persp.rrvgam <-
if (show.plot) {
if (!length(ylim.orig))
ylim <- c(0, max(fitvals[,which.species.numer]) * stretch) # A revision
- col <- rep(col, len = length(which.species.numer))
- lty <- rep(lty, len = length(which.species.numer))
- lwd <- rep(lwd, len = length(which.species.numer))
+ col <- rep_len(col, length(which.species.numer))
+ lty <- rep_len(lty, length(which.species.numer))
+ lwd <- rep_len(lwd, length(which.species.numer))
matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
type = "n", main = main, xlim = xlim, ylim = ylim, ...)
if (rugplot) rug(latvar(object))
- for (sppno in 1:length(which.species.numer)) {
+ for (sppno in seq_along(which.species.numer)) {
ptr2 <- which.species.numer[sppno] # points to species column
lines(latvar1, fitvals[,ptr2], col = col[sppno],
lty = lty[sppno], lwd = lwd [sppno], ...)
@@ -2110,9 +2116,3 @@ setMethod("show", "rrvgam", function(object) show.vgam(object))
-
-
-
-
-
-
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 0effe53..6b07534 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -57,7 +57,7 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
ncolHlist <- unlist(lapply(Hlist, ncol))
nasgn <- names(Hlist)
temp <- c(0, cumsum(ncolHlist))
- for (ii in 1:length(nasgn)) {
+ for (ii in seq_along(nasgn)) {
index <- (temp[ii] + 1):temp[ii + 1]
cmat <- Hlist[[nasgn[ii]]]
Bmat[ii,] <- cmat %*% ans[index]
diff --git a/R/confint.vlm.R b/R/confint.vlm.R
index 88fec34..b068677 100644
--- a/R/confint.vlm.R
+++ b/R/confint.vlm.R
@@ -1,9 +1,8 @@
-# 20150827; confint.vlm.R
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
-# Last modified: 20150827,
-# 1. 20150827; Tingting Zhan prodded me to do this.
-# 2.
-# 3.
@@ -16,8 +15,6 @@ confintvglm <- function(object, parm, level = 0.95, ...) {
parm <- pnames[parm]
a <- (1 - level)/2
a <- c(a, 1 - a)
-# ":::" may lead to warning in \pkg{devtools}:
-# pct <- stats:::format.perc(a, 3)
format.perc <- function(probs, digits)
paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits),
"%")
@@ -35,10 +32,6 @@ confintrrvglm <- function(object, parm, level = 0.95, ...) {
stop("currently this function has not been written")
-# 20150828; yettodo: write this function... it's not too difficult.
-# Base it on vcovrrvglm() and summaryrrvglm() because they do all
-# the work, which involves elements in A and C and B1.
-#
}
@@ -53,10 +46,8 @@ confintvgam <- function(object, parm, level = 0.95, ...) {
-# ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
-# if (FALSE) {
if (!isGeneric("confint"))
setGeneric("confint",
function(object, parm, level = 0.95, ...)
@@ -69,7 +60,6 @@ setMethod("confint", "vglm",
confintvglm(object = object, parm = parm, level = level, ...))
-# Stop other types of models from working... its dangerous:
setMethod("confint", "rrvglm",
function(object, parm, level = 0.95, ...)
confintrrvglm(object = object, parm = parm, level = level, ...))
@@ -77,9 +67,7 @@ setMethod("confint", "rrvglm",
setMethod("confint", "vgam",
function(object, parm, level = 0.95, ...)
confintvgam(object = object, parm = parm, level = level, ...))
-# }
-# ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
diff --git a/R/cqo.R b/R/cqo.R
index f2cae72..07538a5 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,9 +1,11 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
cqo <- function(formula,
family, data = list(),
weights = NULL, subset = NULL, na.action = na.fail,
@@ -56,7 +58,7 @@ cqo <- function(formula,
offset <- 0 # yyy ???
w <- model.weights(mf)
if (!length(w)) {
- w <- rep(1, nrow(mf))
+ w <- rep_len(1, nrow(mf))
} else if (ncol(as.matrix(w)) == 1 && any(w < 0))
stop("negative weights not allowed")
@@ -78,7 +80,7 @@ cqo <- function(formula,
cqo.fitter <- get(method)
- deviance.Bestof <- rep(NA_real_, len = control$Bestof)
+ deviance.Bestof <- rep_len(NA_real_, control$Bestof)
for (tries in 1:control$Bestof) {
if (control$trace && (control$Bestof>1))
cat(paste("\n========================= Fitting model", tries,
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 1282312..f293533 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -92,8 +92,8 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
deviance = double(1+NOS), beta = as.double(usethisbeta),
othdbl = as.double(c(small = control$SmallNo,
epsilon = control$epsilon, .Machine$double.eps,
- iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS)))) else
+ iKvector = rep_len(control$iKvector, NOS),
+ iShape = rep_len(control$iShape, NOS)))) else
.C("cqo_2",
numat = as.double(numat), as.double(ymat),
as.double(if (p1) xmat[, control$colx1.index] else 999),
@@ -109,8 +109,8 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
deviance = double(1+NOS), beta = as.double(usethisbeta),
othdbl = as.double(c(small = control$SmallNo,
epsilon = control$epsilon, .Machine$double.eps,
- iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS))))
+ iKvector = rep_len(control$iKvector, NOS),
+ iShape = rep_len(control$iShape, NOS))))
@@ -229,8 +229,8 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
deviance = double(1 + NOS), beta = as.double(usethisbeta),
othdbl = as.double(c(small = control$SmallNo,
epsilon = control$epsilon, .Machine$double.eps,
- iKvector = rep(control$iKvector, len = NOS),
- iShape = rep(control$iShape, len = NOS))),
+ iKvector = rep_len(control$iKvector, NOS),
+ iShape = rep_len(control$iShape, NOS))),
xmat2 = as.double(xmat2),
cmat = as.double(cmatrix),
p2 = as.integer(p2), deriv = double(p2*Rank),
@@ -290,7 +290,7 @@ checkCMCO <- function(Hlist, control, modelno) {
-cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
+cqo.fit <- function(x, y, w = rep_len(1, length(x[, 1])),
etastart = NULL, mustart = NULL, coefstart = NULL,
offset = 0, family,
control = qrrvglm.control(...),
@@ -368,7 +368,7 @@ ny <- names(y)
if (is.character(rrcontrol$Dzero)) {
index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
- if (any(is.na(index)))
+ if (anyNA(index))
stop("Dzero argument didn't fully match y-names")
if (length(index) == M)
stop("all linear predictors are linear in the",
@@ -594,10 +594,11 @@ ny <- names(y)
function(ymat, X1, X2, Rank = 1, epsilon = 1/32,
max.ncol.etamat = 10,
trace = FALSE,
- Crow1positive = rep(TRUE, len = Rank),
- isd.latvar = rep(1, length = Rank),
+ Crow1positive = rep_len(TRUE, Rank),
+ isd.latvar = rep_len(1, Rank),
constwt = FALSE, takelog = TRUE) {
+
print.CQO.expression <- expression({
if (trace && length(X2)) {
cat("\nUsing initial values\n")
@@ -618,8 +619,8 @@ ny <- names(y)
})
Crow1positive <- if (length(Crow1positive))
- rep(Crow1positive, len = Rank) else
- rep(TRUE, len = Rank)
+ rep_len(Crow1positive, Rank) else
+ rep_len(TRUE, Rank)
if (epsilon <= 0)
stop("epsilon > 0 is required")
ymat <- cbind(ymat) + epsilon # ymat == 0 cause problems
@@ -673,7 +674,7 @@ ny <- names(y)
rrr.normalize(rrcontrol = temp.control, A = alt$A,
C = alt$C, x = cbind(X1, X2)) else
alt
- ans <- crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
+ ans <- crow1C(ans2$C, rep_len(Crow1positive, effrank))
Rank.save <- Rank
Rank <- effrank
@@ -691,7 +692,7 @@ ny <- names(y)
matrix.out = TRUE,
is.vlmX = FALSE, ResSS = TRUE, qr = FALSE, xij = xij)
ans <- crow1C(as.matrix(tmp$resid),
- rep(Crow1positive, length.out = effrank))
+ rep_len(Crow1positive, effrank))
if (effrank < Rank) {
ans <- cbind(ans, ans.save[,-(1:effrank)]) # ans is better
}
@@ -706,7 +707,7 @@ ny <- names(y)
for (ii in 1:Rank)
ans[,ii] <- ans[,ii] * isd.latvar[ii] / actualSD[ii]
}
- ans <- crow1C(ans, rep(Crow1positive, length.out = Rank))
+ ans <- crow1C(ans, rep_len(Crow1positive, Rank))
dimnames(ans) <- list(dimnames(X1)[[1]],
if (Rank == 1) "latvar" else
paste("latvar", 1:Rank,
@@ -768,7 +769,7 @@ cqo.derivative.expression <- expression({
gr = if (control$GradientFunction) calldcqo else NULL,
method = which.optimizer,
control = list(fnscale = 1, trace = as.integer(control$trace),
- parscale = rep(control$Parscale, len = length(Cmat)),
+ parscale = rep_len(control$Parscale, length(Cmat)),
maxit = control$Maxit.optim),
etamat = eta, xmat = x, ymat = y, wvec = w,
X.vlm.1save = X.vlm.1save,
@@ -847,7 +848,7 @@ cqo.end.expression <- expression({
eta <- fv + offset
mu <- family at linkinv(eta, extra)
- if (any(is.na(mu)))
+ if (anyNA(mu))
warning("there are NAs in mu")
deriv.mu <- eval(family at deriv)
@@ -865,7 +866,7 @@ cqo.end.expression <- expression({
crow1C <- function(cmat,
- crow1positive = rep(TRUE, length.out = ncol(cmat)),
+ crow1positive = rep_len(TRUE, ncol(cmat)),
amat = NULL) {
if (!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
stop("bad input in crow1C")
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index 542290c..d9fddf1 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -127,7 +127,7 @@ nvar_vlm <- function(object, ...) {
names(numPars) <- object at misc$predictors.names
- NumPars <- rep(0, length = M)
+ NumPars <- rep_len(0, M)
for (jay in 1:M) {
X.lm.jay <- model.matrix(object, type = "lm", linpred.index = jay)
NumPars[jay] <- ncol(X.lm.jay)
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index 0056821..2989e61 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.actuary.R b/R/family.actuary.R
index c0972d8..c3a3266 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -21,9 +21,9 @@ dgumbelII <- function(x, scale = 1, shape, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
ans <- x
@@ -59,9 +59,9 @@ pgumbelII <- function(q, scale = 1, shape,
stop("bad input for argument 'log.p'")
LLL <- max(length(q), length(shape), length(scale))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
# 20150121 KaiH
if (lower.tail) {
@@ -111,9 +111,9 @@ qgumbelII <- function(p, scale = 1, shape,
LLL <- max(length(p), length(shape), length(scale))
- if (length(p) != LLL) p <- rep(p, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
if (lower.tail) {
@@ -324,8 +324,8 @@ rgumbelII <- function(n, scale = 1, shape) {
M1 <- extra$M1
misc$link <-
- c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -440,11 +440,11 @@ dmbeard <- function(x, shape, scale = 1, rho, epsilon, log = FALSE) {
LLL <- max(length(x), length(shape), length(scale),
length(rho), length(epsilon))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(rho) != LLL) rho <- rep(rho, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(rho) != LLL) rho <- rep_len(rho, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
index0 <- (x < 0)
@@ -474,11 +474,11 @@ pmbeard <- function(q, shape, scale = 1, rho, epsilon) {
LLL <- max(length(q), length(shape), length(scale),
length(rho), length(epsilon))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(rho) != LLL) rho <- rep(rho, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(rho) != LLL) rho <- rep_len(rho, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
ans <- -expm1(-epsilon * q -
@@ -504,10 +504,10 @@ dmperks <- function(x, scale = 1, shape, epsilon, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale), length(epsilon))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
index0 <- (x < 0)
@@ -535,9 +535,9 @@ dmperks <- function(x, scale = 1, shape, epsilon, log = FALSE) {
pmperks <- function(q, scale = 1, shape, epsilon) {
LLL <- max(length(q), length(shape), length(scale))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
ans <- -expm1(-epsilon * q -
@@ -571,10 +571,10 @@ dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale), length(rho))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(rho) != LLL) rho <- rep(rho, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(rho) != LLL) rho <- rep_len(rho, LLL)
index0 <- (x < 0)
ans <- log(shape) - x * scale * (rho^(-1 / scale)) +
@@ -642,9 +642,9 @@ dperks <- function(x, scale = 1, shape, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
index0 <- (x < 0)
ans <- log(shape) - x +
@@ -677,9 +677,9 @@ pperks <- function(q, scale = 1, shape,
LLL <- max(length(q), length(shape), length(scale))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
logS <- -q + (log1p(shape) -
log(shape + exp(-q * scale))) / scale
@@ -721,9 +721,9 @@ qperks <- function(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) {
stop("bad input for argument 'log.p'")
LLL <- max(length(p), length(shape), length(scale))
- if (length(p) != LLL) p <- rep(p, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
if (lower.tail) {
@@ -887,29 +887,21 @@ perks.control <- function(save.weights = TRUE, ...) {
yvec <- y[, spp.]
wvec <- w[, spp.]
- perks.Loglikfun <- function(scaleval, y, x, w, extraargs) {
- ans <-
- sum(c(w) * dperks(x = y, shape = extraargs$Shape,
- scale = scaleval, log = TRUE))
- ans
- }
-
- mymat <- matrix(-1, length(shape.grid), 2)
- for (jlocal in 1:length(shape.grid)) {
- mymat[jlocal, ] <-
- grid.search(scale.grid,
- objfun = perks.Loglikfun,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE,
- extraargs = list(Shape = shape.grid[jlocal]))
+ perks.Loglikfun2 <- function(scaleval, shapeval,
+ y, x, w, extraargs) {
+ sum(c(w) * dperks(x = y, shape = shapeval,
+ scale = scaleval, log = TRUE))
}
- index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
-
- if (!length( .ishape ))
- matH[, spp.] <- shape.grid[index.shape]
+ try.this <-
+ grid.search2(scale.grid, shape.grid,
+ objfun = perks.Loglikfun2,
+ y = yvec, w = wvec,
+ ret.objfun = TRUE) # Last value is the loglik
if (!length( .iscale ))
- matC[, spp.] <- mymat[index.shape, 1]
+ matC[, spp.] <- try.this["Value1"]
+ if (!length( .ishape ))
+ matH[, spp.] <- try.this["Value2"]
} # spp.
etastart <-
@@ -933,8 +925,8 @@ perks.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
misc$link <-
- c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1126,10 +1118,10 @@ dmakeham <- function(x, scale = 1, shape, epsilon = 0, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale), length(epsilon))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
index0 <- (x < 0)
ans <- log(epsilon * exp(-x * scale) + shape) +
@@ -1160,10 +1152,10 @@ pmakeham <- function(q, scale = 1, shape, epsilon = 0,
stop("bad input for argument 'log.p'")
LLL <- max(length(q), length(shape), length(scale), length(epsilon))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
if (lower.tail) {
if (log.p) {
@@ -1203,10 +1195,10 @@ qmakeham <- function(p, scale = 1, shape, epsilon = 0,
stop("bad input for argument 'log.p'")
LLL <- max(length(p), length(shape), length(scale), length(epsilon))
- if (length(p) != LLL) p <- rep(p, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
- if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(epsilon) != LLL) epsilon <- rep_len(epsilon, LLL)
if (lower.tail) {
@@ -1405,30 +1397,23 @@ makeham.control <- function(save.weights = TRUE, ...) {
yvec <- y[, spp.]
wvec <- w[, spp.]
- makeham.Loglikfun <- function(scaleval, y, x, w, extraargs) {
- ans <-
- sum(c(w) * dmakeham(x = y, shape = extraargs$Shape,
+
+ makeham.Loglikfun2 <- function(scaleval, shapeval,
+ y, x, w, extraargs) {
+ sum(c(w) * dmakeham(x = y, shape = shapeval,
epsilon = extraargs$Epsil,
scale = scaleval, log = TRUE))
- ans
}
-
- mymat <- matrix(-1, length(shape.grid), 2)
- for (jlocal in 1:length(shape.grid)) {
- mymat[jlocal, ] <-
- grid.search(scale.grid,
- objfun = makeham.Loglikfun,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE,
- extraargs = list(Shape = shape.grid[jlocal],
- Epsil = matE[1, spp.]))
- }
- index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
-
- if (!length( .ishape ))
- matH[, spp.] <- shape.grid[index.shape]
+ try.this <-
+ grid.search2(scale.grid, shape.grid,
+ objfun = makeham.Loglikfun2,
+ y = yvec, w = wvec,
+ extraargs = list(Epsilon = matE[1, spp.]),
+ ret.objfun = TRUE) # Last value is the loglik
if (!length( .iscale ))
- matC[, spp.] <- mymat[index.shape, 1]
+ matC[, spp.] <- try.this["Value1"]
+ if (!length( .ishape ))
+ matH[, spp.] <- try.this["Value2"]
} # spp.
@@ -1482,9 +1467,9 @@ makeham.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
misc$link <-
- c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly),
- rep( .lepsil , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly),
+ rep_len( .lepsil , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2, mynames3)[
interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1643,9 +1628,9 @@ dgompertz <- function(x, scale = 1, shape, log = FALSE) {
rm(log)
LLL <- max(length(x), length(shape), length(scale))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
index0 <- (x < 0)
@@ -1679,9 +1664,9 @@ pgompertz <- function(q, scale = 1, shape,
stop("bad input for argument 'log.p'")
LLL <- max(length(q), length(shape), length(scale))
- if (length(q) != LLL) q <- rep(q, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
if (lower.tail) {
@@ -1720,9 +1705,9 @@ qgompertz <- function(p, scale = 1, shape,
stop("bad input for argument 'log.p'")
LLL <- max(length(p), length(shape), length(scale))
- if (length(p) != LLL) p <- rep(p, length.out = LLL)
- if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
if (lower.tail) {
if (log.p) {
@@ -1888,7 +1873,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
}
mymat <- matrix(-1, length(shape.grid), 2)
- for (jlocal in 1:length(shape.grid)) {
+ for (jlocal in seq_along(shape.grid)) {
mymat[jlocal, ] <-
grid.search(scale.grid,
objfun = gompertz.Loglikfun,
@@ -1922,8 +1907,8 @@ gompertz.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
misc$link <-
- c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -2079,9 +2064,9 @@ if (ii < 3) {
LLL <- max(length(x),
length(alpha),
length(lambda))
- if (length(x) != LLL) x <- rep(x, length.out = LLL)
- if (length(alpha) != LLL) alpha <- rep(alpha, length.out = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(alpha) != LLL) alpha <- rep_len(alpha, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
index0 <- (x < 0)
if (log.arg) {
@@ -2283,8 +2268,8 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
misc$link <-
- c(rep( .lalpha0 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lalpha0 , ncoly),
+ rep_len( .llambda , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -2414,6 +2399,18 @@ if (ii < 3) {
+
+ genbetaII.Loglikfun4 <-
+ function(scaleval, shape1.a, shape2.p, shape3.q,
+ y, x, w, extraargs) {
+ sum(c(w) * dgenbetaII(x = y, scale = scaleval,
+ shape1.a = shape1.a,
+ shape2.p = shape2.p,
+ shape3.q = shape3.q, log = TRUE))
+ }
+
+
+
genbetaII <- function(lscale = "loge",
lshape1.a = "loge",
lshape2.p = "loge",
@@ -2552,43 +2549,21 @@ if (ii < 3) {
gshape1.a <- .gshape1.a
gshape2.p <- .gshape2.p
gshape3.q <- .gshape3.q
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- if (length( .ishape2.p ))
- gshape2.p <- rep( .ishape2.p , length = NOS)
- if (length( .ishape3.q ))
- gshape3.q <- rep( .ishape3.q , length = NOS)
- allmat1 <- expand.grid(shape1.a = gshape1.a,
- shape2.p = gshape2.p,
- shape3.q = gshape3.q)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.gbII <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = extraargs$Shape2.p,
- shape3.q = extraargs$Shape3.q,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.gbII,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1],
- Shape2.p = allmat1[iloc, 2],
- Shape3.q = allmat1[iloc, 3]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
- pp.init[, spp.] <- allmat1[ind5, 2]
- qq.init[, spp.] <- allmat1[ind5, 3]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+ if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
+ if (length( .ishape3.q )) gshape3.q <- rep_len( .ishape3.q , NOS)
+
+
+ try.this <-
+ grid.search4(gscale, gshape1.a, gshape2.p, gshape3.q,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
+ pp.init[, spp.] <- try.this["Value3"]
+ qq.init[, spp.] <- try.this["Value4"]
} # End of for (spp. ...)
@@ -2621,7 +2596,7 @@ if (ii < 3) {
.lss = lss ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
M1 <- 4
- NOS <- ncol(eta)/M1
+ NOS <- ncol(eta) / M1
if ( .lss ) {
Scale <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
.lscale , earg = .escale )
@@ -2650,10 +2625,10 @@ if (ii < 3) {
last = eval(substitute(expression({
M1 <- 4
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
- rep( .lshape2.p , length = ncoly),
- rep( .lshape3.q , length = ncoly))[
+ misc$link <- c(rep_len( if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len( if ( .lss ) .lshape1.a else .lscale , ncoly),
+ rep_len( .lshape2.p , ncoly),
+ rep_len( .lshape3.q , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha2.names, sha3.names)
@@ -2721,7 +2696,51 @@ if (ii < 3) {
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
.lss = lss ))),
vfamily = c("genbetaII"),
+
+
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 4
+ NOS <- ncol(eta) / M1
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape2.p , earg = .eshape2.p)
+ qq <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape3.q , earg = .eshape3.q)
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a*p < 1 < a*q has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lss = lss ))),
+
+
+
+
deriv = eval(substitute(expression({
+ M1 <- 4
NOS <- ncol(eta)/M1 # Needed for summary()
if ( .lss ) {
Scale <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
@@ -2850,11 +2869,11 @@ dgenbetaII <- function(x, scale = 1, shape1.a, shape2.p, shape3.q,
if (any(x <= 0) || any(is.infinite(x))) {
LLL <- max(length(x), length(scale),
length(shape1.a), length(shape2.p), length(shape3.q))
- if (length(x) != LLL) x <- rep(x, length = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length = LLL)
- if (length(shape1.a) != LLL) shape1.a <- rep(shape1.a, length = LLL)
- if (length(shape2.p) != LLL) shape2.p <- rep(shape2.p, length = LLL)
- if (length(shape3.q) != LLL) shape3.q <- rep(shape3.q, length = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(shape2.p) != LLL) shape2.p <- rep_len(shape2.p, LLL)
+ if (length(shape3.q) != LLL) shape3.q <- rep_len(shape3.q, LLL)
logden[is.infinite(x)] <- log(0)
logden[x < 0] <- log(0)
@@ -2927,14 +2946,10 @@ qsinmad <- function(p, scale = 1, shape1.a, shape3.q,
LLL <- max(length(p), length(shape1.a), length(scale),
length(shape3.q))
- if (length(p) != LLL)
- p <- rep(p, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape3.q) != LLL)
- shape3.q <- rep(shape3.q, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape3.q) != LLL) shape3.q <- rep_len(shape3.q, LLL)
if (lower.tail) {
@@ -2995,14 +3010,10 @@ qdagum <- function(p, scale = 1, shape1.a, shape2.p,
LLL <- max(length(p), length(shape1.a), length(scale),
length(shape2.p))
- if (length(p) != LLL)
- p <- rep(p, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape2.p) != LLL)
- shape2.p <- rep(shape2.p, length.out = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape2.p) != LLL) shape2.p <- rep_len(shape2.p, LLL)
if (lower.tail) {
if (log.p) {
@@ -3065,14 +3076,10 @@ psinmad <- function(q, scale = 1, shape1.a, shape3.q,
LLL <- max(length(q), length(shape1.a), length(scale),
length(shape3.q))
- if (length(q) != LLL)
- q <- rep(q, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape3.q) != LLL)
- shape3.q <- rep(shape3.q, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape3.q) != LLL) shape3.q <- rep_len(shape3.q, LLL)
# 20150121 KaiH
if (lower.tail) {
@@ -3142,14 +3149,10 @@ pdagum <- function(q, scale = 1, shape1.a, shape2.p,
LLL <- max(length(q), length(shape1.a), length(scale),
length(shape2.p))
- if (length(q) != LLL)
- q <- rep(q, length.out = LLL)
- if (length(shape1.a) != LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- if (length(scale) != LLL)
- scale <- rep(scale, length.out = LLL)
- if (length(shape2.p) != LLL)
- shape2.p <- rep(shape2.p, length.out = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape2.p) != LLL) shape2.p <- rep_len(shape2.p, LLL)
if (lower.tail) {
@@ -3213,12 +3216,12 @@ dsinmad <- function(x, scale = 1, shape1.a, shape3.q, log = FALSE) {
LLL <- max(length(x), length(shape1.a),
length(scale), length(shape3.q))
- x <- rep(x, length.out = LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- scale <- rep(scale, length.out = LLL)
- shape3.q <- rep(shape3.q, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape3.q) != LLL) shape3.q <- rep_len(shape3.q, LLL)
- Loglik <- rep(log(0), length.out = LLL)
+ Loglik <- rep_len(log(0), LLL)
xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
Loglik[xok] <- log(shape1.a[xok]) + log(shape3.q[xok]) +
(shape1.a[xok]-1) * log(x[xok]) -
@@ -3261,12 +3264,12 @@ ddagum <- function(x, scale = 1, shape1.a, shape2.p, log = FALSE) {
length(shape1.a),
length(scale),
length(shape2.p))
- x <- rep(x, length.out = LLL)
- shape1.a <- rep(shape1.a, length.out = LLL)
- scale <- rep(scale, length.out = LLL)
- shape2.p <- rep(shape2.p, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(shape1.a) != LLL) shape1.a <- rep_len(shape1.a, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(shape2.p) != LLL) shape2.p <- rep_len(shape2.p, LLL)
- Loglik <- rep(log(0), length.out = LLL)
+ Loglik <- rep_len(log(0), LLL)
xok <- (x > 0) & !is.na(x) # Avoids log(x) if x<0, and handles NAs
Loglik[xok] <- log(shape1.a[xok]) +
log(shape2.p[xok]) +
@@ -3344,7 +3347,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
-
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
lscale <- attr(escale, "function.name")
@@ -3438,38 +3440,20 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale <- .gscale
gshape1.a <- .gshape1.a
gshape3.q <- .gshape3.q
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- if (length( .ishape3.q ))
- gshape3.q <- rep( .ishape3.q , length = NOS)
- allmat1 <- expand.grid(shape1.a = gshape1.a,
- shape3.q = gshape3.q)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.sinm <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = 1,
- shape3.q = extraargs$Shape3.q,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.sinm,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1],
- Shape3.q = allmat1[iloc, 2]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
- qq.init[, spp.] <- allmat1[ind5, 2]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+ if (length( .ishape3.q )) gshape3.q <- rep_len( .ishape3.q , NOS)
+
+
+
+ try.this <-
+ grid.search4(gscale, gshape1.a, vov3 = 1, gshape3.q,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
+ qq.init[, spp.] <- try.this["Value4"]
} else { # .imethod == 2
qvec <- .probs.y
ishape3.q <- if (length( .ishape3.q )) .ishape3.q else 1
@@ -3483,6 +3467,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
}
} # End of for (spp. ...)
+
+
+
finite.mean <- 1 < aa.init * qq.init
COP.use <- 1.15
while (FALSE && any(!finite.mean)) {
@@ -3544,9 +3531,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 3
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
- rep( .lshape3.q , length = ncoly))[
+ misc$link <- c(rep_len( if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len( if ( .lss ) .lshape1.a else .lscale , ncoly),
+ rep_len( .lshape3.q , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha3.names)
@@ -3644,7 +3631,46 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
.lshape3.q = lshape3.q,
.eshape3.q = eshape3.q
))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- 1
+ qq <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape3.q , earg = .eshape3.q)
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a < 1 < a*q has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lshape3.q = lshape3.q,
+ .eshape3.q = eshape3.q,
+ .lss = lss ))),
+
+
+
+
deriv = eval(substitute(expression({
+ M1 <- 3
NOS <- ncol(eta)/M1 # Needed for summary()
if ( .lss ) {
Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
@@ -3741,19 +3767,20 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
- dagum <- function(lscale = "loge",
- lshape1.a = "loge",
- lshape2.p = "loge",
- iscale = NULL,
- ishape1.a = NULL,
- ishape2.p = NULL,
- imethod = 1,
- lss = TRUE,
- gscale = exp(-5:5),
- gshape1.a = exp(-5:5),
- gshape2.p = exp(-5:5),
- probs.y = c(0.25, 0.50, 0.75),
- zero = "shape") {
+ dagum <-
+ function(lscale = "loge",
+ lshape1.a = "loge",
+ lshape2.p = "loge",
+ iscale = NULL,
+ ishape1.a = NULL,
+ ishape2.p = NULL,
+ imethod = 1,
+ lss = TRUE,
+ gscale = exp(-5:5),
+ gshape1.a = seq(0.75, 4, by = 0.25), # exp(-5:5),
+ gshape2.p = exp(-5:5),
+ probs.y = c(0.25, 0.50, 0.75),
+ zero = "shape") {
@@ -3874,38 +3901,20 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale <- .gscale
gshape1.a <- .gshape1.a
gshape2.p <- .gshape2.p
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- if (length( .ishape2.p ))
- gshape2.p <- rep( .ishape2.p , length = NOS)
- allmat1 <- expand.grid(shape1.a = gshape1.a,
- shape2.p = gshape2.p)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.dagu <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = extraargs$Shape2.p,
- shape3.q = 1,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.dagu,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1],
- Shape2.p = allmat1[iloc, 2]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
- pp.init[, spp.] <- allmat1[ind5, 2]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+ if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
+
+
+
+ try.this <-
+ grid.search4(gscale, gshape1.a, gshape2.p, vov4 = 1,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
+ pp.init[, spp.] <- try.this["Value3"]
} else { # .imethod == 2
qvec <- .probs.y
ishape2.p <- if (length( .ishape2.p )) .ishape2.p else 1
@@ -3982,9 +3991,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 3
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
- rep( .lshape2.p , length = ncoly))[
+ misc$link <- c(rep_len( if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len( if ( .lss ) .lshape1.a else .lscale , ncoly),
+ rep_len( .lshape2.p , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha2.names)
@@ -4081,6 +4090,45 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
.lss = lss ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta)/M1 # Needed for summary()
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape2.p , earg = .eshape2.p)
+ qq <- 1
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a*p < 1 < a has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lshape2.p = lshape2.p,
+ .eshape2.p = eshape2.p,
+ .lss = lss ))),
+
+
+
+
deriv = eval(substitute(expression({
M1 <- 3
NOS <- ncol(eta)/M1 # Needed for summary()
@@ -4173,18 +4221,19 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
- betaII <- function(lscale = "loge",
- lshape2.p = "loge",
- lshape3.q = "loge",
- iscale = NULL,
- ishape2.p = NULL,
- ishape3.q = NULL,
- imethod = 1,
- gscale = exp(-5:5),
- gshape2.p = exp(-5:5),
- gshape3.q = exp(-5:5),
- probs.y = c(0.25, 0.50, 0.75),
- zero = "shape") {
+ betaII <-
+ function(lscale = "loge",
+ lshape2.p = "loge",
+ lshape3.q = "loge",
+ iscale = NULL,
+ ishape2.p = NULL,
+ ishape3.q = NULL,
+ imethod = 1,
+ gscale = exp(-5:5),
+ gshape2.p = exp(-5:5),
+ gshape3.q = seq(0.75, 4, by = 0.25), # exp(-5:5),
+ probs.y = c(0.25, 0.50, 0.75),
+ zero = "shape") {
@@ -4289,38 +4338,21 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale <- .gscale
gshape2.p <- .gshape2.p
gshape3.q <- .gshape3.q
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape2.p ))
- gshape2.p <- rep( .ishape2.p , length = NOS)
- if (length( .ishape3.q ))
- gshape3.q <- rep( .ishape3.q , length = NOS)
- allmat1 <- expand.grid(shape2.p = gshape2.p,
- shape3.q = gshape3.q)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.beII <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = 1,
- shape2.p = extraargs$Shape2.p,
- shape3.q = extraargs$Shape3.q,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.beII,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape2.p = allmat1[iloc, 1],
- Shape3.q = allmat1[iloc, 2]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- pp.init[, spp.] <- allmat1[ind5, 1]
- qq.init[, spp.] <- allmat1[ind5, 2]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
+ if (length( .ishape3.q )) gshape3.q <- rep_len( .ishape3.q , NOS)
+
+
+
+
+ try.this <-
+ grid.search4(gscale, vov2 = 1, gshape2.p, gshape3.q,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ pp.init[, spp.] <- try.this["Value3"]
+ qq.init[, spp.] <- try.this["Value4"]
} else { # .imethod == 2
sc.init[, spp.] <- if (length( .iscale )) .iscale else {
@@ -4388,10 +4420,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 3
- misc$link <- c(rep( .lscale , length = ncoly),
- rep( .lshape2.p , length = ncoly),
- rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lscale , ncoly),
+ rep_len( .lshape2.p , ncoly),
+ rep_len( .lshape3.q , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(scaL.names, sha2.names, sha3.names)
names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
@@ -4440,7 +4471,39 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q
))),
vfamily = c("betaII"),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta)/M1
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- 1
+ parg <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape2.p , earg = .eshape2.p )
+ qq <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape3.q , earg = .eshape3.q )
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -p < 1 < q has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale ,
+ .escale = escale ,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q ))),
+
+
+
+
deriv = eval(substitute(expression({
+ M1 <- 3
NOS <- ncol(eta)/M1 # Needed for summary()
Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
.lscale , earg = .escale )
@@ -4507,15 +4570,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
- lomax <- function(lscale = "loge",
- lshape3.q = "loge",
- iscale = NULL,
- ishape3.q = NULL,
- imethod = 1,
- gscale = exp(-5:5),
- gshape3.q = exp(-5:5), # Finite mean only if qq>1
- probs.y = c(0.25, 0.50, 0.75),
- zero = "shape") {
+ lomax <-
+ function(lscale = "loge",
+ lshape3.q = "loge",
+ iscale = NULL,
+ ishape3.q = NULL,
+ imethod = 1,
+ gscale = exp(-5:5),
+ gshape3.q = seq(0.75, 4, by = 0.25), # exp(-5:5),
+ probs.y = c(0.25, 0.50, 0.75),
+ zero = "shape") {
@@ -4606,33 +4670,18 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if ( .imethod == 1) {
gscale <- .gscale
gshape3.q <- .gshape3.q
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape3.q ))
- gshape3.q <- rep( .ishape3.q , length = NOS)
- allmat1 <- cbind(shape3.q = gshape3.q)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.lomx <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = 1,
- shape2.p = 1,
- shape3.q = extraargs$Shape3.q,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.lomx,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape3.q = allmat1[iloc, 1]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- qq.init[, spp.] <- allmat1[ind5, 1]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape3.q )) gshape3.q <- rep_len( .ishape3.q , NOS)
+
+
+
+ try.this <-
+ grid.search4(gscale, vov2 = 1, vov3 = 1, gshape3.q,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ qq.init[, spp.] <- try.this["Value4"]
} else { # .imethod == 2
qvec <- .probs.y
iscale <- if (length( .iscale )) .iscale else 1
@@ -4691,9 +4740,8 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 2
- misc$link <- c(rep( .lscale , length = ncoly),
- rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lscale , ncoly),
+ rep_len( .lshape3.q , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <-
c(scaL.names, sha3.names)
names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
@@ -4763,7 +4811,37 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
.lshape3.q = lshape3.q,
.eshape3.q = eshape3.q
))),
- deriv = eval(substitute(expression({
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- 1
+ parg <- 1
+ qq <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape3.q , earg = .eshape3.q)
+
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint 1 < q has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale ,
+ .escale = escale ,
+ .lshape3.q = lshape3.q,
+ .eshape3.q = eshape3.q ))),
+
+
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 2
NOS <- ncol(eta)/M1 # Needed for summary()
Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
.lscale , earg = .escale )
@@ -4824,16 +4902,17 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
- fisk <- function(lscale = "loge",
- lshape1.a = "loge",
- iscale = NULL,
- ishape1.a = NULL,
- imethod = 1,
- lss = TRUE,
- gscale = exp(-5:5),
- gshape1.a = exp(-5:5),
- probs.y = c(0.25, 0.50, 0.75),
- zero = "shape") {
+ fisk <-
+ function(lscale = "loge",
+ lshape1.a = "loge",
+ iscale = NULL,
+ ishape1.a = NULL,
+ imethod = 1,
+ lss = TRUE,
+ gscale = exp(-5:5),
+ gshape1.a = seq(0.75, 4, by = 0.25), # exp(-5:5),
+ probs.y = c(0.25, 0.50, 0.75),
+ zero = "shape") {
@@ -4936,33 +5015,19 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if ( .imethod == 1 ) {
gscale <- .gscale
gshape1.a <- .gshape1.a
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- allmat1 <- cbind(shape1.a = gshape1.a)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.fisk <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = 1,
- shape3.q = 1,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.fisk,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+
+
+
+
+ try.this <-
+ grid.search4(gscale, gshape1.a, vov3 = 1, vov4 = 1,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
} else { # .imethod == 2
qvec <- .probs.y
iscale <- if (length( .iscale )) .iscale else 1
@@ -5026,8 +5091,8 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 2
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
+ misc$link <- c(rep_len( if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len( if ( .lss ) .lshape1.a else .lscale , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
@@ -5109,7 +5174,44 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lss = lss ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- 1
+ qq <- 1
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a < 1 < a has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ))),
+
+
+
+
deriv = eval(substitute(expression({
+ M1 <- 2
NOS <- ncol(eta)/M1 # Needed for summary()
if ( .lss ) {
Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -5276,7 +5378,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(namesof(scaL.names , .lscale , earg = .escale , tag = FALSE),
namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
+
if (!length(etastart)) {
sc.init <-
pp.init <- matrix(NA_real_, n, NOS)
@@ -5288,33 +5390,20 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if ( .imethod == 1 ) {
gscale <- .gscale
gshape2.p <- .gshape2.p
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape2.p ))
- gshape2.p <- rep( .ishape2.p , length = NOS)
- allmat1 <- cbind(shape2.p = gshape2.p)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.invL <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = 1,
- shape2.p = extraargs$Shape2.p,
- shape3.q = 1,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.invL,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape2.p = allmat1[iloc, 1]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- pp.init[, spp.] <- allmat1[ind5, 1]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
+
+
+
+
+
+ try.this <-
+ grid.search4(gscale, vov2 = 1, gshape2.p, vov4 = 1,
+ objfun = genbetaII.Loglikfun4, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ pp.init[, spp.] <- try.this["Value3"]
} else { # .imethod == 2
qvec <- .probs.y
ishape2.p <- if (length( .ishape2.p )) .ishape2.p else 1
@@ -5360,9 +5449,8 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 2
- misc$link <- c(rep( .lscale , length = ncoly),
- rep( .lshape2.p , length = ncoly))[
- interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lscale , ncoly),
+ rep_len( .lshape2.p , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(scaL.names, sha2.names)
names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
@@ -5430,6 +5518,37 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ parg <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape2.p , earg = .eshape2.p )
+ qq <- 1
+ aa <- 1
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 ) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a*p < 1 has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale ,
+ .escale = escale ,
+ .lshape2.p = lshape2.p,
+ .eshape2.p = eshape2.p
+ ))),
+
+
+
+
deriv = eval(substitute(expression({
M1 <- 2
NOS <- ncol(eta)/M1 # Needed for summary()
@@ -5489,16 +5608,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
paralogistic <-
- function(lscale = "loge",
- lshape1.a = "loge",
- iscale = NULL,
- ishape1.a = NULL,
- imethod = 1,
- lss = TRUE,
- gscale = exp(-5:5),
- gshape1.a = exp(-5:5),
- probs.y = c(0.25, 0.50, 0.75),
- zero = "shape") {
+ function(lscale = "loge",
+ lshape1.a = "loge",
+ iscale = NULL,
+ ishape1.a = NULL,
+ imethod = 1,
+ lss = TRUE,
+ gscale = exp(-5:5),
+ gshape1.a = seq(0.75, 4, by = 0.25), # exp(-5:5),
+ probs.y = c(0.25, 0.50, 0.75),
+ zero = "shape") {
@@ -5533,7 +5652,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
new("vglmff",
- blurb =
+ blurb =
c("Paralogistic distribution \n\n",
"Links: ",
ifelse (lss,
@@ -5603,33 +5722,26 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if ( .imethod == 1 ) {
gscale <- .gscale
gshape1.a <- .gshape1.a
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- allmat1 <- expand.grid(shape1.a = gshape1.a)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.para <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = 1,
- shape3.q = extraargs$Shape1.a,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.para,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+
+
+ paralogistic.Loglikfun2 <-
+ function(scaleval, shape1.a,
+ y, x, w, extraargs) {
+ sum(c(w) * dgenbetaII(x = y, scale = scaleval,
+ shape1.a = shape1.a,
+ shape2.p = 1,
+ shape3.q = shape1.a, log = TRUE))
+ }
+
+ try.this <-
+ grid.search2(gscale, gshape1.a, # vov3 = 1, vov4 = gshape1.a,
+ objfun = paralogistic.Loglikfun2, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
} else { # .imethod == 2
qvec <- .probs.y
ishape3.q <- if (length( .ishape1.a )) .ishape1.a else 1
@@ -5692,8 +5804,8 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 2
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
+ misc$link <- c(rep_len(if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len(if ( .lss ) .lshape1.a else .lscale , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
@@ -5778,7 +5890,44 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lss = lss ))),
- deriv = eval(substitute(expression({
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- 1
+ qq <- aa
+
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a < 1 < a*a has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ))),
+
+
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 2
NOS <- ncol(eta)/M1 # Needed for summary()
if ( .lss ) {
Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -5852,6 +6001,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
+
+
+
+
+
inv.paralogistic <-
function(lscale = "loge",
lshape1.a = "loge",
@@ -5860,7 +6014,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
imethod = 1,
lss = TRUE,
gscale = exp(-5:5),
- gshape1.a = exp(-5:5),
+ gshape1.a = seq(0.75, 4, by = 0.25), # exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
zero = "shape") {
@@ -5965,33 +6119,27 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if ( .imethod == 1 ) {
gscale <- .gscale
gshape1.a <- .gshape1.a
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .ishape1.a ))
- gshape1.a <- rep( .ishape1.a , length = NOS)
- allmat1 <- cbind(shape1.a = gshape1.a)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.invp <- function(scaleval, x = x, y = y, w = w, extraargs) {
- ans <- sum(c(w) * dgenbetaII(x = y,
- scale = scaleval,
- shape1.a = extraargs$Shape1.a,
- shape2.p = extraargs$Shape1.a,
- shape3.q = 1,
- log = TRUE))
- ans
- }
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.invp,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.a = allmat1[iloc, 1]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- aa.init[, spp.] <- allmat1[ind5, 1]
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
+
+
+ inv.paralogistic.Loglikfun2 <-
+ function(scaleval, shape1.a,
+ y, x, w, extraargs) {
+ sum(c(w) * dgenbetaII(x = y, scale = scaleval,
+ shape1.a = shape1.a,
+ shape2.p = shape1.a,
+ shape3.q = 1, log = TRUE))
+ }
+
+
+ try.this <-
+ grid.search2(gscale, gshape1.a, # vov3 = 1, vov4 = gshape1.a,
+ objfun = inv.paralogistic.Loglikfun2, # x = x,
+ y = yvec, w = wvec, # extraargs = NULL,
+ ret.objfun = TRUE) # Last value is the loglik
+ sc.init[, spp.] <- try.this["Value1"]
+ aa.init[, spp.] <- try.this["Value2"]
} else { # .imethod == 2
qvec <- .probs.y
ishape2.p <- if (length( .ishape1.a )) .ishape1.a else 1
@@ -6056,8 +6204,8 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
last = eval(substitute(expression({
M1 <- 2
- misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
- rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
+ misc$link <- c(rep_len(if ( .lss ) .lscale else .lshape1.a , ncoly),
+ rep_len(if ( .lss ) .lshape1.a else .lscale , ncoly))[
interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
@@ -6143,6 +6291,41 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
.lss = lss ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ if ( .lss ) {
+ Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lscale , earg = .escale )
+ aa <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ } else {
+ aa <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lshape1.a , earg = .eshape1.a)
+ Scale <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lscale , earg = .escale )
+ }
+ parg <- aa
+ qq <- 1
+
+ okay1 <- all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(aa )) && all(aa > 0) &&
+ all(is.finite(parg )) && all(parg > 0) &&
+ all(is.finite(qq )) && all(qq > 0)
+ okay.support <- if (okay1) all(-aa * parg < 1 & 1 < aa * qq) else TRUE
+ if (!okay.support)
+ warning("parameter constraint -a*a < 1 < a has been violated; ",
+ "solution may be at the boundary of the ",
+ "parameter space.")
+ okay1 && okay.support
+ }, list( .lscale = lscale , .lshape1.a = lshape1.a,
+ .escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ))),
+
+
+
+
deriv = eval(substitute(expression({
M1 <- 2
NOS <- ncol(eta)/M1 # Needed for summary()
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index c586147..e33910b 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -21,11 +21,11 @@ dkumar <- function(x, shape1, shape2, log = FALSE) {
N <- max(length(x), length(shape1), length(shape2))
- if (length(x) != N) x <- rep(x, len = N)
- if (length(shape1) != N) shape1 <- rep(shape1, len = N)
- if (length(shape2) != N) shape2 <- rep(shape2, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(shape1) != N) shape1 <- rep_len(shape1, N)
+ if (length(shape2) != N) shape2 <- rep_len(shape2, N)
- logdensity <- rep(log(0), len = N)
+ logdensity <- rep_len(log(0), N)
xok <- (0 <= x & x <= 1)
logdensity[xok] <- log(shape1[xok]) + log(shape2[xok]) +
(shape1[xok] - 1) * log(x[xok]) +
@@ -230,8 +230,8 @@ pkumar <- function(q, shape1, shape2,
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
- misc$link <- c(rep( .lshape1 , length = ncoly),
- rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lshape1 , ncoly),
+ rep_len( .lshape2 , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -314,11 +314,11 @@ drice <- function(x, sigma, vee, log = FALSE) {
N <- max(length(x), length(vee), length(sigma))
- if (length(x) != N) x <- rep(x, len = N)
- if (length(vee) != N) vee <- rep(vee , len = N)
- if (length(sigma ) != N) sigma <- rep(sigma , len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(vee) != N) vee <- rep_len(vee , N)
+ if (length(sigma ) != N) sigma <- rep_len(sigma , N)
- logdensity <- rep(log(0), len = N)
+ logdensity <- rep_len(log(0), N)
xok <- (x > 0)
x.abs <- abs(x[xok] * vee[xok] / sigma[xok]^2)
logdensity[xok] <- log(x[xok]) - 2 * log(sigma[xok]) +
@@ -471,10 +471,10 @@ riceff.control <- function(save.weights = TRUE, ...) {
quantile(rep(y, w), probs = seq(0, 1, 0.2))["80%"], len = 11)
vee.init <- if (length( .ivee )) .ivee else
grid.search(vee.grid, objfun = riceff.Loglikfun, y = y, x = x, w = w)
- vee.init <- rep(vee.init, length = length(y))
+ vee.init <- rep_len(vee.init, length(y))
sigma.init <- if (length( .isigma )) .isigma else
sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
- sigma.init <- rep(sigma.init, length = length(y))
+ sigma.init <- rep_len(sigma.init, length(y))
etastart <-
cbind(theta2eta(sigma.init, .lsigma , earg = .esigma ),
@@ -582,7 +582,7 @@ riceff.control <- function(save.weights = TRUE, ...) {
n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
dtheta.detas <- cbind(dsigma.deta, dvee.deta)
- index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ index0 <- iam(NA_real_, NA_real_, M = M, both = TRUE, diag = TRUE)
wz <- wz * dtheta.detas[, index0$row] *
dtheta.detas[, index0$col]
c(w) * wz
@@ -601,9 +601,9 @@ dskellam <- function(x, mu1, mu2, log = FALSE) {
L <- max(length(x), length(mu1), length(mu2))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mu1) != L) mu1 <- rep(mu1, len = L)
- if (length(mu2) != L) mu2 <- rep(mu2, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mu1) != L) mu1 <- rep_len(mu1, L)
+ if (length(mu2) != L) mu2 <- rep_len(mu2, L)
ok2 <- is.finite(mu1) & is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
ok3 <- (mu1 == 0) & (mu2 > 0)
@@ -734,10 +734,8 @@ skellam.control <- function(save.weights = TRUE, ...) {
mu1.init <- max((var.y.est + mean.init) / 2, 0.01)
mu2.init <- max((var.y.est - mean.init) / 2, 0.01)
- mu1.init <- rep(if (length( .imu1 )) .imu1 else mu1.init,
- length = n)
- mu2.init <- rep(if (length( .imu2 )) .imu2 else mu2.init,
- length = n)
+ mu1.init <- rep_len(if (length( .imu1 )) .imu1 else mu1.init, n)
+ mu2.init <- rep_len(if (length( .imu2 )) .imu2 else mu2.init, n)
etastart <- cbind(theta2eta(mu1.init, .lmu1, earg = .emu1 ),
theta2eta(mu2.init, .lmu2, earg = .emu2 ))
@@ -855,7 +853,7 @@ skellam.control <- function(save.weights = TRUE, ...) {
cbind(run.var, run.cov)
dtheta.detas <- cbind(dmu1.deta, dmu2.deta)
- index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ index0 <- iam(NA_real_, NA_real_, M = M, both = TRUE, diag = TRUE)
wz <- wz * dtheta.detas[, index0$row] *
dtheta.detas[, index0$col]
c(w) * wz
@@ -1004,7 +1002,7 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -1160,7 +1158,7 @@ rlind <- function(n, theta) {
- ifelse(runif(use.n) < rep(1 / (1 + 1/theta), length = use.n),
+ ifelse(runif(use.n) < rep_len(1 / (1 + 1/theta), use.n),
rexp(use.n, theta),
rgamma(use.n, shape = 2, scale = 1 / theta))
}
@@ -1250,7 +1248,7 @@ rlind <- function(n, theta) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -1458,7 +1456,7 @@ if (FALSE)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -1539,9 +1537,9 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
if (!is.Numeric(sigma) || any(sigma <= 0))
stop("argument 'sigma' must be positive")
L <- max(length(x), length(mu), length(sigma))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mu) != L) mu <- rep(mu, len = L)
- if (length(sigma) != L) sigma <- rep(sigma, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mu) != L) mu <- rep_len(mu, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
zedd <- (x-mu)/sigma
if (log.arg) {
@@ -1560,7 +1558,7 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000,
lower.tail = TRUE, log.p = FALSE) {
- if (any(is.na(q)))
+ if (anyNA(q))
stop("argument 'q' must have non-missing values")
if (!is.Numeric(mu))
stop("argument 'mu' must have finite and non-missing values")
@@ -1577,9 +1575,9 @@ pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000,
stop("bad input for argument 'log.p'")
L <- max(length(q), length(mu), length(sigma))
- if (length(q) != L) q <- rep(q, len = L)
- if (length(mu) != L) mu <- rep(mu, len = L)
- if (length(sigma) != L) sigma <- rep(sigma, len = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(mu) != L) mu <- rep_len(mu, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
zedd <- (q - mu)/sigma
ans <- as.numeric(q * NA)
@@ -1735,7 +1733,7 @@ slash.control <- function(save.weights = TRUE, ...) {
max(0.01,
((quantile(rep(y, w), prob = 0.75)/2) -
mu.init) / qnorm(0.75))
- mu.init <- rep(mu.init, length = length(y))
+ mu.init <- rep_len(mu.init, length(y))
etastart <- matrix(0, n, 2)
etastart[, 1] <- theta2eta(mu.init, .lmu , earg = .emu )
etastart[, 2] <- theta2eta(sigma.init, .lsigma , earg = .esigma )
@@ -1822,7 +1820,7 @@ slash.control <- function(save.weights = TRUE, ...) {
.emu = emu, .esigma = esigma, .smallno = smallno ))),
weight = eval(substitute(expression({
run.varcov <- 0
- ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ ind1 <- iam(NA_real_, NA_real_, M = M, both = TRUE, diag = TRUE)
sd3 <- deriv3(~ w * log(1 - exp(-(((ysim - mu) / sigma)^2) / 2))-
log(sqrt(2 * pi) * sigma * ((ysim - mu) / sigma)^2),
c("mu", "sigma"))
@@ -1862,8 +1860,8 @@ dnefghs <- function(x, tau, log = FALSE) {
N <- max(length(x), length(tau))
- if (length(x) != N) x <- rep(x, len = N)
- if (length(tau) != N) tau <- rep(tau, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(tau) != N) tau <- rep_len(tau, N)
logdensity <- log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1pexp(x)
logdensity[tau < 0] <- NaN
@@ -1921,8 +1919,7 @@ dnefghs <- function(x, tau, log = FALSE) {
tau.init <- atan(pi / wmeany) / pi + 0.5
tau.init[tau.init < 0.03] <- 0.03
tau.init[tau.init > 0.97] <- 0.97
- tau.init <- rep(if (length( .itau )) .itau else tau.init,
- len = n)
+ tau.init <- rep_len(if (length( .itau )) .itau else tau.init, n)
etastart <- theta2eta(tau.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg,
@@ -2050,9 +2047,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
median(rep(y, w))
- shape1.init <- shape2.init <- rep( .ishape2 , len = n)
- shape1.init <- if (length( .ishape1))
- rep( .ishape1, len = n) else {
+ shape1.init <- shape2.init <- rep_len( .ishape2 , n)
+ shape1.init <- if (length( .ishape1)) rep_len( .ishape1, n) else {
index1 <- (y > wmeany)
shape1.init[ index1] <- shape2.init[ index1] + 1/1
shape1.init[!index1] <- shape2.init[!index1] - 1/1
@@ -2191,7 +2187,7 @@ rbenf <- function(n, ndigits = 1) {
stop("bad input for argument 'n'") else n
myrunif <- runif(use.n)
- ans <- rep(lowerlimit, length = use.n)
+ ans <- rep_len(lowerlimit, use.n)
for (ii in (lowerlimit+1):upperlimit) {
indexTF <- (pbenf(ii-1, ndigits = ndigits) < myrunif) &
(myrunif <= pbenf(ii, ndigits = ndigits))
@@ -2283,7 +2279,7 @@ qbenf <- function(p, ndigits = 1) {
if (any(bad))
stop("bad input for argument 'p'")
- ans <- rep(lowerlimit, length = length(p))
+ ans <- rep_len(lowerlimit, length(p))
for (ii in (lowerlimit+1):upperlimit) {
indexTF <- is.finite(p) &
(pbenf(ii-1, ndigits = ndigits) < p) &
@@ -2323,7 +2319,7 @@ qbenf <- function(p, ndigits = 1,
lowerlimit <- ifelse(ndigits == 1, 1, 10)
upperlimit <- ifelse(ndigits == 1, 9, 99)
- ans <- rep(lowerlimit, length = length(p))
+ ans <- rep_len(lowerlimit, length(p))
if (lower.tail) {
for (ii in (lowerlimit+1):upperlimit) {
@@ -2518,7 +2514,7 @@ qbenf <- function(p, ndigits = 1,
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -2598,3 +2594,979 @@ qbenf <- function(p, ndigits = 1,
+
+
+
+
+
+ betaff <-
+ function(A = 0, B = 1,
+ lmu = "logit",
+ lphi = "loge",
+ imu = NULL, iphi = NULL, imethod = 1, zero = NULL) {
+
+
+ stdbeta <- (A == 0 && B == 1)
+
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+
+
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
+
+
+ if (!is.Numeric(A, length.arg = 1) ||
+ !is.Numeric(B, length.arg = 1) || A >= B)
+ stop("A must be < B, and both must be of length one")
+
+
+
+
+ if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
+ any(imu <= A) || any(imu >= B)))
+ stop("bad input for argument 'imu'")
+ if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
+ stop("bad input for argument 'iphi'")
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Beta distribution parameterized by mu and a ",
+ "precision parameter\n",
+ if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
+ "/ beta(mu*phi,(1-mu)*phi),\n",
+ " 0<y<1, 0<mu<1, phi>0\n\n") else
+ paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
+ "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
+ B, "-", A, ")^(phi-1)),\n",
+ A," < y < ",B, ", ", A," < mu < ",B,
+ ", mu = ", A, " + ", (B-A), " * mu1",
+ ", phi > 0\n\n", sep = ""),
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("phi", lphi, earg = ephi)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <- c(namesof("mu", .lmu , .emu , short = TRUE),
+ namesof("phi", .lphi , .ephi, short = TRUE))
+ if (!length(etastart)) {
+ mu.init <- if (is.Numeric( .imu )) .imu else {
+ if ( .imethod == 1) weighted.mean(y, w) else
+ median(rep(y, w))
+ }
+ mu1.init <- (mu.init - .A ) / ( .B - .A ) # In (0,1)
+ phi.init <- if (is.Numeric( .iphi )) .iphi else
+ max(0.01, -1 + ( .B - .A )^2 * mu1.init*(1-mu1.init)/var(y))
+ etastart <- matrix(0, n, 2)
+ etastart[, 1] <- theta2eta(mu.init , .lmu , earg = .emu )
+ etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
+ }
+ }), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
+ .A = A, .B = B, .emu = emu, .ephi = ephi,
+ .imethod = imethod ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu <- eta2theta(eta[, 1], .lmu , .emu )
+ mu
+ }, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu = .lmu , phi = .lphi )
+ misc$earg <- list(mu = .emu , phi = .ephi )
+ misc$limits <- c( .A , .B )
+ misc$stdbeta <- .stdbeta
+ }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
+ .stdbeta = stdbeta ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ shape1 <- phi * m1u
+ shape2 <- (1 - m1u) * phi
+ zedd <- (y - .A) / ( .B - .A)
+ ll.elts <-
+ c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
+ log = TRUE) -
+ log( abs( .B - .A )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
+ .stdbeta = stdbeta ))),
+ vfamily = "betaff",
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+
+ eta <- predict(object)
+ mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
+ phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
+ shape1 <- phi * m1u
+ shape2 <- (1 - m1u) * phi
+ .A + ( .B - .A ) *
+ rbeta(nsim * length(shape1), shape1 = shape1, shape2 = shape2)
+ }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
+ .stdbeta = stdbeta ))),
+
+
+
+
+
+ deriv = eval(substitute(expression({
+ mu <- eta2theta(eta[, 1], .lmu , .emu )
+ phi <- eta2theta(eta[, 2], .lphi , .ephi )
+ m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
+ dmu.deta <- dtheta.deta(mu, .lmu , .emu )
+ dmu1.dmu <- 1 / ( .B - .A)
+ dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
+ temp1 <- m1u*phi
+ temp2 <- (1-m1u)*phi
+ if ( .stdbeta ) {
+ dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
+ dl.dphi <- digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
+ mu*log(y) + (1-mu)*log1p(-y)
+ } else {
+ dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) +
+ log(y-.A) - log( .B-y))
+ dl.dphi <- digamma(phi) - m1u*digamma(temp1) -
+ (1-m1u)*digamma(temp2) +
+ m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
+ }
+ c(w) * cbind(dl.dmu1 * dmu1.dmu * dmu.deta,
+ dl.dphi * dphi.deta)
+ }), list( .lmu = lmu, .lphi = lphi,
+ .emu = emu, .ephi = ephi,
+ .A = A, .B = B,
+ .stdbeta = stdbeta ))),
+ weight = eval(substitute(expression({
+ d2l.dmu12 <- (trigamma(temp1) + trigamma(temp2)) * phi^2
+ d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
+ trigamma(temp2) * (1-m1u)^2
+ d2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
+ wz <- matrix(NA_real_, n, dimm(M))
+ wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
+ wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
+ wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
+ c(w) * wz
+ }), list( .A = A, .B = B ))))
+}
+
+
+
+
+
+ betaR <-
+ function(lshape1 = "loge", lshape2 = "loge",
+ i1 = NULL, i2 = NULL, trim = 0.05,
+ A = 0, B = 1, parallel = FALSE, zero = NULL) {
+
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+
+ if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
+ stop("bad input for argument 'i1'")
+ if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
+ stop("bad input for argument 'i2'")
+
+ if (!is.Numeric(A, length.arg = 1) ||
+ !is.Numeric(B, length.arg = 1) ||
+ A >= B)
+ stop("A must be < B, and both must be of length one")
+
+ stdbeta <- (A == 0 && B == 1) # stdbeta == T iff standard beta distn
+
+
+
+ new("vglmff",
+ blurb = c("Two-parameter Beta distribution ",
+ "(shape parameters parameterization)\n",
+ if (stdbeta)
+ paste("y^(shape1-1) * (1-y)^(shape2-1) / B(shape1,shape2),",
+ "0 <= y <= 1, shape1>0, shape2>0\n\n") else
+ paste("(y-",A,")^(shape1-1) * (",B,
+ "-y)^(shape2-1) / [B(shape1,shape2) * (",
+ B, "-", A, ")^(shape1+shape2-1)], ",
+ A," <= y <= ",B," shape1>0, shape2>0\n\n", sep = ""),
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+ bool = .parallel ,
+ constraints, apply.int = TRUE)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ A = .A,
+ B = .B,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .A = A, .B = B,
+ .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <-
+ c(namesof("shape1", .lshape1 , earg = .eshape1 , short = TRUE),
+ namesof("shape2", .lshape2 , earg = .eshape2 , short = TRUE))
+
+ if (!length(etastart)) {
+ mu1d <- mean(y, trim = .trim )
+ uu <- (mu1d - .A) / ( .B - .A)
+ DD <- ( .B - .A)^2
+ pinit <- max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
+ qinit <- max(0.01, pinit * (1 - uu) / uu)
+ etastart <- matrix(0, n, 2)
+ etastart[, 1] <- theta2eta( pinit, .lshape1 , earg = .eshape1 )
+ etastart[, 2] <- theta2eta( qinit, .lshape2 , earg = .eshape2 )
+ }
+ if (is.Numeric( .i1 ))
+ etastart[, 1] <- theta2eta( .i1 , .lshape1 , earg = .eshape1 )
+ if (is.Numeric( .i2 ))
+ etastart[, 2] <- theta2eta( .i2 , .lshape2 , earg = .eshape2 )
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ .A + ( .B - .A ) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1 = .lshape1 , shape2 = .lshape2 )
+ misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 )
+ misc$limits <- c( .A , .B )
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ zedd <- (y - .A ) / ( .B - .A )
+ ll.elts <-
+ c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
+ shape2 = shapes[, 2],
+ log = TRUE) - log( abs( .B - .A )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = "betaR",
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+
+ eta <- predict(object)
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ .A + ( .B - .A ) *
+ rbeta(nsim * length(shapes[, 1]),
+ shape1 = shapes[, 1], shape2 = shapes[, 2])
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+
+
+
+ deriv = eval(substitute(expression({
+ shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+
+ dshapes.deta <-
+ cbind(dtheta.deta(shapes[, 1], .lshape1 , earg = .eshape1),
+ dtheta.deta(shapes[, 2], .lshape2 , earg = .eshape2))
+
+ dl.dshapes <- cbind(log(y - .A ), log( .B - y)) -
+ digamma(shapes) +
+ digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A )
+
+ c(w) * dl.dshapes * dshapes.deta
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ weight = expression({
+ trig.sum <- trigamma(shapes[, 1] + shapes[, 2])
+ ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum
+ ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum
+ ned2l.dshape1shape2 <- -trig.sum
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M) == 3
+ wz[, iam(1, 1, M)] <- ned2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] <- ned2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 * dshapes.deta[, 1] *
+ dshapes.deta[, 2]
+ c(w) * wz
+ }))
+}
+
+
+
+
+
+ betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Beta-prime distribution\n",
+ "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
+ " y>0, shape1>0, shape2>0\n\n",
+ "Links: ",
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg), "\n",
+ "Mean: shape1/(shape2-1) provided shape2>1"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("shape1", .link , earg = .earg , short = TRUE),
+ namesof("shape2", .link , earg = .earg , short = TRUE))
+ if (is.numeric( .i1) && is.numeric( .i2)) {
+ vec <- c( .i1, .i2)
+ vec <- c(theta2eta(vec[1], .link , earg = .earg ),
+ theta2eta(vec[2], .link , earg = .earg ))
+ etastart <- matrix(vec, n, 2, byrow = TRUE)
+ }
+ if (!length(etastart)) {
+ init1 <- if (length( .i1 ))
+ rep_len( .i1 , n) else rep_len(1, n)
+ init2 <- if (length( .i2 ))
+ rep_len( .i2 , n) else 1 + init1 / (y + 0.1)
+ etastart <-
+ matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
+ n, 2, byrow = TRUE)
+ }
+ }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shapes <- eta2theta(eta, .link , earg = .earg )
+ ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2] - 1), NA)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1 = .link , shape2 = .link)
+ misc$earg <- list(shape1 = .earg , shape2 = .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ shapes <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * ((shapes[, 1]-1) * log(y) -
+ lbeta(shapes[, 1], shapes[, 2]) -
+ (shapes[, 2]+shapes[, 1]) * log1p(y))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "betaprime",
+ deriv = eval(substitute(expression({
+ shapes <- eta2theta(eta, .link , earg = .earg )
+ dshapes.deta <- dtheta.deta(shapes, .link , earg = .earg )
+ dl.dshapes <- cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
+ digamma(shapes[, 1]+shapes[, 2]),
+ - log1p(y) - digamma(shapes[, 2]) +
+ digamma(shapes[, 1]+shapes[, 2]))
+ c(w) * dl.dshapes * dshapes.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ temp2 <- trigamma(shapes[, 1] + shapes[, 2])
+ d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
+ d2l.dshape1shape2 <- temp2
+
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
+ dshapes.deta[, 1] * dshapes.deta[, 2]
+
+ -c(w) * wz
+ }))
+}
+
+
+
+
+
+
+
+
+
+
+ zoabetaR <-
+ function(lshape1 = "loge", lshape2 = "loge",
+ lpobs0 = "logit", lpobs1 = "logit",
+ ishape1 = NULL, ishape2 = NULL, trim = 0.05,
+ type.fitted = c("mean", "pobs0", "pobs1", "beta.mean"),
+ parallel.shape = FALSE,
+ parallel.pobs = FALSE,
+ zero = NULL) {
+
+ A <- 0
+ B <- 1
+
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+ lprobb0 <- as.list(substitute(lpobs0))
+ eprobb0 <- link2list(lprobb0)
+ lprobb0 <- attr(eprobb0, "function.name")
+
+ lprobb1 <- as.list(substitute(lpobs1))
+ eprobb1 <- link2list(lprobb1)
+ lprobb1 <- attr(eprobb1, "function.name")
+
+ if (length( ishape1 ) && !is.Numeric( ishape1, positive = TRUE))
+ stop("bad input for argument 'ishape1'")
+ if (length( ishape2 ) && !is.Numeric( ishape2, positive = TRUE))
+ stop("bad input for argument 'ishape2'")
+
+ if (!is.Numeric(A, length.arg = 1) ||
+ !is.Numeric(B, length.arg = 1) ||
+ A >= B)
+ stop("A must be < B, and both must be of length one")
+
+ stdbeta <- (A == 0 && B == 1) # stdbeta == TRUE iff standard beta distn
+
+
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "pobs0", "pobs1", "beta.mean"))[1]
+
+
+ new("vglmff",
+ blurb = c("Standard Beta distribution with 0- and \n",
+ "1-inflation ",
+ "(shape parameters parameterization)\n",
+ if (stdbeta)
+ paste("y^(shape1-1) * (1-y)^(shape2-1) / beta(shape1,shape2),",
+ "0 <= y <= 1, shape1>0, shape2>0\n\n") else
+ paste("(y-",A,")^(shape1-1) * (",B,
+ "-y)^(shape2-1) / [beta(shape1,shape2) * (",
+ B, "-", A, ")^(shape1+shape2-1)], ",
+ A," <= y <= ",B," shape1>0, shape2>0, ",
+ "0 < pobs0 < 1, 0 < pobs1 < 1 \n\n", sep = ""),
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), ", ",
+ namesof("pobs0", lprobb0, earg = eprobb0), ", ",
+ namesof("pobs1", lprobb1, earg = eprobb1)),
+
+
+
+ constraints = eval(substitute(expression({
+
+
+ constraints.orig <- constraints
+
+
+ if (is.logical( .parallel.probb ) && .parallel.probb &&
+ (cind0[1] + cind1[1] <= 1))
+ warning("argument 'parallel.pobs' specified when there is only ",
+ "one of 'pobs0' and 'pobs1'")
+
+
+
+
+ cmk.s <- kronecker(matrix(1, NOS, 1), rbind(1, 1, 0, 0))
+ cmk.S <- kronecker(diag(NOS), rbind(diag(2), 0*diag(2)))
+ con.s <- cm.VGAM(cmk.s, x = x,
+ bool = .parallel.shape , # Same as .parallel.b
+ constraints = constraints.orig,
+ apply.int = TRUE,
+ cm.default = cmk.S,
+ cm.intercept.default = cmk.S)
+
+
+
+ cmk.p <- kronecker(matrix(1, NOS, 1), rbind(0, 0, 1, 1))
+ cmk.P <- kronecker(diag(NOS), rbind(0*diag(2), diag(2)))
+ con.p <- cm.VGAM(cmk.p,
+ x = x,
+ bool = .parallel.probb , #
+ constraints = constraints.orig,
+ apply.int = TRUE,
+ cm.default = cmk.P,
+ cm.intercept.default = cmk.P)
+
+ con.use <- con.s
+ for (klocal in seq_along(con.s)) {
+ con.use[[klocal]] <-
+ cbind(con.s[[klocal]], con.p[[klocal]])
+ # Delete rows that are not needed:
+ if (!cind0[1]) {
+ con.use[[klocal]] <- (con.use[[klocal]])[c(TRUE, TRUE, FALSE, TRUE), ]
+ }
+ if (!cind1[1]) {
+ con.use[[klocal]] <- (con.use[[klocal]])[c(TRUE, TRUE, TRUE, FALSE), ]
+ }
+ col.delete <- apply(con.use[[klocal]], 2, function(HkCol) all(HkCol == 0))
+ con.use[[klocal]] <- (con.use[[klocal]])[, !col.delete]
+ }
+
+
+ constraints <- con.use
+
+
+
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M1)
+
+ }), list( .parallel.shape = parallel.shape,
+ .parallel.probb = parallel.pobs,
+ .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = NA, # Either 3 or 4, data-dependent
+ Q1 = 1,
+ A = .A ,
+ B = .B ,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ type.fitted = .type.fitted ,
+ zero = .zero )
+ }, list( .A = A, .B = B,
+ .type.fitted = type.fitted,
+ .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) < .A || max(y) > .B)
+ stop("data not within [A, B]")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+ ncoly <- NOS <- ncol(y)
+ if (ncoly > 1 && !( .stdbeta ))
+ stop("can only input multiple responses with the standard beta")
+
+ cind0 <- colSums(ind0 <- y == 0) > 0
+ cind1 <- colSums(ind1 <- y == 1) > 0
+ if (!any(cind0 | cind1))
+ stop("no 0s or 1s in the responses to perform 0- and/or 1-inflation! ",
+ "Try using betaff() or betaR() instead.")
+
+ if (ncoly > 1 && !all(cind0 == cind0[1]) && # FALSE &&
+ !all(cind1 == cind1[1]))
+ stop("with multiple responses, cannot have 0-inflation in ",
+ "some responses and 1-inflation in other responses")
+ M1 <- 2 + cind0[1] + cind1[1] # 4 when there is both 0 and 1-inflation
+ M <- M1 * NOS
+
+ mynames1 <- param.names("shape1", ncoly)
+ mynames2 <- param.names("shape2", ncoly)
+ mynames3 <- param.names("pobs0", ncoly)
+ mynames4 <- param.names("pobs1", ncoly)
+ predictors.names <-
+ c(namesof(mynames1, .lshape1 , earg = .eshape1 , short = TRUE),
+ namesof(mynames2, .lshape2 , earg = .eshape2 , short = TRUE),
+ if (cind0[1])
+ namesof(mynames3, .lprobb0 , earg = .eprobb0 , short = TRUE)
+ else NULL,
+ if (cind1[1])
+ namesof(mynames4, .lprobb1 , earg = .eprobb1 , short = TRUE)
+ else NULL)[
+ interleave.VGAM(M, M1 = M1)]
+
+ extra$type.fitted <- .type.fitted
+ extra$M1 <- M1 # Determined from the data
+ extra$cind0 <- cind0
+ extra$cind1 <- cind1
+
+ if (!length(etastart)) {
+
+ p0init <- matrix(colMeans(ind0), n, ncoly, byrow = TRUE)
+ p1init <- matrix(colMeans(ind1), n, ncoly, byrow = TRUE)
+
+
+ mu1d <- matrix(NA_real_, n, NOS)
+ for (jay in 1:ncoly) {
+ yy <- y[, jay]
+ yy <- yy[ .A < yy & yy < .B ]
+ mu1d[, jay] <- mean(yy, trim = .trim )
+ }
+ uu <- (mu1d - .A ) / ( .B - .A )
+ DD <- ( .B - .A )^2
+ p.init <- if (is.Numeric( .ishape1 ))
+ matrix( .ishape1 , n, ncoly, byrow = TRUE) else
+ uu^2 * (1 - uu) * DD / var(yy) - uu
+ p.init[p.init < 0.01] <- 0.01
+ q.init <- if (is.Numeric( .ishape2 ))
+ matrix( .ishape2 , n, ncoly, byrow = TRUE) else
+ p.init * (1 - uu) / uu
+ q.init[q.init < 0.01] <- 0.01
+ etastart <- cbind(
+ theta2eta(p.init, .lshape1 , earg = .eshape1 ),
+ theta2eta(q.init, .lshape2 , earg = .eshape2 ),
+ if (cind0[1])
+ theta2eta(p0init, .lprobb0 , earg = .eprobb0 )
+ else NULL,
+ if (cind1[1])
+ theta2eta(p1init, .lprobb1 , earg = .eprobb1 )
+ else NULL)[,
+ interleave.VGAM(M, M1 = M1)]
+ }
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1,
+ .ishape1 = ishape1, .ishape2 = ishape2,
+ .trim = trim, .A = A, .B = B,
+ .type.fitted = type.fitted,
+ .stdbeta = stdbeta ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ M1 <- extra$M1
+ cind0 <- extra$cind0
+ cind1 <- extra$cind1
+ NOS <- ncol(eta) / M1
+ shape1 <- eta2theta(eta[, c(TRUE, rep(FALSE, M1 - 1)), drop = FALSE],
+ .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE,
+ rep(FALSE, M1 - 2)), drop = FALSE],
+ .lshape2 , earg = .eshape2 )
+ probb0 <- if (cind0[1])
+ eta2theta(eta[, c(FALSE, FALSE, TRUE,
+ if (cind1[1]) FALSE else NULL), drop = FALSE],
+ .lprobb0 , earg = .eprobb0 ) else 0
+ probb1 <- if (cind1[1])
+ eta2theta(eta[, c(FALSE, FALSE,
+ if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+ .lprobb1 , earg = .eprobb1 ) else 0
+
+ type.fitted <- match.arg(extra$type.fitted,
+ c("mean", "pobs0", "pobs1", "beta.mean"))[1]
+
+ ans <-
+ switch(type.fitted,
+ "mean" = (1 - probb0) * shape1 / (shape1 + shape2) +
+ probb1 * shape2 / (shape1 + shape2),
+ "beta.mean" = shape1/(shape1+shape2), # zz Mux by (1-pobs0-pobs1)??
+ "pobs0" = probb0,
+ "pobs1" = probb1)
+
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1,
+ .type.fitted = type.fitted ))),
+ last = eval(substitute(expression({
+ misc$link <- rep_len( c( .lshape1 , .lshape2 ,
+ if (cind0[1]) .lprobb0 else NULL,
+ if (cind1[1]) .lprobb1 else NULL), M)
+ names(misc$link) <- c(mynames1, mynames2,
+ if (cind0[1]) mynames3 else NULL,
+ if (cind1[1]) mynames4 else NULL)[
+ interleave.VGAM(M, M1 = M1)]
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ jay <- 1
+ while (jay <= M) {
+ misc$earg[[jay]] <- .eshape1
+ jay <- jay + 1
+ misc$earg[[jay]] <- .eshape2
+ jay <- jay + 1
+ if (cind0[1]) {
+ misc$earg[[jay]] <- .eprobb0
+ jay <- jay + 1
+ }
+ if (cind1[1]) {
+ misc$earg[[jay]] <- .eprobb1
+ jay <- jay + 1
+ }
+ }
+
+ misc$supportlimits <- c( .A , .B )
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1,
+ .A = A, .B = B ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+ summation = TRUE) {
+ M1 <- 4
+ M1 <- extra$M1
+ cind0 <- extra$cind0
+ cind1 <- extra$cind1
+ NOS <- ncol(eta) / M1
+ shape1 <- eta2theta(eta[, c(TRUE, rep(FALSE, M1 - 1)), drop = FALSE],
+ .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE,
+ rep(FALSE, M1 - 2)), drop = FALSE],
+ .lshape2 , earg = .eshape2 )
+ probb0 <- if (cind0[1])
+ eta2theta(eta[, c(FALSE, FALSE, TRUE,
+ if (cind1[1]) FALSE else NULL), drop = FALSE],
+ .lprobb0 , earg = .eprobb0 ) else 0
+ probb1 <- if (cind1[1])
+ eta2theta(eta[, c(FALSE, FALSE,
+ if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+ .lprobb1 , earg = .eprobb1 ) else 0
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ zedd <- (y - .A ) / ( .B - .A )
+ ll.elts <-
+ c(w) * (dzoabeta(x = zedd, shape1 = shape1, shape2 = shape2,
+ pobs0 = probb0, pobs1 = probb1,
+ log = TRUE) - log( abs( .B - .A )))
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
+ vfamily = "zoabetaR",
+
+
+
+
+
+
+
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 4
+ M1 <- extra$M1
+ cind0 <- extra$cind0
+ cind1 <- extra$cind1
+ NOS <- ncol(eta) / M1
+ shape1 <- eta2theta(eta[, c(TRUE, rep(FALSE, M1 - 1)), drop = FALSE],
+ .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE,
+ rep(FALSE, M1 - 2)), drop = FALSE],
+ .lshape2 , earg = .eshape2 )
+ probb0 <- if (cind0[1])
+ eta2theta(eta[, c(FALSE, FALSE, TRUE,
+ if (cind1[1]) FALSE else NULL), drop = FALSE],
+ .lprobb0 , earg = .eprobb0 ) else 0.5
+ probb1 <- if (cind1[1])
+ eta2theta(eta[, c(FALSE, FALSE,
+ if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+ .lprobb1 , earg = .eprobb1 ) else 0.5
+
+
+
+ okay1 <- all(is.finite(shape1)) && all(shape1 > 0) &&
+ all(is.finite(shape2)) && all(shape2 > 0) &&
+ all(is.finite(probb0)) && all(probb0 > 0) && all(probb0 < 1) &&
+ all(is.finite(probb1)) && all(probb1 > 0) && all(probb1 < 1)
+ okay1
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
+
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 4
+ M1 <- extra$M1
+ cind0 <- extra$cind0
+ cind1 <- extra$cind1
+ NOS <- ncol(eta) / M1
+ shape1 <- eta2theta(eta[, c(TRUE, rep(FALSE, M1 - 1)), drop = FALSE],
+ .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE,
+ rep(FALSE, M1 - 2)), drop = FALSE],
+ .lshape2 , earg = .eshape2 )
+ probb0 <- if (cind0[1])
+ eta2theta(eta[, c(FALSE, FALSE, TRUE,
+ if (cind1[1]) FALSE else NULL), drop = FALSE],
+ .lprobb0 , earg = .eprobb0 ) else 0
+ probb1 <- if (cind1[1])
+ eta2theta(eta[, c(FALSE, FALSE,
+ if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+ .lprobb1 , earg = .eprobb1 ) else 0
+
+
+ dshape1.deta <- dtheta.deta(shape1, .lshape1 , earg = .eshape1 )
+ dshape2.deta <- dtheta.deta(shape2, .lshape2 , earg = .eshape2 )
+ dprobb0.deta <- dtheta.deta(probb0, .lprobb0 , earg = .eprobb0 )
+ dprobb1.deta <- dtheta.deta(probb1, .lprobb1 , earg = .eprobb1 )
+
+ index0 <- y == 0
+ index1 <- y == 1
+ indexi <- !index0 & !index1 # In the interior, i.e., (0, 1)
+ dig.sum <- digamma(shape1 + shape2)
+ QQ <- 1 - probb0 - probb1
+
+ if (cind0[1]) {
+ dl.dprobb0 <- -1 / QQ
+ dl.dprobb0[index0] <- 1 / probb0[index0]
+ dl.dprobb0[index1] <- 0
+ }
+
+ if (cind1[1]) {
+ dl.dprobb1 <- -1 / QQ
+ dl.dprobb1[index0] <- 0
+ dl.dprobb1[index1] <- 1 / probb1[index1]
+ }
+
+ dl.dshape1 <- log(y) - digamma(shape1) + dig.sum
+ dl.dshape2 <- log1p(-y) - digamma(shape2) + dig.sum
+ dl.dshape1[!indexi] <- 0
+ dl.dshape2[!indexi] <- 0
+
+ myderiv <- c(w) *
+ cbind(dl.dshape1 * dshape1.deta,
+ dl.dshape2 * dshape2.deta,
+ if (cind0[1]) dl.dprobb0 * dprobb0.deta else NULL,
+ if (cind1[1]) dl.dprobb1 * dprobb1.deta else NULL)
+ colnames(myderiv) <- NULL
+ myderiv[, interleave.VGAM(M, M1 = M1)]
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2,
+ .lprobb0 = lprobb0, .lprobb1 = lprobb1,
+ .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
+ weight = expression({
+ trig.sum <- trigamma(shape1 + shape2)
+
+
+ned2l.dshape12 <- (trigamma(shape1) - trig.sum) * QQ
+ned2l.dshape22 <- (trigamma(shape2) - trig.sum) * QQ
+ned2l.dprobb02 <- (1 - probb1) / (probb0 * QQ)
+ned2l.dprobb12 <- (1 - probb0) / (probb1 * QQ)
+ned2l.dshape1shape2 <- -trig.sum * QQ # (1 - probb0 - probb0) zz
+ned2l.dshape2probb0 <- 0
+ned2l.dprobb0probb1 <- 1 / QQ
+ned2l.dshape1probb0 <- 0
+ned2l.dshape2probb1 <- 0
+ned2l.dshape1probb1 <- 0
+
+ned2l.dshape1probb0 <- 0
+ wz <- array(c(c(w) * ned2l.dshape12 * dshape1.deta^2,
+ c(w) * ned2l.dshape22 * dshape2.deta^2,
+ if (cind0[1]) c(w) * ned2l.dprobb02 * dprobb0.deta^2 else NULL,
+ if (cind1[1]) c(w) * ned2l.dprobb12 * dprobb1.deta^2 else NULL,
+ c(w) * ned2l.dshape1shape2 * dshape1.deta * dshape2.deta,
+ if (cind0[1]) c(w) * ned2l.dshape2probb0 * dshape2.deta * dprobb0.deta,
+ c(w) * ned2l.dprobb0probb1 * dprobb0.deta * dprobb1.deta,
+ if (cind0[1]) c(w) * ned2l.dshape1probb0 * dshape1.deta * dprobb0.deta,
+ if (cind1[1]) c(w) * ned2l.dshape2probb1 * dshape2.deta * dprobb1.deta,
+ if (cind1[1]) c(w) * ned2l.dshape1probb1 * dshape1.deta * dprobb1.deta),
+ dim = c(n, M / M1, M1*(M1+1)/2))
+
+ wz <- arwz2wz(wz, M = M, M1 = M1) # wz is tridiagonal but unexploited here
+ wz
+ }))
+} # zoabetaR
+
+
+
+
diff --git a/R/family.basics.R b/R/family.basics.R
index ff2d446..8cf5fb4 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -43,11 +43,11 @@ Select <-
col.names <- colnames(data)
if (is.logical(prefix)) {
- index <- if (prefix) 1:length(col.names) else
+ index <- if (prefix) seq_along(col.names) else
stop("cannot have 'prefix = FALSE'")
} else {
index <- NULL
- for (ii in 1:length(prefix)) {
+ for (ii in seq_along(prefix)) {
small.col.names <- substr(col.names, 1, nchar(prefix[ii]))
index <- c(index, grep(prefix[ii], small.col.names))
@@ -63,9 +63,9 @@ Select <-
if (length(exclude)) {
exclude.index <- NULL
- for (ii in 1:length(exclude)) {
+ for (ii in seq_along(exclude)) {
exclude.index <- c(exclude.index,
- (1:length(col.names))[exclude[ii] == col.names])
+ (seq_along(col.names))[exclude[ii] == col.names])
}
exclude.index <- unique(sort(exclude.index))
index <- setdiff(index, exclude.index)
@@ -185,7 +185,7 @@ subsetc <-
if (!is.vector(vov))
stop("argument 'vov' must be a vector")
objvals <- vov
- for (ii in 1:length(vov))
+ for (ii in seq_along(vov))
objvals[ii] <- objfun(vov[ii], y = y, x = x, w = w,
extraargs = extraargs, ...)
try.this <- if (abs.arg) {
@@ -210,6 +210,114 @@ subsetc <-
+ grid.search2 <-
+ function(vov1, vov2, objfun, y, x, w, extraargs = NULL,
+ maximize = TRUE, # abs.arg = FALSE,
+ ret.objfun = FALSE, ...) {
+ if (!is.vector(vov1))
+ stop("argument 'vov1' must be a vector")
+ if (!is.vector(vov2))
+ stop("argument 'vov2' must be a vector")
+
+ allmat1 <- expand.grid(vov1 = as.vector(vov1),
+ vov2 = as.vector(vov2))
+ objvals <- numeric(nrow(allmat1))
+ for (ii in seq_along(objvals))
+ objvals[ii] <- objfun(allmat1[ii, "vov1"],
+ allmat1[ii, "vov2"],
+ y = y, x = x, w = w,
+ extraargs = extraargs, ...)
+
+ ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
+
+
+ c(Value1 = allmat1[ind5, "vov1"],
+ Value2 = allmat1[ind5, "vov2"],
+ ObjFun = if (ret.objfun) objvals[ind5] else NULL)
+}
+
+
+
+
+ grid.search3 <-
+ function(vov1, vov2, vov3, objfun, y, x, w, extraargs = NULL,
+ maximize = TRUE, # abs.arg = FALSE,
+ ret.objfun = FALSE, ...) {
+ if (!is.vector(vov1))
+ stop("argument 'vov1' must be a vector")
+ if (!is.vector(vov2))
+ stop("argument 'vov2' must be a vector")
+ if (!is.vector(vov3))
+ stop("argument 'vov3' must be a vector")
+
+ allmat1 <- expand.grid(vov1 = as.vector(vov1),
+ vov2 = as.vector(vov2),
+ vov3 = as.vector(vov3))
+ objvals <- numeric(nrow(allmat1))
+ for (ii in seq_along(objvals))
+ objvals[ii] <- objfun(allmat1[ii, "vov1"],
+ allmat1[ii, "vov2"],
+ allmat1[ii, "vov3"],
+ y = y, x = x, w = w,
+ extraargs = extraargs, ...)
+
+ ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
+
+
+ c(Value1 = allmat1[ind5, "vov1"],
+ Value2 = allmat1[ind5, "vov2"],
+ Value3 = allmat1[ind5, "vov3"],
+ ObjFun = if (ret.objfun) objvals[ind5] else NULL)
+}
+
+
+
+
+ grid.search4 <-
+ function(vov1, vov2, vov3, vov4,
+ objfun, y, x, w, extraargs = NULL,
+ maximize = TRUE, # abs.arg = FALSE,
+ ret.objfun = FALSE, ...) {
+ if (!is.vector(vov1))
+ stop("argument 'vov1' must be a vector")
+ if (!is.vector(vov2))
+ stop("argument 'vov2' must be a vector")
+ if (!is.vector(vov3))
+ stop("argument 'vov3' must be a vector")
+ if (!is.vector(vov4))
+ stop("argument 'vov4' must be a vector")
+
+ allmat1 <- expand.grid(vov1 = as.vector(vov1),
+ vov2 = as.vector(vov2),
+ vov3 = as.vector(vov3),
+ vov4 = as.vector(vov4))
+ objvals <- numeric(nrow(allmat1))
+ for (ii in seq_along(objvals))
+ objvals[ii] <- objfun(allmat1[ii, "vov1"],
+ allmat1[ii, "vov2"],
+ allmat1[ii, "vov3"],
+ allmat1[ii, "vov4"],
+ y = y, x = x, w = w,
+ extraargs = extraargs, ...)
+
+ ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
+
+
+ c(Value1 = allmat1[ind5, "vov1"],
+ Value2 = allmat1[ind5, "vov2"],
+ Value3 = allmat1[ind5, "vov3"],
+ Value4 = allmat1[ind5, "vov4"],
+ ObjFun = if (ret.objfun) objvals[ind5] else NULL)
+}
+
+
+
+
+
+
+
+
+
getind <- function(constraints, M, ncolx) {
@@ -229,7 +337,7 @@ subsetc <-
temp2 <- matrix(unlist(constraints), nrow = M)
for (kk in 1:M) {
ansx <- NULL
- for (ii in 1:length(constraints)) {
+ for (ii in seq_along(constraints)) {
temp <- constraints[[ii]]
isfox <- any(temp[kk, ] != 0)
if (isfox) {
@@ -272,7 +380,7 @@ subsetc <-
if (!length(constraints)) {
constraints <- vector("list", length(nasgn))
- for (ii in 1:length(nasgn)) {
+ for (ii in seq_along(nasgn)) {
constraints[[ii]] <- cm.default # diag(M)
}
names(constraints) <- nasgn
@@ -362,7 +470,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
if (!is.list(constraints))
stop("'constraints' must be a list")
- for (ii in 1:length(asgn))
+ for (ii in seq_along(asgn))
constraints[[nasgn[ii]]] <- if (is.null(constraints[[nasgn[ii]]]))
diag(M) else eval(constraints[[nasgn[ii]]])
@@ -403,7 +511,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
which.numeric.all <- NULL
- for (ii in 1:length(dotzero)) {
+ for (ii in seq_along(dotzero)) {
which.ones <-
grep(dotzero[ii], predictors.names, fixed = TRUE)
if (length(which.ones)) {
@@ -464,7 +572,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
if (!is.list(constraints))
stop("'constraints' must be a list")
- for (ii in 1:length(asgn))
+ for (ii in seq_along(asgn))
constraints[[nasgn[ii]]] <- if (is.null(constraints[[nasgn[ii]]]))
diag(M) else eval(constraints[[nasgn[ii]]])
@@ -511,7 +619,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
if (is.null(constraints)) {
constraints <- vector("list", length(nasgn))
- for (ii in 1:length(nasgn))
+ for (ii in seq_along(nasgn))
constraints[[ii]] <- diag(M)
names(constraints) <- nasgn
}
@@ -538,16 +646,16 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
}
if (is.null(names(constraints)))
- names(constraints) <- rep(nasgn, length.out = lenconstraints)
+ names(constraints) <- rep_len(nasgn, lenconstraints)
temp <- vector("list", length(nasgn))
names(temp) <- nasgn
- for (ii in 1:length(nasgn))
+ for (ii in seq_along(nasgn))
temp[[nasgn[ii]]] <-
if (is.null(constraints[[nasgn[ii]]])) diag(M) else
eval(constraints[[nasgn[ii]]])
- for (ii in 1:length(asgn)) {
+ for (ii in seq_along(asgn)) {
if (!is.matrix(temp[[ii]])) {
stop("not a constraint matrix")
}
@@ -560,7 +668,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
constraints <- temp
Hlist <- vector("list", ncol(x))
- for (ii in 1:length(asgn)) {
+ for (ii in seq_along(asgn)) {
cols <- asgn[[ii]]
ictr <- 0
for (jay in cols) {
@@ -615,9 +723,9 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
stop("target is not a matrix")
dimtar <- dim(target)
- trivc <- rep(1, length(Hlist))
+ trivc <- rep_len(1, length(Hlist))
names(trivc) <- names(Hlist)
- for (ii in 1:length(Hlist)) {
+ for (ii in seq_along(Hlist)) {
d <- dim(Hlist[[ii]])
if (d[1] != dimtar[1]) trivc[ii] <- 0
if (d[2] != dimtar[2]) trivc[ii] <- 0
@@ -720,7 +828,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
stop("range error in j or k")
both <- (i1 == jay & i2 == kay) |
(i1 == kay & i2 == jay)
- (1:length(i2))[both]
+ (seq_along(i2))[both]
}
}
@@ -829,7 +937,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
}
if (length.arg > length(ans))
stop("argument 'length.arg' too big")
- rep(ans, length.out = length.arg)
+ rep_len(ans, length.arg)
}
@@ -867,11 +975,11 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
if (any(slotNames(object) == "iter"))
iter <- object at iter
- w <- rep(1, n)
+ w <- rep_len(1, n)
if (any(slotNames(object) == "prior.weights"))
w <- object at prior.weights
if (!length(w))
- w <- rep(1, n)
+ w <- rep_len(1, n)
x <- object at x
if (!length(x))
@@ -940,7 +1048,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
ans
} else {
temp <- object at y
- ans <- rep(1, nrow(temp)) # Assumed all equal and unity.
+ ans <- rep_len(1, nrow(temp)) # Assumed all equal and unity.
names(ans) <- dimnames(temp)[[1]]
ans
}
@@ -959,7 +1067,7 @@ procVec <- function(vec, yn, Default) {
- if (any(is.na(vec)))
+ if (anyNA(vec))
stop("vec cannot contain any NAs")
L <- length(vec)
nvec <- names(vec) # vec[""] undefined
@@ -971,18 +1079,18 @@ procVec <- function(vec, yn, Default) {
default <- vec
}
- answer <- rep(default, length.out = length(yn))
+ answer <- rep_len(default, length(yn))
names(answer) <- yn
if (named) {
nvec2 <- nvec[nvec != ""]
if (length(nvec2)) {
if (any(!is.element(nvec2, yn)))
stop("some names given which are superfluous")
- answer <- rep(NA_real_, length.out = length(yn))
+ answer <- rep_len(NA_real_, length(yn))
names(answer) <- yn
answer[nvec2] <- vec[nvec2]
answer[is.na(answer)] <-
- rep(default, length.out <- sum(is.na(answer)))
+ rep_len(default, sum(is.na(answer)))
}
}
@@ -1065,13 +1173,13 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
}
Bs <- mux22(t(wzold), deta, M = M,
upper = FALSE, as.matrix = TRUE) # n x M
- sBs <- c( (deta * Bs) %*% rep(1, M) ) # should have positive values
- sy <- c( (dderiv * deta) %*% rep(1, M) )
+ sBs <- c( (deta * Bs) %*% rep_len(1, M) ) # should have positive values
+ sy <- c( (dderiv * deta) %*% rep_len(1, M) )
wznew <- wzold
index <- iam(NA, NA, M = M, both = TRUE)
- index$row.index <- rep(index$row.index, len=ncol(wzold))
- index$col.index <- rep(index$col.index, len=ncol(wzold))
- updateThese <- if (keeppd) (sy > effpos) else rep(TRUE, len=length(sy))
+ index$row.index <- rep_len(index$row.index, ncol(wzold))
+ index$col.index <- rep_len(index$col.index, ncol(wzold))
+ updateThese <- if (keeppd) (sy > effpos) else rep_len(TRUE, length(sy))
if (!keeppd || any(updateThese)) {
wznew[updateThese,] <- wznew[updateThese,] -
Bs[updateThese,index$row] *
@@ -1171,7 +1279,7 @@ existsinVGAMenv <- function(varnames, prefix = "") {
assign2VGAMenv <- function(varnames, mylist, prefix = "") {
evarnames <- paste(prefix, varnames, sep = "")
- for (ii in 1:length(varnames)) {
+ for (ii in seq_along(varnames)) {
assign(evarnames[ii], mylist[[(varnames[ii])]],
envir = VGAMenv)
}
@@ -1204,9 +1312,9 @@ lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
stop("bad input for argument 'iter'")
L <- max(length(x), length(s), length(v))
- x <- rep(x, length.out = L);
- s <- rep(s, length.out = L);
- v <- rep(v, length.out = L);
+ x <- rep_len(x, L)
+ s <- rep_len(s, L)
+ v <- rep_len(v, L)
xok <- abs(x) < 1 & !(v <= 0 & v == round(v))
x[!xok] <- 0 # Fix this later
@@ -1240,7 +1348,7 @@ negzero.expression.VGAM <- expression({
which.numeric.all <- NULL
- for (ii in 1:length(dotzero)) {
+ for (ii in seq_along(dotzero)) {
which.ones <-
grep(dotzero[ii], predictors.names, fixed = TRUE)
if (length(which.ones)) {
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 0764111..8642ea6 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -173,7 +173,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
mustart.use <- if (length(mustart.orig)) {
mustart.orig
} else if ( .imethod == 1) {
- rep(weighted.mean(y, w), len = n)
+ rep_len(weighted.mean(y, w), n)
} else if ( .imethod == 2) {
.ishrinkage * weighted.mean(y, w) + (1 - .ishrinkage ) * y
} else if ( .imethod == 3) {
@@ -191,9 +191,8 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
nvec = if (is.numeric(extra$orig.w))
round(w / extra$orig.w) else round(w),
mustart = mustart.use))
- init.rho <- if (is.Numeric( .irho ))
- rep( .irho , length = n) else
- rep(try.this, length = n)
+ init.rho <- if (is.Numeric( .irho )) rep_len( .irho , n) else
+ rep_len(try.this, n)
etastart <-
cbind(theta2eta(mustart.use, .lmu , earg = .emu ),
theta2eta(init.rho, .lrho , earg = .erho ))
@@ -411,9 +410,9 @@ dbinom2.or <-
}
L <- max(length(mu1), length(mu2), length(oratio))
- if (length(oratio) != L) oratio <- rep(oratio, len = L)
- if (length(mu1 ) != L) mu1 <- rep(mu1, len = L)
- if (length(mu2 ) != L) mu2 <- rep(mu2, len = L)
+ if (length(oratio) != L) oratio <- rep_len(oratio, L)
+ if (length(mu1 ) != L) mu1 <- rep_len(mu1, L)
+ if (length(mu2 ) != L) mu2 <- rep_len(mu2, L)
a.temp <- 1 + (mu1+mu2)*(oratio-1)
b.temp <- -4 * oratio * (oratio-1) * mu1 * mu2
@@ -583,7 +582,7 @@ rbinom2.or <-
if (!length(etastart)) {
pmargin <- cbind(mustart[, 3] + mustart[, 4],
mustart[, 2] + mustart[, 4])
- ioratio <- if (length( .ioratio)) rep( .ioratio , len = n) else
+ ioratio <- if (length( .ioratio)) rep_len( .ioratio , n) else
mustart[, 4] * mustart[, 1] / (mustart[, 2] *
mustart[, 3])
if (length( .imu1 )) pmargin[, 1] <- .imu1
@@ -813,9 +812,9 @@ dbinom2.rho <-
}
nn <- max(length(mu1), length(mu2), length(rho))
- rho <- rep(rho, len = nn)
- mu1 <- rep(mu1, len = nn)
- mu2 <- rep(mu2, len = nn)
+ rho <- rep_len(rho, nn)
+ mu1 <- rep_len(mu1, nn)
+ mu2 <- rep_len(mu2, nn)
eta1 <- qnorm(mu1)
eta2 <- qnorm(mu2)
p11 <- pbinorm(eta1, eta2, cov12 = rho)
@@ -1010,10 +1009,10 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
mu2.init <- fitted(glm2.fit)
} else if ( .imethod == 2) {
mu1.init <- if (is.Numeric( .imu1 ))
- rep( .imu1 , length = n) else
+ rep_len( .imu1 , n) else
mu[, 3] + mu[, 4]
mu2.init <- if (is.Numeric( .imu2 ))
- rep( .imu2 , length = n) else
+ rep_len( .imu2 , n) else
mu[, 2] + mu[, 4]
} else {
stop("bad value for argument 'imethod'")
@@ -1055,7 +1054,7 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
rho.init <- if (is.Numeric( .irho ))
- rep( .irho , len = n) else {
+ rep_len( .irho , n) else {
try.this
}
@@ -1269,9 +1268,9 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
sd2 <- sqrt(var2)
rho <- cov12 / (sd1 * sd2)
- if (any(is.na(q1) | is.na(q2) |
- is.na(sd1) | is.na(sd2) |
- is.na(mean1) | is.na(mean2) | is.na(rho)))
+ if (anyNA(q1) || anyNA(q2) ||
+ anyNA(sd1) || anyNA(sd2) ||
+ anyNA(mean1) || anyNA(mean2) || anyNA(rho))
stop("no NAs allowed in arguments or variables 'q1', 'q2', 'mean1', ",
"'mean2', 'sd1', 'sd2', 'cov12'")
if (min(rho) < -1 || max(rho) > +1)
@@ -1287,13 +1286,13 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
length(mean1), length(mean2),
length(sd1), length(sd2),
length(rho))
- if (length(q1) != LLL) q1 <- rep(q1, len = LLL)
- if (length(q2) != LLL) q2 <- rep(q2, len = LLL)
- if (length(mean1) != LLL) mean1 <- rep(mean1, len = LLL)
- if (length(mean2) != LLL) mean2 <- rep(mean2, len = LLL)
- if (length(sd1) != LLL) sd1 <- rep(sd1, len = LLL)
- if (length(sd2) != LLL) sd2 <- rep(sd2, len = LLL)
- if (length(rho) != LLL) rho <- rep(rho, len = LLL)
+ if (length(q1) != LLL) q1 <- rep_len(q1, LLL)
+ if (length(q2) != LLL) q2 <- rep_len(q2, LLL)
+ if (length(mean1) != LLL) mean1 <- rep_len(mean1, LLL)
+ if (length(mean2) != LLL) mean2 <- rep_len(mean2, LLL)
+ if (length(sd1) != LLL) sd1 <- rep_len(sd1, LLL)
+ if (length(sd2) != LLL) sd2 <- rep_len(sd2, LLL)
+ if (length(rho) != LLL) rho <- rep_len(rho, LLL)
Zedd1 <- Z1 <- (q1 - mean1) / sd1
Zedd2 <- Z2 <- (q2 - mean2) / sd2
@@ -1341,9 +1340,9 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
sd2 <- sqrt(var2)
rho <- cov12 / (sd1 * sd2)
- if (any(is.na(x1) | is.na(x2) |
- is.na(sd1) | is.na(sd2) |
- is.na(mean1) | is.na(mean2) | is.na(rho)))
+ if (anyNA(x1) || anyNA(x2) ||
+ anyNA(sd1) || anyNA(sd2) ||
+ anyNA(mean1) || anyNA(mean2) || anyNA(rho))
stop("no NAs allowed in arguments or variables 'x1', 'x2', 'mean1', ",
"'mean2', 'sd1', 'sd2', 'cov12'")
if (min(rho) < -1 || max(rho) > +1)
@@ -1359,13 +1358,13 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
length(mean1), length(mean2),
length(sd1), length(sd2),
length(rho))
- if (length(x1) != LLL) x1 <- rep(x1, len = LLL)
- if (length(x2) != LLL) x2 <- rep(x2, len = LLL)
- if (length(mean1) != LLL) mean1 <- rep(mean1, len = LLL)
- if (length(mean2) != LLL) mean2 <- rep(mean2, len = LLL)
- if (length(sd1) != LLL) sd1 <- rep(sd1, len = LLL)
- if (length(sd2) != LLL) sd2 <- rep(sd2, len = LLL)
- if (length(rho) != LLL) rho <- rep(rho, len = LLL)
+ if (length(x1) != LLL) x1 <- rep_len(x1, LLL)
+ if (length(x2) != LLL) x2 <- rep_len(x2, LLL)
+ if (length(mean1) != LLL) mean1 <- rep_len(mean1, LLL)
+ if (length(mean2) != LLL) mean2 <- rep_len(mean2, LLL)
+ if (length(sd1) != LLL) sd1 <- rep_len(sd1, LLL)
+ if (length(sd2) != LLL) sd2 <- rep_len(sd2, LLL)
+ if (length(rho) != LLL) rho <- rep_len(rho, LLL)
Z1 <- (x1 - mean1) / sd1
Z2 <- (x2 - mean2) / sd2
@@ -1420,7 +1419,7 @@ my.dbinom <- function(x,
" Var(size) is intractable"),
initialize = eval(substitute(expression({
predictors.names <- "size"
- extra$temp2 <- rep( .prob , length = n)
+ extra$temp2 <- rep_len( .prob , n)
if (is.null(etastart)) {
nvec <- (y+0.1)/extra$temp2
@@ -1492,10 +1491,10 @@ my.dbinom <- function(x,
LLL <- max(length(x), length(size), length(shape1), length(shape2))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
- if (length(shape1) != LLL) shape1 <- rep(shape1, len = LLL)
- if (length(shape2) != LLL) shape2 <- rep(shape2, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(shape1) != LLL) shape1 <- rep_len(shape1, LLL)
+ if (length(shape2) != LLL) shape2 <- rep_len(shape2, LLL)
ans <- x
ans[TRUE] <- log(0)
@@ -1606,10 +1605,10 @@ my.dbinom <- function(x,
stop("bad input for argument 'shape2'")
LLL <- max(length(q), length(size), length(shape1), length(shape2))
- if (length(q) != LLL) q <- rep(q, len = LLL)
- if (length(shape1) != LLL) shape1 <- rep(shape1, len = LLL)
- if (length(shape2) != LLL) shape2 <- rep(shape2, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(shape1) != LLL) shape1 <- rep_len(shape1, LLL)
+ if (length(shape2) != LLL) shape2 <- rep_len(shape2, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
ans <- q # Retains names(q)
ans[] <- 0 # Set all elements to 0
@@ -1661,11 +1660,11 @@ my.dbinom <- function(x,
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- if (length(size) != use.n) size <- rep(size, len = use.n)
- if (length(shape1) != use.n) shape1 <- rep(shape1, len = use.n)
- if (length(shape2) != use.n) shape2 <- rep(shape2, len = use.n)
+ if (length(size) != use.n) size <- rep_len(size, use.n)
+ if (length(shape1) != use.n) shape1 <- rep_len(shape1, use.n)
+ if (length(shape2) != use.n) shape2 <- rep_len(shape2, use.n)
- ans <- rep(NA_real_, len = use.n)
+ ans <- rep_len(NA_real_, use.n)
okay0 <- is.finite(shape1) & is.finite(shape2)
if (smalln <- sum(okay0))
ans[okay0] <- rbinom(n = smalln, size = size[okay0],
@@ -1684,7 +1683,7 @@ my.dbinom <- function(x,
prob = 1)
if (sum.okay3 <- sum(okay3)) {
if (length( .dontuse.prob ) != use.n)
- .dontuse.prob <- rep(.dontuse.prob, len = use.n)
+ .dontuse.prob <- rep_len( .dontuse.prob , use.n)
ans[okay3] <- rbinom(n = sum.okay3, size = size[okay3],
prob = .dontuse.prob[okay3])
}
@@ -1723,7 +1722,7 @@ my.dbinom <- function(x,
NN <- length(nvec)
- ans <- rep(0.0, len = NN)
+ ans <- rep_len(0.0, NN)
if (first) {
for (ii in 1:NN) {
temp639 <- lbeta(shape1[ii], shape2[ii])
@@ -1840,9 +1839,9 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
mustart.use <- if (length(mustart.orig)) mustart.orig else
mustart
- shape1 <- rep( .ishape1 , len = n)
+ shape1 <- rep_len( .ishape1 , n)
shape2 <- if (length( .ishape2 )) {
- rep( .ishape2 , len = n)
+ rep_len( .ishape2 , n)
} else if (length(mustart.orig)) {
shape1 * (1 / mustart.use - 1)
} else if ( .imethod == 1) {
@@ -2083,11 +2082,11 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
namesof("shape", .lshape , earg = .eshape , tag = FALSE))
if (length( .iprob ))
- prob.init <- rep( .iprob , len = n)
+ prob.init <- rep_len( .iprob , n)
if (!length(etastart) ||
ncol(cbind(etastart)) != 2) {
- shape.init <- rep( .ishape , len = n)
+ shape.init <- rep_len( .ishape , n)
etastart <-
cbind(theta2eta(prob.init, .lprob , earg = .eprob ),
theta2eta(shape.init, .lshape , earg = .eshape ))
@@ -2316,12 +2315,11 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
namesof("prob2", .lprob2,earg = .eprob2, tag = FALSE))
prob1.init <- if (is.Numeric( .iprob1))
- rep( .iprob1 , len = n) else
- rep(weighted.mean(y[, 1], w = w), len = n)
+ rep_len( .iprob1 , n) else
+ rep_len(weighted.mean(y[, 1], w = w), n)
prob2.init <- if (is.Numeric( .iprob2 ))
- rep( .iprob2 , length = n) else
- rep(weighted.mean(y[, 2], w = w*y[, 1]),
- length = n)
+ rep_len( .iprob2 , n) else
+ rep_len(weighted.mean(y[, 2], w = w*y[, 1]), n)
if (!length(etastart)) {
etastart <-
@@ -2497,11 +2495,11 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
pstar.init <- ((mu[, 3]+mu[, 4]) + (mu[, 2]+mu[, 4])) / 2
- phi.init <- if (length(.iphi12)) rep(.iphi12, len = n) else
+ phi.init <- if (length(.iphi12)) rep_len( .iphi12 , n) else
min(propY1.eq.0 * 0.95, propY2.eq.0 * 0.95, pstar.init/1.5)
- oratio.init <- if (length( .ioratio)) rep( .ioratio, len = n) else
+ oratio.init <- if (length( .ioratio)) rep_len( .ioratio , n) else
mu[, 4]*mu[, 1]/(mu[, 2]*mu[, 3])
- mu12.init <- if (length(.imu12)) rep(.imu12, len = n) else
+ mu12.init <- if (length(.imu12)) rep_len( .imu12 , n) else
pstar.init / (1-phi.init)
etastart <- cbind(
@@ -2670,11 +2668,11 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
save.weights <- control$save.weights <- FALSE
}
if (is.null(etastart)) {
- mu1.init= if (is.Numeric(.imu1))
- rep(.imu1, length = n) else
+ mu1.init= if (is.Numeric( .imu1 ))
+ rep_len( .imu1 , n) else
mu[, 3] + mu[, 4]
- mu2.init= if (is.Numeric(.imu2))
- rep(.imu2, length = n) else
+ mu2.init= if (is.Numeric( .imu2 ))
+ rep_len( .imu2 , n) else
mu[, 2] + mu[, 4]
etastart <-
cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
@@ -2685,7 +2683,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
linkinv = eval(substitute(function(eta, extra = NULL) {
pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
- rhovec <- rep( .rho , len = nrow(eta))
+ rhovec <- rep_len( .rho , nrow(eta))
p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
p01 <- pmin(pmargin[, 2] - p11, pmargin[, 2])
p10 <- pmin(pmargin[, 1] - p11, pmargin[, 1])
@@ -2736,7 +2734,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
deriv = eval(substitute(expression({
pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
- rhovec <- rep( .rho , len = nrow(eta))
+ rhovec <- rep_len( .rho , nrow(eta))
p11 <- pbinorm(eta[, 1], eta[, 2], cov12 = rhovec)
p01 <- pmargin[, 2]-p11
p10 <- pmargin[, 1]-p11
@@ -2921,8 +2919,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
mu1.init <- weighted.mean(extra$ymat2col[, 1], c(w))
index1 <- (extra$ymat2col[, 1] == 1)
mu2.init <- weighted.mean(extra$ymat2col[index1, 2], w[index1, 1])
- mu1.init <- rep(mu1.init, len = n)
- mu2.init <- rep(mu2.init, len = n)
+ mu1.init <- rep_len(mu1.init, n)
+ mu2.init <- rep_len(mu2.init, n)
} else if ( .imethod == 2) {
warning("not working yet2")
@@ -2939,9 +2937,9 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
}
if (length( .imu1 ))
- mu1.init <- rep( .imu1 , length = n)
+ mu1.init <- rep_len( .imu1 , n)
if (length( .imu2 ))
- mu2.init <- rep( .imu2 , length = n)
+ mu2.init <- rep_len( .imu2 , n)
@@ -2982,8 +2980,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
initmu2 = mu2.init,
nvec = nvec ))
- rho.init <- if (is.Numeric( .irho ))
- rep( .irho , len = n) else {
+ rho.init <- if (is.Numeric( .irho )) rep_len( .irho , n) else {
try.this
}
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 61a93e4..f44a297 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -27,12 +27,12 @@ dbiclaytoncop <- function(x1, x2, apar = 0, log = FALSE) {
logdensity[out.square] <- log(0.0)
- index0 <- (rep(apar, length = length(A)) < sqrt(.Machine$double.eps))
+ index0 <- (rep_len(apar, length(A)) < sqrt(.Machine$double.eps))
if (any(index0))
logdensity[index0] <- log(1.0)
- index1 <- (rep(apar, length = length(A)) < 0.0) | (A < 0.0)
+ index1 <- (rep_len(apar, length(A)) < 0.0) | (A < 0.0)
if (any(index1))
logdensity[index1] <- NaN
@@ -57,7 +57,7 @@ rbiclaytoncop <- function(n, apar = 0) {
(v2^(-apar / (1 + apar)) - 1) + 1)^(-1 / apar)
- index0 <- (rep(apar, length = length(u1)) < sqrt(.Machine$double.eps))
+ index0 <- (rep_len(apar, length(u1)) < sqrt(.Machine$double.eps))
if (any(index0))
u2[index0] <- runif(sum(index0))
@@ -175,7 +175,7 @@ rbiclaytoncop <- function(n, apar = 0) {
rhobit(pearson.rho)
}
- if (any(is.na(apar.init[, spp.])))
+ if (anyNA(apar.init[, spp.]))
apar.init[, spp.] <- apar.init0
}
@@ -199,7 +199,7 @@ rbiclaytoncop <- function(n, apar = 0) {
last = eval(substitute(expression({
M1 <- extra$M1
Q1 <- extra$Q1
- misc$link <- rep( .lapar , length = M)
+ misc$link <- rep_len( .lapar , M)
temp.names <- mynames1
names(misc$link) <- temp.names
@@ -491,7 +491,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
10
}
- if (any(is.na(dof.init[, spp.])))
+ if (anyNA(dof.init[, spp.]))
dof.init[, spp.] <- dof.init0
@@ -501,7 +501,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
cor(ymatj[, 1], ymatj[, 2])
}
- if (any(is.na(rho.init[, spp.])))
+ if (anyNA(rho.init[, spp.]))
rho.init[, spp.] <- rho.init0
}
@@ -535,9 +535,8 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
M1 <- extra$M1
Q1 <- extra$Q1
misc$link <-
- c(rep( .ldof , length = M / M1),
- rep( .lrho , length = M / M1))[
- interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .ldof , M / M1),
+ rep_len( .lrho , M / M1))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -851,7 +850,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
- if (any(is.na(rho.init[, spp.])))
+ if (anyNA(rho.init[, spp.]))
rho.init[, spp.] <- rho.init0
}
@@ -878,7 +877,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
M1 <- extra$M1
Q1 <- extra$Q1
- misc$link <- rep( .lrho , length = M)
+ misc$link <- rep_len( .lrho , M)
temp.names <- mynames1
names(misc$link) <- temp.names
@@ -1063,18 +1062,14 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
scale.init1 <- const4 * sum(c(w) *(y[, 1] - locat.init1)^2)
scale.init2 <- const4 * sum(c(w) *(y[, 2] - locat.init2)^2)
}
- loc1.init <- if (length( .iloc1 ))
- rep( .iloc1 , length.out = n) else
- rep(locat.init1, length.out = n)
- loc2.init <- if (length( .iloc2 ))
- rep( .iloc2 , length.out = n) else
- rep(locat.init2, length.out = n)
- scale1.init <- if (length( .iscale1 ))
- rep( .iscale1, length.out = n) else
- rep(1, length.out = n)
- scale2.init <- if (length( .iscale2 ))
- rep( .iscale2, length.out = n) else
- rep(1, length.out = n)
+ loc1.init <- if (length( .iloc1 )) rep_len( .iloc1 , n) else
+ rep_len(locat.init1, n)
+ loc2.init <- if (length( .iloc2 )) rep_len( .iloc2 , n) else
+ rep_len(locat.init2, n)
+ scale1.init <- if (length( .iscale1 )) rep_len( .iscale1 , n) else
+ rep_len(1, n)
+ scale2.init <- if (length( .iscale2 )) rep_len( .iscale2 , n) else
+ rep_len(1, n)
if ( .llocat == "loge")
locat.init1 <- abs(locat.init1) + 0.001
@@ -1226,12 +1221,12 @@ dbilogis <- function(x1, x2, loc1 = 0, scale1 = 1,
L <- max(length(x1), length(x2),
length(loc1), length(loc2),
length(scale1), length(scale2))
- if (length(x1 ) != L) x1 <- rep(x1, length.out = L)
- if (length(x2 ) != L) x2 <- rep(x2, length.out = L)
- if (length(loc1 ) != L) loc1 <- rep(loc1, length.out = L)
- if (length(loc2 ) != L) loc2 <- rep(loc2, length.out = L)
- if (length(scale1) != L) scale1 <- rep(scale1, length.out = L)
- if (length(scale2) != L) scale2 <- rep(scale2, length.out = L)
+ if (length(x1 ) != L) x1 <- rep_len(x1, L)
+ if (length(x2 ) != L) x2 <- rep_len(x2, L)
+ if (length(loc1 ) != L) loc1 <- rep_len(loc1, L)
+ if (length(loc2 ) != L) loc2 <- rep_len(loc2, L)
+ if (length(scale1) != L) scale1 <- rep_len(scale1, L)
+ if (length(scale2) != L) scale2 <- rep_len(scale2, L)
zedd1 <- (x1 - loc1) / scale1
zedd2 <- (x2 - loc2) / scale2
@@ -1377,20 +1372,20 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
sumx <- sumx * 1.1; sumxp <- sumxp * 1.2;
sumy <- sumy * 1.2; sumyp <- sumyp * 1.3;
}
- ainit <- if (length( .ia )) rep( .ia , length.out = n) else
+ ainit <- if (length( .ia )) rep_len( .ia , n) else
arr / (sumx + sumyp)
- apinit <- if (length( .iap )) rep( .iap , length.out = n) else
+ apinit <- if (length( .iap )) rep_len( .iap , n) else
(n-arr) / (sumxp - sumyp)
- binit <- if (length( .ib )) rep( .ib , length.out = n) else
+ binit <- if (length( .ib )) rep_len( .ib , n) else
(n-arr) / (sumx + sumyp)
- bpinit <- if (length( .ibp )) rep( .ibp , length.out = n) else
+ bpinit <- if (length( .ibp )) rep_len( .ibp , n) else
arr / (sumy - sumx)
etastart <-
- cbind(theta2eta(rep(ainit, length.out = n), .la , earg = .ea ),
- theta2eta(rep(apinit, length.out = n), .lap , earg = .eap ),
- theta2eta(rep(binit, length.out = n), .lb , earg = .eb ),
- theta2eta(rep(bpinit, length.out = n), .lbp , earg = .ebp ))
+ cbind(theta2eta(rep_len(ainit, n), .la , earg = .ea ),
+ theta2eta(rep_len(apinit, n), .lap , earg = .eap ),
+ theta2eta(rep_len(binit, n), .lb , earg = .eb ),
+ theta2eta(rep_len(bpinit, n), .lbp , earg = .ebp ))
}
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
@@ -1611,15 +1606,12 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
ainit <- grid.search(a.grid, objfun = mcg2.loglik,
y = y, x = x, w = w, maximize = TRUE,
extraargs = extraargs)
- ainit <- rep(if (is.Numeric( .iscale )) .iscale else ainit,
- length.out = n)
+ ainit <- rep_len(if (is.Numeric( .iscale )) .iscale else ainit, n)
pinit <- (1/ainit) * abs(momentsY[1]) + 0.01
qinit <- (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
- pinit <- rep(if (is.Numeric( .ishape1 )) .ishape1 else pinit,
- length.out = n)
- qinit <- rep(if (is.Numeric( .ishape2 )) .ishape2 else qinit,
- length.out = n)
+ pinit <- rep_len(if (is.Numeric( .ishape1 )) .ishape1 else pinit, n)
+ qinit <- rep_len(if (is.Numeric( .ishape2 )) .ishape2 else qinit, n)
etastart <-
cbind(theta2eta(ainit, .lscale),
@@ -1736,7 +1728,7 @@ rbifrankcop <- function(n, apar) {
if (!is.Numeric(apar, positive = TRUE))
stop("bad input for argument 'apar'")
if (length(apar) != use.n)
- apar <- rep(apar, length.out = use.n)
+ apar <- rep_len(apar, use.n)
U <- runif(use.n)
V <- runif(use.n)
@@ -1763,9 +1755,9 @@ pbifrankcop <- function(q1, q2, apar) {
if (!is.Numeric(apar, positive = TRUE)) stop("bad input for 'apar'")
L <- max(length(q1), length(q2), length(apar))
- if (length(apar) != L) apar <- rep(apar, length.out = L)
- if (length(q1 ) != L) q1 <- rep(q1, length.out = L)
- if (length(q2 ) != L) q2 <- rep(q2, length.out = L)
+ if (length(apar ) != L) apar <- rep_len(apar, L)
+ if (length(q1 ) != L) q1 <- rep_len(q1, L)
+ if (length(q2 ) != L) q2 <- rep_len(q2, L)
x <- q1; y <- q2
index <- (x >= 1 & y < 1) | (y >= 1 & x < 1) |
@@ -1814,9 +1806,9 @@ dbifrankcop <- function(x1, x2, apar, log = FALSE) {
if (!is.Numeric(apar, positive = TRUE)) stop("bad input for 'apar'")
L <- max(length(x1), length(x2), length(apar))
- if (length(apar) != L) apar <- rep(apar, length.out = L)
- if (length(x1 ) != L) x1 <- rep(x1, length.out = L)
- if (length(x2 ) != L) x2 <- rep(x2, length.out = L)
+ if (length(apar ) != L) apar <- rep_len(apar, L)
+ if (length(x1 ) != L) x1 <- rep_len(x1, L)
+ if (length(x2 ) != L) x2 <- rep_len(x2, L)
if (log.arg) {
denom <- apar-1 + (apar^x1 - 1) * (apar^x2 - 1)
@@ -1894,7 +1886,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- apar.init <- rep(.iapar, length.out = n)
+ apar.init <- rep_len(.iapar , n)
etastart <- cbind(theta2eta(apar.init, .lapar , earg = .eapar ))
}
}), list( .lapar = lapar, .eapar = eapar, .iapar = iapar))),
@@ -2056,7 +2048,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
theta.init <- if (length( .itheta)) {
- rep( .itheta , length.out = n)
+ rep_len( .itheta , n)
} else {
1 / (y[, 2] - 1 + 0.01)
}
@@ -2171,7 +2163,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
extra$dimnamesy2 = dimnames(y)[[2]]
if (!length(etastart)) {
- ainit <- if (length(.iapar)) rep( .iapar , length.out = n) else {
+ ainit <- if (length(.iapar)) rep_len( .iapar , n) else {
mean1 <- if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
mean2 <- if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
Finit <- 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
@@ -2179,7 +2171,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
expm1(-mean1) * expm1(-mean2))
}
etastart <-
- theta2eta(rep(ainit, length.out = n), .lapar , earg = .earg )
+ theta2eta(rep_len(ainit, n), .lapar , earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
@@ -2291,9 +2283,9 @@ dbifgmcop <- function(x1, x2, apar, log = FALSE) {
stop("bad input for argument 'log'")
L <- max(length(x1), length(x2), length(apar))
- if (length(x1) != L) x1 <- rep(x1, length.out = L)
- if (length(x2) != L) x2 <- rep(x2, length.out = L)
- if (length(apar) != L) apar <- rep(apar, length.out = L)
+ if (length(x1) != L) x1 <- rep_len(x1, L)
+ if (length(x2) != L) x2 <- rep_len(x2, L)
+ if (length(apar) != L) apar <- rep_len(apar, L)
ans <- 0 * x1
xnok <- (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
if ( log.arg ) {
@@ -2316,9 +2308,9 @@ pbifgmcop <- function(q1, q2, apar) {
if (any(abs(apar) > 1)) stop("'apar' values out of range")
L <- max(length(q1), length(q2), length(apar))
- if (length(q1) != L) q1 <- rep(q1, length.out = L)
- if (length(q2) != L) q2 <- rep(q2, length.out = L)
- if (length(apar) != L) apar <- rep(apar, length.out = L)
+ if (length(q1) != L) q1 <- rep_len(q1, L)
+ if (length(q2) != L) q2 <- rep_len(q2, L)
+ if (length(apar) != L) apar <- rep_len(apar, L)
x <- q1
y <- q2
@@ -2408,8 +2400,7 @@ pbifgmcop <- function(q1, q2, apar) {
ainit <- min(0.95, max(ainit, -0.95))
- etastart <-
- theta2eta(rep(ainit, length.out = n), .lapar , earg = .earg )
+ etastart <- theta2eta(rep_len(ainit, n), .lapar , earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
@@ -2532,21 +2523,21 @@ pbifgmcop <- function(q1, q2, apar) {
c(namesof("apar", .lapar , earg = .earg , short = TRUE))
if (!length(etastart)) {
- ainit <- if (length( .iapar )) rep( .iapar, length.out = n) else {
+ ainit <- if (length( .iapar )) rep_len( .iapar, n) else {
mean1 <- if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
mean2 <- if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
Finit <- 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
(log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
}
etastart <-
- theta2eta(rep(ainit, length.out = n), .lapar , earg = .earg )
+ theta2eta(rep_len(ainit, n), .lapar , earg = .earg )
}
}), list( .iapar = iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
alpha <- eta2theta(eta, .lapar , earg = .earg )
- cbind(rep(1, len = length(alpha)),
- rep(1, len = length(alpha)))
+ cbind(rep_len(1, length(alpha)),
+ rep_len(1, length(alpha)))
}, list( .lapar = lapar, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- c("apar" = .lapar )
@@ -2630,9 +2621,9 @@ pbiplackcop <- function(q1, q2, oratio) {
if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
L <- max(length(q1), length(q2), length(oratio))
- if (length(q1) != L) q1 <- rep(q1, length.out = L)
- if (length(q2) != L) q2 <- rep(q2, length.out = L)
- if (length(oratio) != L) oratio <- rep(oratio, length.out = L)
+ if (length(q1) != L) q1 <- rep_len(q1, L)
+ if (length(q2) != L) q2 <- rep_len(q2, L)
+ if (length(oratio) != L) oratio <- rep_len(oratio, L)
x <- q1; y <- q2
index <- (x >= 1 & y < 1) | (y >= 1 & x < 1) |
@@ -2762,9 +2753,7 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
(0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] < y20)]))))
}
}
- etastart <-
- theta2eta(rep(orinit, length.out = n),
- .link , earg = .earg )
+ etastart <- theta2eta(rep_len(orinit, n), .link , earg = .earg )
}
}), list( .ioratio = ioratio, .link = link, .earg = earg,
.imethod = imethod ))),
@@ -2873,9 +2862,9 @@ dbiamhcop <- function(x1, x2, apar, log = FALSE) {
L <- max(length(x1), length(x2), length(apar))
- apar <- rep(apar, length.out = L)
- x1 <- rep(x1, length.out = L)
- x2 <- rep(x2, length.out = L)
+ if (length(apar) != L) apar <- rep_len(apar, L)
+ if (length(x1) != L) x1 <- rep_len(x1, L)
+ if (length(x2) != L) x2 <- rep_len(x2, L)
temp <- 1 - apar*(1-x1)*(1-x2)
if (log.arg) {
@@ -2896,9 +2885,9 @@ pbiamhcop <- function(q1, q2, apar) {
if (!is.Numeric(apar)) stop("bad input for 'apar'")
L <- max(length(q1), length(q2), length(apar))
- if (length(q1) != L) q1 <- rep(q1, length.out = L)
- if (length(q2) != L) q2 <- rep(q2, length.out = L)
- if (length(apar) != L) apar <- rep(apar, length.out = L)
+ if (length(q1) != L) q1 <- rep_len(q1, L)
+ if (length(q2) != L) q2 <- rep_len(q2, L)
+ if (length(apar) != L) apar <- rep_len(apar, L)
x <- q1
y <- q2
@@ -3006,8 +2995,7 @@ biamhcop.control <- function(save.weights = TRUE, ...) {
(1 - (mean1 * mean2 / Finit)) / ((1-mean1) * (1-mean2))
}
ainit <- min(0.95, max(ainit, -0.95))
- etastart <-
- theta2eta(rep(ainit, length.out = n), .lapar , earg = .eapar )
+ etastart <- theta2eta(rep_len(ainit, n), .lapar , earg = .eapar )
}
}), list( .lapar = lapar, .eapar = eapar, .iapar = iapar,
.imethod = imethod))),
@@ -3249,7 +3237,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
con.use <- con.m
- for (klocal in 1:length(con.m)) {
+ for (klocal in seq_along(con.m)) {
con.use[[klocal]] <-
cbind(con.m[[klocal]],
con.s[[klocal]],
@@ -3304,16 +3292,13 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
extra$dimnamesy2 <- dimnames(y)[[2]]
if (!length(etastart)) {
- imean1 <- rep(if (length( .imean1 )) .imean1 else
- weighted.mean(y[, 1], w = w), length.out = n)
- imean2 <- rep(if (length( .imean2 )) .imean2 else
- weighted.mean(y[, 2], w = w), length.out = n)
- isd1 <- rep(if (length( .isd1 )) .isd1 else sd(y[, 1]),
- length.out = n)
- isd2 <- rep(if (length( .isd2 )) .isd2 else sd(y[, 2]),
- length.out = n)
- irho <- rep(if (length( .irho )) .irho else cor(y[, 1], y[, 2]),
- length.out = n)
+ imean1 <- rep_len(if (length( .imean1 )) .imean1 else
+ weighted.mean(y[, 1], w = w), n)
+ imean2 <- rep_len(if (length( .imean2 )) .imean2 else
+ weighted.mean(y[, 2], w = w), n)
+ isd1 <- rep_len(if (length( .isd1 )) .isd1 else sd(y[, 1]), n)
+ isd2 <- rep_len(if (length( .isd2 )) .isd2 else sd(y[, 2]), n)
+ irho <- rep_len(if (length( .irho )) .irho else cor(y[, 1], y[,2]),n)
if ( .imethod == 2) {
imean1 <- abs(imean1) + 0.01
@@ -3533,20 +3518,19 @@ gumbelI <-
predictors.names <-
c(namesof("a", .la, earg = .earg , short = TRUE))
if (!length(etastart)) {
- ainit <- if (length( .ia )) rep( .ia, len = n) else {
+ ainit <- if (length( .ia )) rep_len( .ia , n) else {
mean1 <- if ( .imethod == 1) median(y[,1]) else mean(y[,1])
mean2 <- if ( .imethod == 1) median(y[,2]) else mean(y[,2])
Finit <- 0.01 + mean(y[,1] <= mean1 & y[,2] <= mean2)
(log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
}
- etastart <-
- theta2eta(rep(ainit, len = n), .la, earg = .earg )
+ etastart <- theta2eta(rep_len(ainit, n), .la , earg = .earg )
}
}), list( .ia=ia, .la = la, .earg = earg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha <- eta2theta(eta, .la, earg = .earg )
- cbind(rep(1, len = length(alpha)),
- rep(1, len = length(alpha)))
+ alpha <- eta2theta(eta, .la , earg = .earg )
+ cbind(rep_len(1, length(alpha)),
+ rep_len(1, length(alpha)))
}, list( .la = la ))),
last = eval(substitute(expression({
misc$link <- c("a" = .la )
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 4cb3731..ed06f61 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -127,7 +127,7 @@ Deviance.categorical.data.vgam <-
M <- if (is.matrix(eta)) ncol(eta) else 1
if (M > 1)
return(NULL)
- devi <- devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
+ devi <- devi %*% rep_len(1, ncol(devi)) # deviance = \sum_i devi[i]
return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
} else {
dev.elts <- c(w) * devi
@@ -292,7 +292,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
fv.matrix
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames
misc$earg <- vector("list", M)
@@ -495,7 +495,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames
misc$earg <- vector("list", M)
@@ -641,7 +641,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
multinomial <- function(zero = NULL, parallel = FALSE,
- nointercept = NULL, refLevel = "last",
+ nointercept = NULL, refLevel = "(Last)",
whitespace = FALSE) {
@@ -649,24 +649,27 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
if (length(refLevel) != 1)
stop("the length of 'refLevel' must be one")
+ if ( is.numeric(refLevel) &&
+ !is.Numeric(refLevel, integer.valued = TRUE, positive = TRUE))
+ stop("argument 'refLevel' is not a positive integer")
+
if (is.character(refLevel)) {
- if (refLevel != "last")
- stop('if a character, refLevel must be "last"')
- refLevel <- -1
- } else
+ if (refLevel == "(Last)")
+ refLevel <- -1
+ }
if (is.factor(refLevel)) {
if (is.ordered(refLevel))
- warning("'refLevel' is from an ordered factor")
+ warning("argument 'refLevel' is from an ordered factor")
refLevel <- as.character(refLevel) == levels(refLevel)
- refLevel <- (1:length(refLevel))[refLevel]
+ refLevel <- (seq_along(refLevel))[refLevel]
if (!is.Numeric(refLevel, length.arg = 1,
integer.valued = TRUE, positive = TRUE))
stop("could not coerce 'refLevel' into a single positive integer")
- } else
- if (!is.Numeric(refLevel, length.arg = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("'refLevel' must be a single positive integer")
+ }
+
+
+
stopifnot(is.logical(whitespace) &&
@@ -677,23 +680,31 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
new("vglmff",
blurb = c("Multinomial logit model\n\n",
"Links: ",
+ if (is.numeric(refLevel)) {
if (refLevel < 0) {
ifelse(whitespace,
"log(mu[,j] / mu[,M+1]), j = 1:M,\n",
"log(mu[,j]/mu[,M+1]), j=1:M,\n")
} else {
- if (refLevel == 1) {
- paste("log(mu[,", "j]", fillerChar, "/", fillerChar,
- "mu[,", refLevel, "]), j",
- fillerChar, "=", fillerChar, "2:(M+1),\n",
- sep = "")
- } else {
- paste("log(mu[,", "j]", fillerChar, "/",
+ if (refLevel == 1) {
+ paste("log(mu[,", "j]", fillerChar, "/", fillerChar,
"mu[,", refLevel, "]), j",
- fillerChar, "=", fillerChar, "c(1:", refLevel-1,
- ",", fillerChar, refLevel+1, ":(M+1)),\n",
- sep = "")
- }
+ fillerChar, "=", fillerChar, "2:(M+1),\n",
+ sep = "")
+ } else {
+ paste("log(mu[,", "j]", fillerChar, "/",
+ "mu[,", refLevel, "]), j",
+ fillerChar, "=", fillerChar, "c(1:", refLevel-1,
+ ",", fillerChar, refLevel+1, ":(M+1)),\n",
+ sep = "")
+ }
+ }
+ } else { # refLevel is character
+ paste("log(mu[,", "j]", fillerChar, "/",
+ "mu[,'", refLevel, "']), j",
+ fillerChar, " != '", fillerChar, refLevel,
+ "',\n",
+ sep = "")
},
"Variance: ",
ifelse(whitespace,
@@ -701,11 +712,6 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
"mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
constraints = eval(substitute(expression({
-
-
-
-
-
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
apply.int = TRUE,
@@ -715,14 +721,13 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
M1 = M)
constraints <- cm.nointercept.VGAM(constraints, x, .nointercept , M)
}), list( .parallel = parallel, .zero = zero,
- .nointercept = nointercept,
- .refLevel = refLevel ))),
+ .nointercept = nointercept ))),
deviance = Deviance.categorical.data.vgam,
infos = eval(substitute(function(...) {
list(parallel = .parallel ,
- refLevel = .refLevel ,
+ refLevel = .refLevel , # original
M1 = -1,
link = "multilogit",
expected = TRUE,
@@ -745,9 +750,18 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
eval(process.categorical.data.VGAM)
M <- ncol(y)-1
- use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
+ use.refLevel <- if (is.numeric( .refLevel )) {
+ if ( .refLevel < 0) M+1 else .refLevel
+ } else { # Is character. Match it with the levels of the response.
+ tmp6 <- match( .refLevel , colnames(y))
+ if (is.na(tmp6))
+ stop("could not match argument 'refLevel' with any columns ",
+ "of the response matrix")
+ tmp6
+ }
if (use.refLevel > (M+1))
stop("argument 'refLevel' has a value that is too high")
+ extra$use.refLevel <- use.refLevel # Used in all other slots.
allbut.refLevel <- (1:(M+1))[-use.refLevel]
@@ -756,18 +770,16 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
"]", .fillerChar, "/", .fillerChar, "mu[,",
use.refLevel, "])", sep = "")
- y.names <- paste("mu", 1:(M+1), sep = "")
}), list( .refLevel = refLevel,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
-
- if (any(is.na(eta)))
+ if (anyNA(eta))
warning("there are NAs in eta in slot inverse")
-
- ans <- multilogit(eta, refLevel = .refLevel , inverse = TRUE)
- if (any(is.na(ans)))
+ ans <- multilogit(eta, refLevel = extra$use.refLevel, # .refLevel ,
+ inverse = TRUE)
+ if (anyNA(ans))
warning("there are NAs here in slot linkinv")
if (min(ans) == 0 || max(ans) == 1)
warning("fitted probabilities numerically 0 or 1 occurred")
@@ -776,7 +788,6 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}), list( .refLevel = refLevel )),
last = eval(substitute(expression({
- misc$refLevel <- if ( .refLevel < 0) M+1 else .refLevel
misc$link <- "multilogit"
misc$earg <- list(multilogit = list(
@@ -788,10 +799,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
if (!is.null(dy[[2]]))
dimnames(fit$fitted.values) <- dy
- misc$multipleResponses <- FALSE
misc$nointercept <- .nointercept
misc$parallel <- .parallel
- misc$refLevel <- use.refLevel
+ misc$refLevel <- use.refLevel # if ( .refLevel < 0) M+1 else .refLevel
misc$refLevel.orig <- .refLevel
misc$zero <- .zero
}), list( .refLevel = refLevel,
@@ -801,7 +811,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- multilogit(mu, refLevel = .refLevel )
+ multilogit(mu, refLevel = extra$use.refLevel) # .refLevel
}), list( .refLevel = refLevel )),
loglikelihood =
@@ -833,39 +843,32 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
},
vfamily = c("multinomial", "VGAMcategorical"),
deriv = eval(substitute(expression({
- if ( .refLevel < 0) {
- c(w) * (y[, -ncol(y)] - mu[, -ncol(y)])
- } else {
- use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
- c(w) * (y[, -use.refLevel] - mu[, -use.refLevel])
- }
+ use.refLevel <- extra$use.refLevel # Restore its value
+ c(w) * (y[, -use.refLevel] - mu[, -use.refLevel])
}), list( .refLevel = refLevel ))),
weight = eval(substitute(expression({
mytiny <- (mu < sqrt(.Machine$double.eps)) |
(mu > 1.0 - sqrt(.Machine$double.eps))
- use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
-
if (M == 1) {
- wz <- mu[, 3-use.refLevel] * (1-mu[, 3-use.refLevel])
+ wz <- mu[, 3 - use.refLevel] * (1 - mu[, 3 - use.refLevel])
} else {
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- myinc <- (index$row.index >= use.refLevel)
- index$row.index[myinc] <- index$row.index[myinc] + 1
- myinc <- (index$col.index >= use.refLevel)
- index$col.index[myinc] <- index$col.index[myinc] + 1
-
- wz <- -mu[, index$row] * mu[, index$col]
- wz[, 1:M] <- wz[, 1:M] + mu[, -use.refLevel ]
+ myinc <- (index$row.index >= use.refLevel)
+ index$row.index[myinc] <- index$row.index[myinc] + 1
+ myinc <- (index$col.index >= use.refLevel)
+ index$col.index[myinc] <- index$col.index[myinc] + 1
+ wz <- -mu[, index$row] * mu[, index$col]
+ wz[, 1:M] <- wz[, 1:M] + mu[, -use.refLevel ]
}
atiny <- (mytiny %*% rep(1, ncol(mu))) > 0 # apply(mytiny, 1, any)
if (any(atiny)) {
- if (M == 1) wz[atiny] <- wz[atiny] *
- (1 + .Machine$double.eps^0.5) +
- .Machine$double.eps else
- wz[atiny, 1:M] <- wz[atiny, 1:M] * (1 + .Machine$double.eps^0.5) +
- .Machine$double.eps
+ if (M == 1)
+ wz[atiny] <- wz[atiny] * (1 + .Machine$double.eps^0.5) +
+ .Machine$double.eps else
+ wz[atiny, 1:M] <- wz[atiny, 1:M] * (1 + .Machine$double.eps^0.5) +
+ .Machine$double.eps
}
c(w) * wz
}), list( .refLevel = refLevel ))))
@@ -1110,7 +1113,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
misc$earg <- list( .earg )
} else {
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames
misc$earg <- vector("list", M)
@@ -1384,7 +1387,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames
misc$earg <- vector("list", M)
@@ -1542,11 +1545,10 @@ acat.deriv <- function(zeta, reverse, M, n) {
stop("use bratt(), not brat(), when there are ties")
try.index <- 1:400
- M <- (1:length(try.index))[(try.index+1)*(try.index) == ncol(y)]
+ M <- (seq_along(try.index))[(try.index+1)*(try.index) == ncol(y)]
if (!is.finite(M))
stop("cannot determine 'M'")
- ialpha <- matrix(rep( .ialpha , length.out = M),
- n, M, byrow = TRUE)
+ ialpha <- matrix(rep_len( .ialpha , M), n, M, byrow = TRUE)
etastart <- matrix(theta2eta(ialpha, "loge",
earg = list(theta = NULL)),
n, M, byrow = TRUE)
@@ -1577,7 +1579,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
}, list( .refgp = refgp, .refvalue = refvalue) )),
last = eval(substitute(expression({
- misc$link <- rep("loge", length = M)
+ misc$link <- rep_len("loge", M)
names(misc$link) <- paste("alpha", uindex, sep = "")
misc$earg <- vector("list", M)
@@ -1626,7 +1628,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
earg = list(theta = NULL)),
.refvalue, .refgp )
ymat <- InverseBrat(y[ii, ], NCo = M+1, diag = 0)
- answer <- rep(0, len = M)
+ answer <- rep_len(0, M)
for (aa in 1:(M+1)) {
answer <- answer + (1 - (aa == uindex)) *
(ymat[uindex, aa] * alpha[aa] - ymat[aa, uindex] *
@@ -1706,7 +1708,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
initialize = eval(substitute(expression({
try.index <- 1:400
- M <- (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
+ M <- (seq_along(try.index))[(try.index*(try.index-1)) == ncol(y)]
if (!is.Numeric(M, length.arg = 1, integer.valued = TRUE))
stop("cannot determine 'M'")
NCo <- M # Number of contestants
@@ -1721,16 +1723,14 @@ acat.deriv <- function(zeta, reverse, M, n) {
ties <- 0 * y
}
- ialpha <- rep( .ialpha, len = NCo-1)
+ ialpha <- rep_len( .ialpha, NCo-1)
ialpha0 <- .i0
etastart <-
cbind(matrix(theta2eta(ialpha,
"loge",
list(theta = NULL)),
n, NCo-1, byrow = TRUE),
- theta2eta(rep(ialpha0, length.out = n),
- "loge",
- list(theta = NULL)))
+ theta2eta(rep_len(ialpha0, n), "loge", list(theta = NULL)))
refgp <- .refgp
if (!intercept.only)
warning("this function only works with intercept-only models")
@@ -1766,7 +1766,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
probs
}, list( .refgp = refgp, .refvalue = refvalue) )),
last = eval(substitute(expression({
- misc$link <- rep( "loge", length = M)
+ misc$link <- rep_len("loge", M)
names(misc$link) <- c(paste("alpha", uindex, sep = ""), "alpha0")
@@ -1810,7 +1810,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
alpha0 <- loge(eta[ii, M], inverse = TRUE)
ymat <- InverseBrat( y[ii, ], NCo = M, diag = 0)
tmat <- InverseBrat(ties[ii, ], NCo = M, diag = 0)
- answer <- rep(0, len = NCo-1) # deriv wrt eta[-M]
+ answer <- rep_len(0, NCo-1) # deriv wrt eta[-M]
for (aa in 1:NCo) {
Daj <- alpha[aa] + alpha[uindex] + alpha0
pja <- alpha[uindex] / Daj
@@ -1868,7 +1868,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
alphajunk[uindex[ind5$row]] / (alpha0 +
alphajunk[uindex[ind5$row]] + alphajunk[uindex[ind5$col]])^2
}
- for (sss in 1:length(uindex)) {
+ for (sss in seq_along(uindex)) {
jay <- uindex[sss]
naj <- ymat[, jay] + ymat[jay, ] + tmat[, jay]
Daj <- alpha[jay] + alpha + alpha0
@@ -1920,9 +1920,9 @@ acat.deriv <- function(zeta, reverse, M, n) {
allargs <- list(mat) # ,...
callit <- if (length(names(allargs))) names(allargs) else
- as.character(1:length(allargs))
+ as.character(seq_along(allargs))
ans <- ans.ties <- NULL
- for (ii in 1:length(allargs)) {
+ for (ii in seq_along(allargs)) {
m <- allargs[[ii]]
if (!is.matrix(m) || dim(m)[1] != dim(m)[2])
stop("m must be a square matrix")
@@ -1934,7 +1934,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
diag(ties) <- NA
diag(m) <- 0 # Could have been NAs
- if (any(is.na(m)))
+ if (anyNA(m))
stop("missing values not allowed (except on the diagonal)")
diag(m) <- NA
@@ -2036,7 +2036,7 @@ InverseBrat <-
positive = TRUE) ||
any(Levels < 2))
stop("'Levels' must have integer values (>= 2) only")
- Levels <- rep(Levels, length = NOS)
+ Levels <- rep_len(Levels, NOS)
}
@@ -2076,8 +2076,7 @@ InverseBrat <-
stop("the 'weights' argument must be a vector of all ones")
extra$NOS <- M <- NOS <- if (is.Numeric( .NOS )) .NOS else
ncol(orig.y)
- Levels <- rep(if (is.Numeric( .Levels )) .Levels else 0,
- len = NOS)
+ Levels <- rep_len(if (is.Numeric( .Levels )) .Levels else 0, NOS)
if (!is.Numeric( .Levels ))
for (iii in 1:NOS) {
Levels[iii] <- length(unique(sort(orig.y[,iii])))
@@ -2086,9 +2085,8 @@ InverseBrat <-
}
- initmu <- if (is.Numeric( .init.mu ))
- rep( .init.mu, len = NOS) else NULL
- cutpoints <- rep( .cutpoints, len = sum(Levels))
+ initmu <- if (is.Numeric( .init.mu )) rep_len( .init.mu , NOS) else NULL
+ cutpoints <- rep_len( .cutpoints, sum(Levels))
delete.zero.colns <- FALSE
use.y <- if ( .countdata ) y else matrix(0, n, sum(Levels))
use.etastart <- matrix(0, n, M)
@@ -2113,7 +2111,7 @@ InverseBrat <-
}
ncoly <- extra$ncoly <- sum(Levels)
- cp.vector <- rep( .cutpoints, length=ncoly)
+ cp.vector <- rep_len( .cutpoints , ncoly)
extra$countdata <- .countdata
extra$cutpoints <- cp.vector
extra$n <- n
@@ -2134,7 +2132,7 @@ InverseBrat <-
misc$link <- .link
misc$earg <- list( .earg )
} else {
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames
misc$earg <- vector("list", M)
@@ -2257,7 +2255,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
findFirstMethod <- function(methodsfn, charvec) {
answer <- NULL
- for (ii in 1:length(charvec)) {
+ for (ii in seq_along(charvec)) {
if (existsMethod(methodsfn, signature(VGAMff = charvec[ii]))) {
answer <- charvec[ii]
break
@@ -2598,7 +2596,7 @@ setMethod("margeffS4VGAM", signature(VGAMff = "acat"),
return((1 - Thetamat[, jay-1]) *
hdot[, tee] * cpThetamat[, jay] / Thetamat[, tee])
}
- return(rep(0, length = nrow(Thetamat))) # Since jay-1 > tee
+ return(rep_len(0, nrow(Thetamat))) # Since jay-1 > tee
} else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
if (jay == 1 && tee == 1) {
@@ -2612,7 +2610,7 @@ setMethod("margeffS4VGAM", signature(VGAMff = "acat"),
return((1 - Thetamat[, jay]) *
hdot[, tee] * cpThetamat[, jay-1] / Thetamat[, tee])
}
- return(rep(0, length = nrow(Thetamat))) # Since jay < tee
+ return(rep_len(0, nrow(Thetamat))) # Since jay < tee
} # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
} # cratio.derivs
@@ -2944,7 +2942,7 @@ setMethod("margeffS4VGAM", signature(VGAMff = "sratio"),
dimnames = list(dimnames(B)[[1]],
dimnames(B)[[2]],
dimnames(fitted(object)[ii, ])[[1]]))
- for (ilocal in 1:length(ii)) {
+ for (ilocal in seq_along(ii)) {
pvec <- fitted(object)[ii[ilocal], ]
temp1 <- B * matrix(pvec, ppp, M+1, byrow = TRUE)
temp2 <- matrix(rowSums(temp1), ppp, M+1)
@@ -3154,7 +3152,7 @@ setMethod("margeffS4VGAM", signature(VGAMff = "sratio"),
return((1 - Thetamat[, jay-1]) *
hdot[, tee] * cpThetamat[, jay] / Thetamat[, tee])
}
- return(rep(0, length = nrow(Thetamat))) # Since jay-1 > tee
+ return(rep_len(0, nrow(Thetamat))) # Since jay-1 > tee
} else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
if (jay == 1 && tee == 1) {
@@ -3168,7 +3166,7 @@ setMethod("margeffS4VGAM", signature(VGAMff = "sratio"),
return((1 - Thetamat[, jay]) *
hdot[, tee] * cpThetamat[, jay-1] / Thetamat[, tee])
}
- return(rep(0, length = nrow(Thetamat))) # Since jay < tee
+ return(rep_len(0, nrow(Thetamat))) # Since jay < tee
} # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
} # cratio.derivs
@@ -3380,7 +3378,7 @@ is.zero.matrix <- function(object, ...) {
rnames <- rownames(object)
intercept.index <- if (length(rnames)) {
if (any(rnames == "(Intercept)")) {
- (1:length(rnames))[rnames == "(Intercept)"]
+ (seq_along(rnames))[rnames == "(Intercept)"]
} else {
stop("the matrix does not seem to have an intercept")
NULL
diff --git a/R/family.censored.R b/R/family.censored.R
index 8cc4b65..340c668 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,7 +28,7 @@
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu"),
initialize = eval(substitute(expression({
- if (any(is.na(y)))
+ if (anyNA(y))
stop("NAs are not allowed in the response")
@@ -44,16 +44,16 @@
temp <- y[, 2]
extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored <- rep(FALSE, len = n)
- extra$interval <- rep(FALSE, len = n)
+ extra$leftcensored <- rep_len(FALSE, n)
+ extra$interval <- rep_len(FALSE, n)
init.mu <- pmax(y[, 1], 1/8)
} else
if (centype == "left") {
temp <- y[, 2]
extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored <- rep(FALSE, len = n)
+ extra$rightcensored <- rep_len(FALSE, n)
extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
- extra$interval <- rep(FALSE, len = n)
+ extra$interval <- rep_len(FALSE, n)
init.mu <- pmax(y[, 1], 1/8)
} else
if (centype == "interval" ||
@@ -230,22 +230,22 @@ if (FALSE)
temp <- y[, 2]
extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
extra$rightcensored <- ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored <- rep(FALSE, len = n)
- extra$interval <- rep(FALSE, len = n)
+ extra$leftcensored <- rep_len(FALSE, n)
+ extra$interval <- rep_len(FALSE, n)
} else
if (type == "left") {
temp <- y[, 2]
extra$uncensored <- ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored <- rep(FALSE, len = n)
+ extra$rightcensored <- rep_len(FALSE, n)
extra$leftcensored <- ifelse(temp == 0, TRUE, FALSE)
- extra$interval <- rep(FALSE, len = n)
+ extra$interval <- rep_len(FALSE, n)
} else
if (type == "counting") {
stop("type == 'counting' not recognized")
extra$uncensored <- rep(temp == 1, TRUE, FALSE)
- extra$interval <- rep(FALSE, len = n)
- extra$leftcensored <- rep(FALSE, len = n)
- extra$rightcensored <- rep(FALSE, len = n)
+ extra$interval <- rep_len(FALSE, n)
+ extra$leftcensored <- rep_len(FALSE, n)
+ extra$rightcensored <- rep_len(FALSE, n)
extra$counting <- ifelse(temp == 0, TRUE, FALSE)
} else
if (type == "interval") {
@@ -385,9 +385,9 @@ if (FALSE)
if (!length(extra$leftcensored))
- extra$leftcensored <- rep(FALSE, len = n)
+ extra$leftcensored <- rep_len(FALSE, n)
if (!length(extra$rightcensored))
- extra$rightcensored <- rep(FALSE, len = n)
+ extra$rightcensored <- rep_len(FALSE, n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
@@ -402,7 +402,7 @@ if (FALSE)
y = y[!i11], w = w[!i11])
sd.y.est <- sqrt(sum(w[!i11] * junk$resid^2) / junk$df.residual)
etastart <- cbind(mu = y,
- rep(theta2eta(sd.y.est, .lsd), length = n))
+ rep_len(theta2eta(sd.y.est, .lsd), n))
if (any(anyc))
etastart[anyc, 1] <- x[anyc, , drop = FALSE] %*% junk$coeff
}
@@ -559,7 +559,7 @@ if (FALSE)
stop("cannot handle left-censored data")
if (!length(extra$rightcensored))
- extra$rightcensored <- rep(FALSE, len = n)
+ extra$rightcensored <- rep_len(FALSE, n)
predictors.names <-
namesof("scale", .lscale , earg = .escale , tag = FALSE)
@@ -802,8 +802,8 @@ if (FALSE)
M1 <- extra$M1
- avector <- c(rep( .lmeann , length = ncoly),
- rep( .lshape , length = ncoly))
+ avector <- c(rep_len( .lmeann , ncoly),
+ rep_len( .lshape , ncoly))
misc$link <- avector[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1087,10 +1087,10 @@ if (FALSE)
M1 <- extra$M1
- avector <- if ( .lss ) c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly)) else
- c(rep( .lshape , length = ncoly),
- rep( .lscale , length = ncoly))
+ avector <- if ( .lss ) c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly)) else
+ c(rep_len( .lshape , ncoly),
+ rep_len( .lscale , ncoly))
misc$link <- avector[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1541,8 +1541,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
Alpha.init[, ilocal] <- (1 / bbb.init)^aaa.init
} # ilocal
} else {
- Alpha.init <- rep( .iAlpha , length = n)
- Betaa.init <- rep( .iBetaa , length = n)
+ Alpha.init <- rep_len( .iAlpha , n)
+ Betaa.init <- rep_len( .iBetaa , n)
}
etastart <-
@@ -1594,8 +1594,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
M1 <- extra$M1
misc$link <-
- c(rep( .lAlpha , length = ncoly),
- rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lAlpha , ncoly),
+ rep_len( .lBetaa , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
diff --git a/R/family.circular.R b/R/family.circular.R
index 1394023..8e1be77 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -17,11 +17,11 @@ dcard <- function(x, mu, rho, log = FALSE) {
L <- max(length(x), length(mu), length(rho))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mu) != L) mu <- rep(mu, len = L)
- if (length(rho) != L) rho <- rep(rho, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mu) != L) mu <- rep_len(mu, L)
+ if (length(rho) != L) rho <- rep_len(rho, L)
- logdensity <- rep(log(0), len = L)
+ logdensity <- rep_len(log(0), L)
xok <- (x > 0) & (x < (2*pi))
logdensity[xok] <- -log(2*pi) + log1p(2 * rho[xok] *
cos(x[xok]-mu[xok]))
@@ -75,9 +75,9 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
stop("'p' must be between 0 and 1")
nn <- max(length(p), length(mu), length(rho))
- if (length(p) != nn) p <- rep(p, len = nn)
- if (length(mu) != nn) mu <- rep(mu, len = nn)
- if (length(rho) != nn) rho <- rep(rho, len = nn)
+ if (length(p) != nn) p <- rep_len(p, nn)
+ if (length(mu) != nn) mu <- rep_len(mu, nn)
+ if (length(rho) != nn) rho <- rep_len(rho, nn)
if (!is.logical(lower.tail) || length(lower.tail ) != 1)
@@ -180,8 +180,8 @@ rcard <- function(n, mu, rho, ...) {
if (!is.Numeric(rho) || max(abs(rho) > 0.5))
stop("argument 'rho' must be between -0.5 and 0.5 inclusive")
- mu <- rep(mu, len = use.n)
- rho <- rep(rho, len = use.n)
+ mu <- rep_len(mu, use.n)
+ rho <- rep_len(rho, use.n)
qcard(runif(use.n), mu = mu, rho = rho, ...)
}
@@ -272,7 +272,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
namesof("rho", .lrho , earg = .erho , tag = FALSE))
if (!length(etastart)) {
- rho.init <- rep(if (length( .irho )) .irho else 0.3, length = n)
+ rho.init <- rep_len(if (length( .irho )) .irho else 0.3, n)
cardioid.Loglikfun <- function(mu, y, x, w, extraargs) {
rho <- extraargs$irho
@@ -283,7 +283,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
grid.search(mu.grid, objfun = cardioid.Loglikfun,
y = y, x = x, w = w,
extraargs = list(irho = rho.init))
- mu.init <- rep(mu.init, length=length(y))
+ mu.init <- rep_len(mu.init, length(y))
etastart <-
cbind(theta2eta( mu.init, .lmu , earg = .emu ),
theta2eta(rho.init, .lrho , earg = .erho ))
@@ -438,11 +438,8 @@ cardioid.control <- function(save.weights = TRUE, ...) {
scale.init <- sqrt(sum(w*abs(y - locat.init)) / sum(w))
}
- locat.init <- if (length( .ilocat ))
- rep( .ilocat , len = n) else
- rep(locat.init, len = n)
- scale.init <- if (length( .iscale ))
- rep( .iscale , len = n) else rep(1, len = n)
+ locat.init <- rep_len(if (length( .ilocat )) .ilocat else locat.init,n)
+ scale.init <- rep_len(if (length( .iscale )) .iscale else 1, n)
etastart <- cbind(
theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
diff --git a/R/family.exp.R b/R/family.exp.R
index bc4b1d7..581d764 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -609,8 +609,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
}
Scale.init <- if (length( .iscale )) .iscale else
diff(quantile(y, prob = c(0.25, 0.75))) / (2 * 1.155) + 1.0e-5
- locat.init <- rep(locat.init, length = length(y))
- Scale.init <- rep(Scale.init, length = length(y))
+ locat.init <- rep_len(locat.init, length(y))
+ Scale.init <- rep_len(Scale.init, length(y))
etastart <- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
theta2eta(Scale.init, .lscale, earg = .escale))
}
@@ -623,7 +623,7 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
answer <- matrix(locat, nrow(eta), length(Perce))
- for (ii in 1:length(Perce))
+ for (ii in seq_along(Perce))
answer[, ii] <- qsc.t2(Perce[ii] / 100, loc = locat, sc = Scale)
dimnames(answer) <- list(dimnames(eta)[[1]],
paste(as.character(Perce), "%", sep = ""))
@@ -642,7 +642,7 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
misc$multipleResponses <- FALSE
ncoly <- ncol(y)
- for (ii in 1:length( .percentile )) {
+ for (ii in seq_along( .percentile )) {
y.use <- if (ncoly > 1) y[, ii] else y
mu <- cbind(mu)
extra$percentile[ii] <- 100 * weighted.mean(y.use <= mu[, ii], w)
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 576277b..1ceea48 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,6 +18,7 @@
+
rgev <- function(n, location = 0, scale = 1, shape = 0) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -30,12 +31,9 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
stop("bad input for argument argument 'shape'")
ans <- numeric(use.n)
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
scase <- abs(shape) < sqrt( .Machine$double.eps )
nscase <- sum(scase)
@@ -52,35 +50,34 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt( .Machine$double.eps ),
- oobounds.log = -Inf, giveWarning = FALSE) {
+ tolshape0 = sqrt( .Machine$double.eps )) {
+
+ oobounds.log <- -Inf # 20160412; No longer an argument.
+
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- if (oobounds.log > 0)
- stop("bad input for argument 'oobounds.log'")
if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE))
stop("bad input for argument 'tolshape0'")
use.n <- max(length(x), length(location), length(scale), length(shape))
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
+
+
+ if (length(x) != use.n) x <- rep_len(x, use.n)
- x <- rep(x, length.out = use.n)
- logdensity <- rep(log(0), length.out = use.n)
+ logdensity <- rep_len(log(0), use.n)
scase <- (abs(shape) < tolshape0)
nscase <- sum(scase)
if (use.n - nscase) {
- zedd <- 1 + shape * (x - location) / scale # pmax(0, (1+shape*xc/scale))
+ zedd <- 1 + shape * (x - location) / scale # pmax(0, (1+shape*xc/scale))
xok <- (!scase) & (zedd > 0)
logdensity[xok] <- -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
(1 + 1/shape[xok]) * log(zedd[xok])
@@ -88,9 +85,6 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
if (any(outofbounds)) {
logdensity[outofbounds] <- oobounds.log
no.oob <- sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
}
if (nscase) {
@@ -99,7 +93,6 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
}
logdensity[scale <= 0] <- NaN
-
logdensity[is.infinite(x)] <- log(0) # 20141209 KaiH
if (log.arg) logdensity else exp(logdensity)
@@ -107,6 +100,7 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
+
pgev <- function(q, location = 0, scale = 1, shape = 0,
lower.tail = TRUE, log.p = FALSE) {
if (!is.logical(lower.tail) || length(lower.tail ) != 1)
@@ -115,16 +109,11 @@ pgev <- function(q, location = 0, scale = 1, shape = 0,
if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
stop("bad input for argument 'log.p'")
-
use.n <- max(length(q), length(location), length(scale), length(shape))
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
- if (length(q) != use.n)
- q <- rep(q, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
+ if (length(q) != use.n) q <- rep_len(q, use.n)
scase0 <- abs(shape) < sqrt( .Machine$double.eps ) # Effectively 0
zedd <- (q - location) / scale
@@ -164,16 +153,11 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
-
use.n <- max(length(p), length(location), length(scale), length(shape))
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
- if (length(p) != use.n)
- p <- rep(p, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
+ if (length(p) != use.n) p <- rep_len(p, use.n)
scase0 <- abs(shape) < sqrt( .Machine$double.eps )
@@ -194,7 +178,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
ans[ln.p > 0] <- NaN
} else {
ans <- location + scale * ((-log1p(-p))^(-shape) - 1) / shape
- ans[p == 1] <- Inf
+ ans[p == 1] <- Inf
ans[p > 1] <- NaN
ans[p < 0] <- NaN
}
@@ -212,20 +196,28 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
- gev <- function(
- llocation = "identitylink",
- lscale = "loge",
- lshape = logoff(offset = 0.5),
- percentiles = c(95, 99),
- iscale = NULL, ishape = NULL,
- imethod = 1, gshape = c(-0.45, 0.45),
- tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"),
- giveWarning = TRUE,
- zero = c("scale", "shape")) {
+ gev <-
+ function(
+ llocation = "identitylink",
+ lscale = "loge",
+ lshape = logoff(offset = 0.5),
+ percentiles = c(95, 99),
+ ilocation = NULL,
+ iscale = NULL, ishape = NULL,
+ imethod = 1,
+
+ gprobs.y = (1:9)/10, # 20160713; grid for finding locat.init
+ gscale.mux = exp((-5:5)/6), # exp(-5:5),
+ gshape = (-5:5) / 11 + 0.01, # c(-0.45, 0.45),
+ iprobs.y = NULL,
+
+ tolshape0 = 0.001,
+ type.fitted = c("percentiles", "mean"),
+ zero = c("scale", "shape")) {
+ ilocat <- ilocation
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
@@ -242,18 +234,10 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
lshape <- attr(eshape, "function.name")
-
-
-
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
-
if (length(iscale) &&
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
-
-
if (length(percentiles) &&
(!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
@@ -263,23 +247,21 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
+
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE) ||
tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (!is.Numeric(gshape, length.arg = 2) ||
- gshape[1] >= gshape[2])
- stop("bad input for argument 'gshape'")
new("vglmff",
blurb = c("Generalized extreme value distribution\n",
"Links: ",
- namesof("location", llocat, elocat), ", ",
- namesof("scale", lscale, escale), ", ",
- namesof("shape", lshape, eshape)),
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape)),
constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
@@ -303,6 +285,24 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
initialize = eval(substitute(expression({
+
+
+ temp16 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = FALSE,
+ Is.integer.y = FALSE,
+ ncol.w.max = 1, # Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = NULL, # Ignore this argument
+ maximize = TRUE)
+ w <- temp16$w
+ y <- temp16$y
+
+
+
+
+
M1 <- extra$M1 <- 3
ncoly <- ncol(y)
extra$ncoly <- ncoly
@@ -313,124 +313,114 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
mynames1 <- "location"
mynames2 <- "scale"
mynames3 <- "shape"
- llocat <- .llocat
- lscale <- .lscale
- lshape <- .lshape
predictors.names <- c(
- namesof(mynames1, .llocat , .elocat , short = TRUE),
- namesof(mynames2, .lscale , .escale , short = TRUE),
- namesof(mynames3, .lshape , .eshape , short = TRUE))
+ namesof(mynames1, .llocat , earg = .elocat , short = TRUE),
+ namesof(mynames2, .lscale , earg = .escale , short = TRUE),
+ namesof(mynames3, .lshape , earg = .eshape , short = TRUE))
+ if (ncol(y) > 1)
+ y <- -t(apply(-y, 1, sort, na.last = TRUE))
- y <- as.matrix(y)
+ r.vec <- rowSums(cbind(!is.na(y)))
+ if (any(r.vec == 0))
+ stop("A row contains all missing values")
+
+ extra$percentiles <- .percentiles
- if (ncol(y) > 1)
- y <- -t(apply(-y, 1, sort, na.last = TRUE))
+ NOS.proxy <- 1
+ gprobs.y <- .gprobs.y
+ ilocat <- .ilocat # Default is NULL
+ if (length(ilocat))
+ ilocat <- matrix(ilocat, n, NOS.proxy, byrow = TRUE)
+ if (!length(etastart)) {
+ locat.init <-
+ shape.init <-
+ scale.init <- matrix(NA_real_, n, NOS.proxy)
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gscale.mux <- .gscale.mux # gscale.mux is on a relative scale
+ gshape <- .gshape
+ for (jay in 1:NOS.proxy) { # For each response 'y_jay'... do:
- r.vec <- rowSums(cbind(!is.na(y)))
+ scale.init.jay <- sd(y[, 1]) * sqrt(6) / pi # Based on the Gumbel
+ scale.init.jay <- gscale.mux * scale.init.jay
+ if (length( .iscale ))
+ scale.init.jay <- .iscale # iscale is on an absolute scale
- if (any(r.vec == 0))
- stop("A row contains all missing values")
- extra$percentiles <- .percentiles
- if (!length(etastart)) {
- init.sig <- if (length( .iscale ))
- rep( .iscale, length.out = nrow(y)) else NULL
- init.xi <- if (length( .ishape ))
- rep( .ishape, length.out = nrow(y)) else NULL
- LIST.lshape <- .lshape
-
- if ( .lshape == "extlogit" && length(init.xi) &&
- (any(init.xi <= LIST.lshape$min |
- init.xi >= LIST.lshape$max)))
- stop("bad input for an argument in 'lshape'")
-
- if ( .imethod == 1) {
- nvector <- 4:10 # Arbitrary; could be made an argument
- ynvector <- quantile(y[, 1], probs = 1 - 1/nvector)
- objecFunction <- -Inf # Actually the log-likelihood
- est.sigma <- !length(init.sig)
- gshape <- .gshape
- temp234 <- if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], length.out = 12)
- for (shapeTry in temp234) {
- xvec <- if (abs(shapeTry) < .tolshape0) log(nvector) else
- (nvector^shapeTry - 1) / shapeTry
- fit0 <- lsfit(x = xvec, y = ynvector, intercept = TRUE)
- sigmaTry <- if (est.sigma)
- rep(fit0$coef["X"], length.out = nrow(y)) else
- init.sig
- LocatTry <- rep(fit0$coef["Intercept"], length.out = nrow(y))
- llTry <- egev(giveWarning =
- FALSE)@loglikelihood(mu = NULL, y = y[, 1], w = w,
- residuals = FALSE,
- eta =
- cbind(theta2eta(LocatTry, .llocat , .elocat ),
- theta2eta(sigmaTry, .lscale , .escale ),
- theta2eta(shapeTry, .lshape , .eshape )))
- if (llTry >= objecFunction) {
- if (est.sigma)
- init.sig <- sigmaTry
- init.mu <- rep(LocatTry, length.out = nrow(y))
- objecFunction <- llTry
- bestxi <- shapeTry
- }
- }
- if (!length(init.xi))
- init.xi <- rep(bestxi, length.out = nrow(y))
- } else {
- init.xi <- rep(0.05, length.out = nrow(y))
- if (!length(init.sig))
- init.sig <- rep(sqrt(6 * var(y[, 1]))/pi,
- length.out = nrow(y))
- EulerM <- -digamma(1)
- init.mu <- rep(median(y[, 1]) - EulerM*init.sig,
- length.out = nrow(y))
- }
+ if (length( .ishape ))
+ gshape <- .ishape # ishape is on an absolute scale
+
+
+ locat.init.jay <- if ( .imethod == 1) {
+ quantile(y[, jay], probs = gprobs.y) # + 1/16
+ } else {
+
+ weighted.mean(y[, jay], w = w[, 1])
+ }
+ if (length(ilocat))
+ locat.init.jay <- ilocat[, jay]
+
+
+ gev.Loglikfun3 <- function(shapeval, locatval, scaleval,
+ y, x, w, extraargs) {
+ sum(c(w) * dgev(x = y,
+ locat = locatval,
+ scale = scaleval,
+ shape = shapeval, log = TRUE), na.rm = TRUE)
+ }
+
+ try.this <-
+ grid.search3(gshape, locat.init.jay, scale.init.jay,
+ objfun = gev.Loglikfun3,
+ y = y[, 1], w = w[, jay],
+ ret.objfun = TRUE, # Last value is the loglik
+ extraargs = NULL)
+
+ shape.init[, jay] <- try.this["Value1" ]
+ locat.init[, jay] <- try.this["Value2" ]
+ scale.init[, jay] <- try.this["Value3" ]
+ } # for (jay ...)
- bad <- ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
- if (fred <- sum(bad)) {
- warning(fred, "observations violating boundary constraints ",
- "while initializing. Taking corrective action")
- init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
- }
etastart <-
- cbind(theta2eta(init.mu, .llocat , .elocat ),
- theta2eta(init.sig, .lscale , .escale ),
- theta2eta(init.xi, .lshape , .eshape ))
+ cbind(theta2eta(locat.init, .llocat , .elocat ),
+ theta2eta(scale.init, .lscale , .escale ),
+ theta2eta(shape.init, .lshape , .eshape ))
}
- }), list(
+ }), list(
.llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
- .ishape = ishape, .iscale = iscale,
+ .ilocat = ilocat, .ishape = ishape, .iscale = iscale,
+
+ .gprobs.y = gprobs.y, .gscale.mux = gscale.mux,
+ .iprobs.y = iprobs.y,
.gshape = gshape, .type.fitted = type.fitted,
.percentiles = percentiles,
.tolshape0 = tolshape0,
- .imethod = imethod, .giveWarning = giveWarning ))),
+ .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Locat <- eta2theta(eta[, 1], .llocat , .elocat )
sigma <- eta2theta(eta[, 2], .lscale , .escale )
shape <- eta2theta(eta[, 3], .lshape , .eshape )
-
type.fitted <-
if (length(extra$type.fitted)) {
extra$type.fitted
@@ -444,27 +434,23 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
- is.zero <- (abs(shape) < .tolshape0 )
-
- cent <- extra$percentiles
- LP <- length(cent)
+ pcent <- extra$percentiles
+ LP <- length(pcent)
if (type.fitted == "percentiles" && # Upward compatibility:
LP > 0) {
fv <- matrix(NA_real_, nrow(eta), LP)
for (ii in 1:LP) {
- yp <- -log(cent[ii] / 100)
- fv[!is.zero, ii] <- Locat[!is.zero] - sigma[!is.zero] *
- (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
- fv[ is.zero, ii] <- Locat[ is.zero] - sigma[ is.zero] *
- log(yp)
+ fv[, ii] <- qgev(pcent[ii] /100, loc = Locat, scale = sigma,
+ shape = shape)
}
dimnames(fv) <- list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep = ""))
+ paste(as.character(pcent), "%", sep = ""))
} else {
+ is.zero <- (abs(shape) < .tolshape0 )
EulerM <- -digamma(1)
fv <- Locat + sigma * EulerM # When shape = 0, is Gumbel
fv[!is.zero] <- Locat[!is.zero] + sigma[!is.zero] *
- (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
+ (gamma(1 - shape[!is.zero]) - 1) / shape[!is.zero]
fv[shape >= 1] <- NA # Mean exists only if shape < 1.
}
fv
@@ -484,16 +470,11 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
names(misc$link) <- c(mynames1, mynames2, mynames3)
misc$M1 <- M1
- misc$expected <- TRUE
misc$multipleResponses <- FALSE
-
-
-
- misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$true.mu <- !length( .percentiles ) # @fitted is not a true mu
misc$percentiles <- .percentiles
- misc$expected <- TRUE
misc$tolshape0 <- .tolshape0
if (ncol(y) == 1)
y <- as.vector(y)
@@ -502,8 +483,8 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
}), list(
.llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
-
.tolshape0 = tolshape0, .percentiles = percentiles ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Locat <- eta2theta(eta[, 1], .llocat , .elocat )
@@ -511,55 +492,51 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
shape <- eta2theta(eta[, 3], .lshape , .eshape )
- is.zero <- (abs(shape) < .tolshape0 )
- zedd <- (y-Locat) / sigma
- r.vec <- rowSums(cbind(!is.na(y)))
- A <- 1 + shape * (y-Locat)/sigma
- ii <- 1:nrow(eta)
- A1 <- A[cbind(ii, r.vec)]
- mytolerance <- 0 # .Machine$double.eps
- if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
- if ( .giveWarning )
- warning("There are", sum(bad), "range violations in @loglikelihood")
- cat("There are", sum(bad),
- "range violations in @loglikelihood\n")
- flush.console()
- }
- igev <- !is.zero & !bad
- igum <- is.zero & !bad
- pow <- 1 + 1/shape[igev]
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
-
-
- old.answer <-
- sum(bad) * (-1.0e10) +
- sum(w[igum] * (-r.vec[igum] * log(sigma[igum]) -
- exp(-zedd[igum, r.vec[igum]]) -
- rowSums(cbind(zedd)[igum, , drop = FALSE], na.rm = TRUE))) +
- sum(w[igev] * (-r.vec[igev] * log(sigma[igev]) -
- pow * rowSums(cbind(log(A[igev])), na.rm = TRUE) -
- A1[igev]^(-1/shape[igev])))
-
- new.answer <-
- sum(w * dgev(x = y, location = Locat, scale = sigma, shape = shape,
- tolshape0 = .tolshape0 ,
- giveWarning = .giveWarning,
- log = TRUE, oobounds.log = -1.0e04))
- check0 <- old.answer - new.answer
- old.answer
+ new.answer <-
+ sum(c(w) * dgev(x = y, location = Locat, scale = sigma,
+ shape = shape, tolshape0 = .tolshape0 ,
+ log = TRUE), na.rm = TRUE)
new.answer
}
- }, list(
+ }, list(
.llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
-
- .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
+ .tolshape0 = tolshape0 ))),
vfamily = c("gev", "vextremes"),
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , .elocat )
+ sigma <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+ shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , .eshape )
+
+ okay1 <- all(is.finite(Locat)) &&
+ all(is.finite(sigma)) && all(sigma > 0) &&
+ all(is.finite(shape))
+ okay.support <-
+ if (okay1) {
+ Boundary <- Locat - sigma / shape
+ all((shape == 0) ||
+ (shape < 0 & y < Boundary) ||
+ (shape > 0 & y > Boundary))
+ } else {
+ TRUE
+ }
+ if (!okay.support)
+ warning("current parameter estimates are at the boundary of ",
+ "the parameter space. ",
+ "Try fitting a Gumbel model instead.")
+ okay1 && okay.support
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape ))),
+
+
deriv = eval(substitute(expression({
M1 <- 3
r.vec <- rowSums(cbind(!is.na(y)))
@@ -574,12 +551,12 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
is.zero <- (abs(shape) < .tolshape0 )
ii <- 1:nrow(eta)
- zedd <- (y-Locat) / sigma
+ zedd <- (y - Locat) / sigma
A <- 1 + shape * zedd
dA.dxi <- zedd # matrix
dA.dmu <- -shape/sigma # vector
- dA.dsigma <- -shape*zedd/sigma # matrix
- pow <- 1 + 1/shape
+ dA.dsigma <- -shape * zedd / sigma # matrix
+ pow <- 1 + 1 / shape
A1 <- A[cbind(ii, r.vec)]
AAr1 <- dA.dmu/(shape * A1^pow) -
@@ -593,38 +570,38 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
dl.dxi <- rowSums(cbind(log(A)), na.rm = TRUE)/shape^2 -
pow * rowSums(cbind(dA.dxi/A), na.rm = TRUE) -
(log(A1) / shape^2 -
- dA.dxi[cbind(ii,r.vec)] / (shape*A1)) * A1^(-1/shape)
+ dA.dxi[cbind(ii, r.vec)] / (shape*A1)) * A1^(-1/shape)
if (any(is.zero)) {
- zorro <- c(zedd[cbind(1:n,r.vec)])
+ zorro <- c(zedd[cbind(1:n, r.vec)])
zorro <- zorro[is.zero]
- ezedd <- exp(-zorro)
- dl.dmu[is.zero] <- (1-ezedd) / sigma[is.zero]
- dl.dsi[is.zero] <- (zorro * (1 - ezedd) - 1) / sigma[is.zero]
- dl.dxi[is.zero] <- zorro * ((1 - ezedd) * zorro / 2 - 1)
+ ezm1 <- -expm1(-zorro) # 1 - exp(-zorro)
+ dl.dmu[is.zero] <- ezm1 / sigma[is.zero]
+ dl.dsi[is.zero] <- (zorro * ezm1 - 1) / sigma[is.zero]
+ dl.dxi[is.zero] <- zorro * (ezm1 * zorro / 2 - 1)
}
c(w) * cbind(dl.dmu * dmu.deta,
dl.dsi * dsi.deta,
dl.dxi * dxi.deta)
- }), list(
- .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
-
.tolshape0 = tolshape0 ))),
+
weight = eval(substitute(expression({
kay <- -shape
- dd <- digamma(r.vec-kay+1)
- ddd <- digamma(r.vec+1) # Unnecessarily evaluated at each iteration
- temp13 <- -kay * dd + (kay^2 - kay + 1) / (1-kay)
+ dd <- digamma(r.vec - kay + 1)
+ ddd <- digamma(r.vec + 1) # Unnecessarily evaluated at each iteration
+ temp13 <- -kay * dd + (kay^2 - kay + 1) / (1 - kay)
temp33 <- 1 - 2 * kay * ddd +
- kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
- temp23 <- -kay * dd + (1+(1-kay)^2) / (1-kay)
- GR.gev <- function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
- tmp2 <- (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
- tmp1 <- (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
- k0 <- (1-2*kay)
+ kay^2 * (1 + trigamma(r.vec + 1) + ddd^2)
+ temp23 <- -kay * dd + (1 + (1-kay)^2) / (1-kay)
+ GR.gev <- function(jay, ri, kay)
+ gamma(ri - jay * kay + 1) / gamma(ri)
+ tmp2 <- (1 - kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
+ tmp1 <- (1 - 2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
+ k0 <- (1 - 2*kay)
k1 <- k0 * kay
k2 <- k1 * kay
k3 <- k2 * kay # kay^3 * (1-2*kay)
@@ -635,9 +612,9 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
wz[, iam(1, 3, M)] <- (tmp1 * temp13 - tmp2) / (sigma * k2)
wz[, iam(2, 2, M)] <- (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
wz[, iam(2, 3, M)] <- (r.vec*k1*ddd + tmp1 *
- temp23 - tmp2 - r.vec*k0) / (sigma * k3)
+ temp23 - tmp2 - r.vec*k0) / (sigma * k3)
wz[, iam(3, 3, M)] <- (2*tmp1*(-temp13) + tmp2 +
- r.vec*k0*temp33)/(k3*kay)
+ r.vec*k0*temp33) / (k3*kay)
if (any(is.zero)) {
if (ncol(y) > 1)
@@ -647,19 +624,19 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2)/sigma[is.zero]^2
wz[is.zero, iam(3, 3, M)] <- 2.4236
wz[is.zero, iam(1, 2, M)] <-
- (digamma(2) + 2 * (EulerM-1)) / sigma[is.zero]^2
+ (digamma(2) + 2 * (EulerM - 1)) / sigma[is.zero]^2
wz[is.zero, iam(1, 3, M)] <-
- -(trigamma(1) / 2 + digamma(1) * (digamma(1)/2+1)) / sigma[is.zero]
+ -(trigamma(1) / 2 + digamma(1) * (digamma(1)/2 + 1)) / sigma[is.zero]
wz[is.zero, iam(2, 3, M)] <-
- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) +
- 2*dgammadx(1, 3)/3) / sigma[is.zero]
+ (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1, 3) / 3) / sigma[is.zero]
if (FALSE ) {
wz[, iam(1, 2, M)] <- 2 * r.vec / sigma^2
- wz[, iam(2, 2, M)] <- -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
- (4 * dgammadx(r.vec+1, deriv.arg = 1) -
- 3 * dgammadx(r.vec+1,
+ wz[, iam(2, 2, M)] <- -4 * r.vec * digamma(r.vec + 1) + 2 * r.vec +
+ (4 * dgammadx(r.vec + 1, deriv.arg = 1) -
+ 3 * dgammadx(r.vec + 1,
deriv.arg = 2)) / gamma(r.vec) # Not checked
}
}
@@ -704,19 +681,25 @@ dgammadx <- function(x, deriv.arg = 1) {
+ gevff <-
+ function(
+ llocation = "identitylink",
+ lscale = "loge",
+ lshape = logoff(offset = 0.5),
+ percentiles = c(95, 99),
+ ilocation = NULL, iscale = NULL, ishape = NULL,
+ imethod = 1,
- egev <- function(llocation = "identitylink",
- lscale = "loge",
- lshape = logoff(offset = 0.5),
- percentiles = c(95, 99),
- iscale = NULL, ishape = NULL,
- imethod = 1, gshape = c(-0.45, 0.45),
- tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"),
- giveWarning = TRUE,
- zero = c("scale", "shape")) {
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
+ gprobs.y = (1:9)/10, # 20160713; grid for finding locat.init
+ gscale.mux = exp((-5:5)/6), # exp(-5:5),
+ gshape = (-5:5) / 11 + 0.01, # c(-0.45, 0.45),
+ iprobs.y = NULL,
+
+ tolshape0 = 0.001,
+ type.fitted = c("percentiles", "mean"),
+ zero = c("scale", "shape")) {
+
+ ilocat <- ilocation
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -737,25 +720,23 @@ dgammadx <- function(x, deriv.arg = 1) {
lshape <- attr(eshape, "function.name")
+ if (length(percentiles) &&
+ (!is.Numeric(percentiles, positive = TRUE) ||
+ max(percentiles) >= 100))
+ stop("bad input for argument 'percentiles'")
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ positive = TRUE, integer.valued = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(ishape) && !is.Numeric(ishape))
+ stop("bad input for argument 'ishape'")
- if (!is.Numeric(gshape, length.arg = 2) ||
- gshape[1] >= gshape[2])
- stop("bad input for argument 'gshape'")
- if (length(percentiles) &&
- (!is.Numeric(percentiles, positive = TRUE) ||
- max(percentiles) >= 100))
- stop("bad input for argument 'percentiles'")
- if (!is.Numeric(imethod, length.arg = 1,
- positive = TRUE, integer.valued = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
- if (length(ishape) && !is.Numeric(ishape))
- stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, length.arg = 1,
- positive = TRUE) ||
- tolshape0 > 0.1)
- stop("bad input for argument 'tolshape0'")
+ if (!is.Numeric(tolshape0, length.arg = 1,
+ positive = TRUE) ||
+ tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
new("vglmff",
@@ -788,101 +769,141 @@ dgammadx <- function(x, deriv.arg = 1) {
initialize = eval(substitute(expression({
+ temp16 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = FALSE,
+ Is.integer.y = FALSE,
+ ncol.w.max = Inf, # Differs from [e]gev()!
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp16$w
+ y <- temp16$y
+
M1 <- extra$M1 <- 3
- ncoly <- ncol(y)
+ NOS <- ncoly <- ncol(y)
extra$ncoly <- ncoly
extra$type.fitted <- .type.fitted
extra$M1 <- M1
+ M <- M1 * ncoly # Is now true!
- predictors.names <-
- c(namesof("location", .llocat , earg = .elocat , short = TRUE),
- namesof("scale", .lscale , earg = .escale , short = TRUE),
- namesof("shape", .lshape , earg = .eshape , short = TRUE))
+ mynames1 <- param.names("location", NOS)
+ mynames2 <- param.names("scale", NOS)
+ mynames3 <- param.names("shape", NOS)
+ predictors.names <- c(
+ namesof(mynames1, .llocat , earg = .elocat , short = TRUE),
+ namesof(mynames2, .lscale , earg = .escale , short = TRUE),
+ namesof(mynames3, .lshape , earg = .eshape , short = TRUE))[
+ interleave.VGAM(M, M1 = M1)]
- if (ncol(as.matrix(y)) != 1)
- stop("response must be a vector or one-column matrix")
+ extra$percentiles <- .percentiles
+
+
+
+
+ gprobs.y <- .gprobs.y
+ ilocat <- .ilocat # Default is NULL
+ if (length(ilocat))
+ ilocat <- matrix(ilocat, n, NOS, byrow = TRUE)
if (!length(etastart)) {
- init.sig <- if (length( .iscale ))
- rep( .iscale , length.out = length(y)) else NULL
- init.xi <- if (length( .ishape ))
- rep( .ishape , length.out = length(y)) else NULL
- eshape <- .eshape
- if ( .lshape == "extlogit" && length(init.xi) &&
- (any(init.xi <= eshape$min | init.xi >= eshape$max)))
+
+
+ if ( .lshape == "extlogit" && length( .ishape ) &&
+ (any( .ishape <= eshape$min | .ishape >= eshape$max)))
stop("bad input for argument 'eshape'")
- if ( .imethod == 1) {
- nvector <- 4:10 # Arbitrary; could be made an argument
- ynvector <- quantile(y, probs = 1-1/nvector)
- objecFunction <- -Inf # Actually the log-likelihood
- est.sigma <- !length(init.sig)
- gshape <- .gshape
- temp234 <- if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], length.out = 12)
- for (xi.try in temp234) {
- xvec <- if (abs(xi.try) < .tolshape0 ) log(nvector) else
- (nvector^xi.try - 1) / xi.try
- fit0 <- lsfit(x = xvec, y=ynvector, intercept = TRUE)
- if (est.sigma) {
- sigmaTry <- rep(fit0$coef["X"], length.out = length(y))
- } else {
- sigmaTry <- init.sig
- }
- muTry <- rep(fit0$coef["Intercept"], length.out = length(y))
- llTry <- egev(giveWarning = FALSE)@loglikelihood(mu = NULL,
- y = y, w = w,
- residuals = FALSE,
- eta <- cbind(theta2eta(muTry, .llocat , earg = .elocat ),
- theta2eta(sigmaTry, .lscale , earg = .escale ),
- theta2eta(xi.try, .lshape , earg = .eshape )))
- if (llTry >= objecFunction) {
- if (est.sigma)
- init.sig <- sigmaTry
- init.mu <- rep(muTry, length.out = length(y))
- objecFunction <- llTry
- bestxi <- xi.try
- }
- }
- if (!length(init.xi))
- init.xi <- rep(bestxi, length.out = length(y))
+
+
+
+
+
+ locat.init <-
+ shape.init <-
+ scale.init <- matrix(NA_real_, n, NOS)
+
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gscale.mux <- .gscale.mux # gscale.mux is on a relative scale
+ gshape <- .gshape
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+
+
+ scale.init.jay <- sd(y[, jay]) * sqrt(6) / pi # Based on the Gumbel
+ scale.init.jay <- gscale.mux * scale.init.jay
+ if (length( .iscale ))
+ scale.init.jay <- .iscale # iscale is on an absolute scale
+
+
+ if (length( .ishape ))
+ gshape <- .ishape # ishape is on an absolute scale
+
+
+
+ locat.init.jay <- if ( .imethod == 1) {
+ quantile(y[, jay], probs = gprobs.y) # + 1/16
} else {
- init.xi <- rep(if (length(init.xi)) init.xi else 0.05,
- length.out = length(y))
- if (!length(init.sig))
- init.sig <- rep(sqrt(6 * var(y)) / pi, length.out = length(y))
- EulerM <- -digamma(1)
- init.mu <- rep(median(y) - EulerM * init.sig,
- length.out = length(y))
+
+ weighted.mean(y[, jay], w = w[, jay])
}
- bad <- (1 + init.xi * (y - init.mu) / init.sig <= 0)
- if (fred <- sum(bad, na.rm = TRUE)) {
- warning(fred, "observations violating boundary constraints ",
- "while initializing. Taking corrective action")
- init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
+ if (length(ilocat))
+ locat.init.jay <- ilocat[, jay]
+
+
+
+ gevff.Loglikfun3 <- function(shapeval, locatval, scaleval,
+ y, x, w, extraargs) {
+ sum(c(w) * dgev(x = y,
+ locat = locatval,
+ scale = scaleval,
+ shape = shapeval, log = TRUE), na.rm = TRUE)
}
- extra$percentiles <- .percentiles
+ try.this <-
+ grid.search3(gshape, locat.init.jay, scale.init.jay,
+ objfun = gevff.Loglikfun3,
+ y = y[, jay], w = w[, jay],
+ ret.objfun = TRUE, # Last value is the loglik
+ extraargs = NULL)
+
+ shape.init[, jay] <- try.this["Value1" ]
+ locat.init[, jay] <- try.this["Value2" ]
+ scale.init[, jay] <- try.this["Value3" ]
+ } # for (jay ...)
+
+
etastart <-
- cbind(theta2eta(init.mu, .llocat , earg = .elocat ),
- theta2eta(init.sig, .lscale , earg = .escale ),
- theta2eta(init.xi, .lshape , earg = .eshape ))
- }
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
+ etastart <-
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+ }
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
+ .ilocat = ilocat, .iscale = iscale, .ishape = ishape,
+ .gshape = gshape,
+
+ .gprobs.y = gprobs.y, .gscale.mux = gscale.mux,
+ .iprobs.y = iprobs.y,
+
.percentiles = percentiles, .tolshape0 = tolshape0,
- .imethod = imethod, .type.fitted = type.fitted,
- .giveWarning= giveWarning,
- .iscale = iscale, .ishape = ishape, .gshape = gshape ))),
+ .imethod = imethod, .type.fitted = type.fitted ))),
+
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
type.fitted <-
if (length(extra$type.fitted)) {
extra$type.fitted
@@ -894,48 +915,55 @@ dgammadx <- function(x, deriv.arg = 1) {
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
-
-
- is.zero <- (abs(xi) < .tolshape0 )
- cent <- extra$percentiles
- LP <- length(cent)
+ pcent <- extra$percentiles
+ LP <- length(pcent)
if (type.fitted == "percentiles" && # Upward compatibility:
LP > 0) {
- fv <- matrix(NA_real_, nrow(eta), LP)
+ fv <- matrix(NA_real_, nrow(eta), LP * NOS)
+
+
+
+ icol <- (0:(NOS-1)) * LP
for (ii in 1:LP) {
- yp <- -log(cent[ii] / 100)
- fv[!is.zero, ii] <- loc[!is.zero] - sigma[!is.zero] *
- (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
- fv[is.zero, ii] <- loc[is.zero] - sigma[is.zero] * log(yp)
+ icol <- icol + 1
+ fv[, icol] <- qgev(pcent[ii] / 100, loc = Locat, scale = Scale,
+ shape = shape)
}
- dimnames(fv) <- list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep = ""))
+ colnames.fv <- rep_len(paste(as.character(pcent), "%", sep = ""), LP*NOS)
+ dimnames(fv) <- list(dimnames(eta)[[1]], colnames.fv)
} else {
+ is.zero <- (abs(shape) < .tolshape0 )
EulerM <- -digamma(1)
- fv <- loc + sigma * EulerM # When xi = 0, is Gumbel
+ fv <- loc + sigma * EulerM # When shape == 0 it is Gumbel
fv[!is.zero] <- loc[!is.zero] + sigma[!is.zero] *
- (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
- fv[xi >= 1] <- NA # Mean exists only if xi < 1.
+ (gamma(1 - shape[!is.zero]) - 1) / shape[!is.zero]
+ fv[shape >= 1] <- NA # Mean exists only if shape < 1.
}
fv
}, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
.type.fitted = type.fitted, .tolshape0 = tolshape0 ))),
last = eval(substitute(expression({
- misc$links <- c(location = .llocat,
- scale = .lscale ,
- shape = .lshape)
+ temp0303 <- c(rep_len( .llocat , NOS),
+ rep_len( .lscale , NOS),
+ rep_len( .lshape , NOS))
+ names(temp0303) <- c(mynames1, mynames2, mynames3)
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+ misc$link <- temp0303 # Already named
- misc$earg <- list(location = .elocat,
- scale = .escale,
- shape = .eshape)
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:NOS) {
+ misc$earg[[M1*ii-2]] <- .elocat
+ misc$earg[[M1*ii-1]] <- .escale
+ misc$earg[[M1*ii ]] <- .eshape
+ }
- misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ misc$true.mu <- !length( .percentiles ) # @fitted is not a true mu
misc$percentiles <- .percentiles
misc$tolshape0 <- .tolshape0
- misc$expected <- TRUE
- if (any(xi < -0.5))
+ if (any(shape < -0.5))
warning("some values of the shape parameter are less than -0.5")
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
@@ -943,17 +971,18 @@ dgammadx <- function(x, deriv.arg = 1) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- mmu <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) * dgev(x = y, location = mmu, scale = sigma,
- shape = xi, tolshape0 = .tolshape0 ,
- log = TRUE, oobounds.log = -1.0e04,
- giveWarning = .giveWarning )
+ ll.elts <- c(w) * dgev(x = y, location = Locat, scale = Scale,
+ shape = shape, tolshape0 = .tolshape0 ,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -962,81 +991,130 @@ dgammadx <- function(x, deriv.arg = 1) {
}
}, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
- .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
- vfamily = c("egev", "vextremes"),
+ .tolshape0 = tolshape0 ))),
+ vfamily = c("gevff", "vextremes"),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+ shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , .eshape )
+
+ okay1 <- all(is.finite(Locat)) &&
+ all(is.finite(Scale)) && all(Scale > 0) &&
+ all(is.finite(shape))
+ okay.support <-
+ if (okay1) {
+ Boundary <- Locat - Scale / shape
+ all((shape == 0) ||
+ (shape < 0 & y < Boundary) ||
+ (shape > 0 & y > Boundary))
+ } else {
+ TRUE
+ }
+ if (!okay.support)
+ warning("current parameter estimates are at the boundary of ",
+ "the parameter space. ",
+ "Try fitting a Gumbel model instead.")
+ okay1 && okay.support
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .tolshape0 = tolshape0 ))),
+
+
+
+
+
deriv = eval(substitute(expression({
- Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- xi <- eta2theta(eta[, 3], .lshape , earg = .eshape)
- is.zero <- (abs(xi) < .tolshape0)
- zedd <- (y - Locat) / sigma
- A <- 1 + xi * zedd
- dA.dxi <- zedd
- dA.dmu <- -xi / sigma
- dA.dsigma <- -xi * zedd / sigma
- pow <- 1 + 1/xi
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
+
+
+ is.zero <- (abs(shape) < .tolshape0 )
+ zedd <- (y - Locat) / Scale
+ A <- 1 + shape * zedd
+ dA.dlocat <- -shape / Scale
+ dA.dshape <- zedd
+ dA.dScale <- -shape * zedd / Scale
+ pow <- 1 + 1/shape
+
if (any(bad <- A <= 0, na.rm = TRUE))
stop(sum(bad, na.rm = TRUE),
" observations violating boundary constraints in '@deriv'")
- AA <- 1/(xi*A^pow)- pow/A
- dl.dmu <- dA.dmu * AA
- dl.dsi <- dA.dsigma * AA - 1/sigma
- dl.dxi <- log(A)/xi^2 - pow * dA.dxi / A -
- (log(A)/xi^2 - dA.dxi /(xi*A)) * A^(-1/xi)
+
+ AA <- 1 / (shape * A^pow)- pow / A
+ dl.dlocat <- dA.dlocat * AA
+ dl.dscale <- dA.dScale * AA - 1/Scale
+ dl.dshape <- log(A)/shape^2 - pow * dA.dshape / A -
+ (log(A)/shape^2 - dA.dshape / (shape*A)) * A^(-1/shape)
+
if (any(is.zero)) {
- ezedd <- exp(-zedd[is.zero])
- dl.dmu[is.zero] <- (1 - ezedd) / sigma[is.zero]
- dl.dsi[is.zero] <- (zedd[is.zero] *
- (1 - ezedd) - 1) / sigma[is.zero]
- dl.dxi[is.zero] <- zedd[is.zero] *
- ((1 - ezedd) * zedd[is.zero] / 2 - 1)
+ omez <- -expm1(-zedd[is.zero])
+ zedd0 <- zedd[is.zero]
+ dl.dlocat[is.zero] <- omez / Scale[is.zero]
+ dl.dscale[is.zero] <- (zedd0 * omez - 1) / Scale[is.zero]
+ dl.dshape[is.zero] <- zedd0 * (omez * zedd0 / 2 - 1)
}
- dmu.deta <- dtheta.deta(Locat, .llocat , earg = .elocat )
- dsi.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
- dxi.deta <- dtheta.deta(xi, .lshape , earg = .eshape)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsi * dsi.deta,
- dl.dxi * dxi.deta)
+
+ dlocat.deta <- dtheta.deta(Locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+ ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta,
+ dl.dshape * dshape.deta)
+ ans <- ans[, interleave.VGAM(M, M1 = M1)]
+ ans
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
.tolshape0 = tolshape0 ))),
+
weight = eval(substitute(expression({
+ EulerM <- -digamma(1)
+
+
bad <- A <= 0
if (any(bad, na.rm = TRUE))
stop(sum(bad, na.rm = TRUE),
" observations violating boundary constraints in '@weight'")
- kay <- -xi # for the formulae
- kay[abs(kay-0.5) < .tolshape0] <- 0.501
- temp100 <- gamma(2-kay)
- pp <- (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
- qq <- temp100 * (digamma(1-kay) - (1-kay)/kay)
- wz <- matrix(NA_real_, n, 6)
- wz[, iam(1, 1, M)] <- pp / sigma^2
- wz[, iam(2, 2, M)] <- (1 - 2*temp100 + pp) / (sigma * kay)^2
- EulerM <- -digamma(1)
- wz[, iam(3, 3, M)] <- (pi^2 / 6 + (1-EulerM-1/kay)^2 +
- (2*qq + pp/kay)/kay) / kay^2
- wz[, iam(1, 2, M)] <- (pp - temp100) / (sigma^2 * kay)
- wz[, iam(1, 3, M)] <- -(qq + pp/kay) / (sigma * kay)
- wz[, iam(2, 3, M)] <- (1-EulerM - (1-temp100)/kay - qq -
- pp/kay) / (sigma * kay^2)
+
+
+ shape[abs(shape + 0.5) < .tolshape0 ] <- -0.499
+ temp100 <- gamma(2 + shape)
+ pp <- (1 + shape)^2 * gamma(1 + 2*shape) # gamma(0) is undefined so shape != -0.5
+ qq <- temp100 * (digamma(1 + shape) + (1 + shape)/shape)
+ ned2l.dlocat2 <- pp / Scale^2
+ ned2l.dscale2 <- (1 - 2*temp100 + pp) / (Scale * shape)^2
+ ned2l.dshape2 <- (pi^2 / 6 + (1 - EulerM + 1/shape)^2 -
+ (2*qq - pp/shape)/shape) / shape^2
+ ned2l.dlocsca <- -(pp - temp100) / (Scale^2 * shape)
+ ned2l.dscasha <- -(1 - EulerM + (1 - temp100)/shape - qq +
+ pp/shape) / (Scale * shape^2)
+ ned2l.dlocsha <- -(qq - pp/shape) / (Scale * shape)
+
if (any(is.zero)) {
- wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma[is.zero]^2
- wz[is.zero, iam(3, 3, M)] <- 2.4236
- wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma[is.zero]^2
- wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
- (digamma(1)/2+1))/sigma[is.zero]
- wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) +
- 2*dgammadx(1, 3) / 3) / sigma[is.zero]
+ ned2l.dscale2[is.zero] <- (pi^2/6 + (1-EulerM)^2) / Scale[is.zero]^2
+ ned2l.dshape2[is.zero] <- 2.4236
+ ned2l.dlocsca[is.zero] <- (digamma(2) + 2*(EulerM - 1)) / Scale[is.zero]^2
+ ned2l.dscasha[is.zero] <- -( -dgammadx(2, 3) / 6 + dgammadx(1, 1) +
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1, 3) / 3) / Scale[is.zero]
+ ned2l.dlocsha[is.zero] <- (trigamma(1) / 2 + digamma(1)*
+ (digamma(1) / 2 + 1)) / Scale[is.zero]
}
- wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
- wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
- wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
- wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
- wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
- wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
- c(w) * wz
+
+
+
+ wz <- array( c(c(w) * ned2l.dlocat2 * dlocat.deta^2,
+ c(w) * ned2l.dscale2 * dscale.deta^2,
+ c(w) * ned2l.dshape2 * dshape.deta^2,
+ c(w) * ned2l.dlocsca * dlocat.deta * dscale.deta,
+ c(w) * ned2l.dscasha * dscale.deta * dshape.deta,
+ c(w) * ned2l.dlocsha * dlocat.deta * dshape.deta),
+ dim = c(n, NOS, 6))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
+ wz
}), list( .eshape = eshape, .tolshape0 = tolshape0 ))))
}
@@ -1044,6 +1122,7 @@ dgammadx <- function(x, deriv.arg = 1) {
+
rgumbel <- function(n, location = 0, scale = 1) {
answer <- location - scale * log(-log(runif(n)))
answer[scale <= 0] <- NaN
@@ -1157,7 +1236,6 @@ pgumbel <- function(q, location = 0, scale = 1,
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
-
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -1166,7 +1244,7 @@ pgumbel <- function(q, location = 0, scale = 1,
new("vglmff",
blurb = c("Gumbel distribution for extreme value regression\n",
"Links: ",
- namesof("location", llocat, earg = elocat ), ", ",
+ namesof("location", llocat, earg = elocat ), ", ",
namesof("scale", lscale, earg = escale )),
constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -1202,6 +1280,12 @@ pgumbel <- function(q, location = 0, scale = 1,
y <- -t(apply(-y, 1, sort, na.last = TRUE))
+ w <- as.matrix(w)
+ if (ncol(w) != 1)
+ stop("the 'weights' argument must be a vector or ",
+ "1-column matrix")
+
+
r.vec <- rowSums(cbind(!is.na(y)))
if (any(r.vec == 0))
@@ -1212,13 +1296,13 @@ pgumbel <- function(q, location = 0, scale = 1,
yiri <- y[cbind(1:nrow(y), r.vec)]
sc.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else {3 * (rowMeans(y, na.rm = TRUE) - yiri)}
- sc.init <- rep(sc.init, length = nrow(y))
+ sc.init <- rep_len(sc.init, nrow(y))
sc.init[sc.init <= 0.0001] <- 1 # Used to be .iscale
loc.init <- yiri + sc.init * log(r.vec)
} else {
sc.init <- if (is.Numeric( .iscale, positive = TRUE))
- .iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
- sc.init <- rep(sc.init, length.out = n)
+ .iscale else 1.1 * (0.01 + sqrt(6 * var(y))) / pi
+ sc.init <- rep_len(sc.init, n)
EulerM <- -digamma(1)
loc.init <- (y - sc.init * EulerM)
loc.init[loc.init <= 0] <- min(y)
@@ -1243,23 +1327,23 @@ pgumbel <- function(q, location = 0, scale = 1,
loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
- Percentiles <- extra$percentiles
- LP <- length(Percentiles) # may be 0
+ pcent <- extra$percentiles
+ LP <- length(pcent) # may be 0
if (LP > 0) {
mpv <- extra$mpv
mu <- matrix(NA_real_, nrow(eta), LP + mpv) # LP may be 0
Rvec <- extra$R
for (ii in 1:LP) {
ci <- if (is.Numeric(Rvec))
- Rvec * (1 - Percentiles[ii] / 100) else
- -log(Percentiles[ii] / 100)
+ Rvec * (1 - pcent[ii] / 100) else
+ -log(pcent[ii] / 100)
mu[, ii] <- loc - sigma * log(ci)
}
if (mpv)
mu[, ncol(mu)] <- loc - sigma * log(log(2))
- dmn2 <- paste(as.character(Percentiles), "%", sep = "")
+ dmn2 <- paste(as.character(pcent), "%", sep = "")
if (mpv)
dmn2 <- c(dmn2, "MPV")
dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
@@ -1334,6 +1418,7 @@ pgumbel <- function(q, location = 0, scale = 1,
dl.dsigma * dsigma.deta)
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
+
weight = eval(substitute(expression({
temp6 <- digamma(r.vec) # , integer = T
temp5 <- digamma(1:max(r.vec)) # , integer=T
@@ -1350,7 +1435,6 @@ pgumbel <- function(q, location = 0, scale = 1,
wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dlocat.deta^2
wz[, iam(2, 1, M)] <- wz[, iam(2, 1, M)] * dsigma.deta * dlocat.deta
wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsigma.deta^2
-
c(w) * wz
}), list( .lscale = lscale ))))
}
@@ -1370,12 +1454,9 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
stop("bad input for argument 'shape'")
ans <- numeric(use.n)
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n);
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
scase <- abs(shape) < sqrt( .Machine$double.eps )
@@ -1393,60 +1474,43 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt( .Machine$double.eps ),
- oobounds.log = -Inf, giveWarning = FALSE) {
+ tolshape0 = sqrt( .Machine$double.eps )) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
-
- if (oobounds.log > 0)
- stop("bad input for argument 'oobounds.log'")
-
+
+ oobounds.log <- -Inf
if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE))
stop("bad input for argument 'tolshape0'")
L <- max(length(x), length(location), length(scale), length(shape))
- if (length(shape) != L)
- shape <- rep(shape, length.out = L)
- if (length(location) != L)
- location <- rep(location, length.out = L)
- if (length(scale) != L)
- scale <- rep(scale, length.out = L)
- if (length(x) != L)
- x <- rep(x, length.out = L)
-
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(location) != L) location <- rep_len(location, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
+ if (length(x) != L) x <- rep_len(x, L)
-
- logdensity <- rep(log(0), length.out = L)
+ logdensity <- rep_len(log(0), L)
scase <- abs(shape) < tolshape0
nscase <- sum(scase)
if (L - nscase) {
zedd <- (x-location) / scale
xok <- (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
logdensity[xok] <- -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
- log(scale[xok])
+ log(scale[xok])
outofbounds <- (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
if (any(outofbounds)) {
logdensity[outofbounds] <- oobounds.log
- no.oob <- sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
}
if (nscase) {
xok <- scase & (x > location)
logdensity[xok] <- -(x[xok] - location[xok]) / scale[xok] -
- log(scale[xok])
+ log(scale[xok])
outofbounds <- scase & (x <= location)
if (any(outofbounds)) {
- logdensity[outofbounds] <- oobounds.log
- no.oob <- sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
+ logdensity[outofbounds] <- oobounds.log
}
}
@@ -1458,8 +1522,6 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
pgpd <- function(q, location = 0, scale = 1, shape = 0,
lower.tail = TRUE, log.p = FALSE) {
-
-
if (!is.logical(lower.tail) || length(lower.tail ) != 1)
stop("bad input for argument 'lower.tail'")
if (!is.logical(log.p) || length(log.p) != 1)
@@ -1469,14 +1531,10 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0,
use.n <- max(length(q), length(location), length(scale), length(shape))
ans <- numeric(use.n)
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
- if (length(q) != use.n)
- q <- rep(q, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
+ if (length(q) != use.n) q <- rep_len(q, use.n)
zedd <- (q - location) / scale
use.zedd <- pmax(zedd, 0)
@@ -1507,7 +1565,6 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0,
qgpd <- function(p, location = 0, scale = 1, shape = 0,
lower.tail = TRUE, log.p = FALSE) {
-
if (!is.logical(lower.tail) || length(lower.tail ) != 1)
stop("bad input for argument 'lower.tail'")
@@ -1524,14 +1581,10 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
use.n <- max(length(p), length(location), length(scale), length(shape))
ans <- numeric(use.n)
- if (length(shape) != use.n)
- shape <- rep(shape, length.out = use.n)
- if (length(location) != use.n)
- location <- rep(location, length.out = use.n)
- if (length(scale) != use.n)
- scale <- rep(scale, length.out = use.n)
- if (length(p) != use.n)
- p <- rep(p, length.out = use.n)
+ if (length(shape) != use.n) shape <- rep_len(shape, use.n)
+ if (length(location) != use.n) location <- rep_len(location, use.n)
+ if (length(scale) != use.n) scale <- rep_len(scale, use.n)
+ if (length(p) != use.n) p <- rep_len(p, use.n)
@@ -1570,8 +1623,9 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
ishape = NULL,
tolshape0 = 0.001,
type.fitted = c("percentiles", "mean"),
- giveWarning = TRUE,
imethod = 1,
+
+
zero = "shape") {
type.fitted <- match.arg(type.fitted,
@@ -1586,10 +1640,9 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
lshape <- attr(eshape, "function.name")
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
if (!is.Numeric(threshold))
stop("bad input for argument 'threshold'")
+
if (!is.Numeric(imethod, length.arg = 1,
positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
@@ -1599,18 +1652,18 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
(!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
+
if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE) ||
tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
-
new("vglmff",
blurb = c("Generalized Pareto distribution\n",
- "Links: ",
- namesof("scale", link = lscale, earg = escale ), ", ",
- namesof("shape", link = lshape, earg = eshape )),
- constraints = eval(substitute(expression({
+ "Links: ",
+ namesof("scale", link = lscale, earg = escale), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
+ constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -1706,9 +1759,9 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
init.sig <- matrix(init.sig, n, ncoly, byrow = TRUE)
- init.sig[init.sig <= 0.0] <- 0.01 # sigma > 0
- init.xii[init.xii <= -0.5] <- -0.40 # Fisher scoring works if xi > -0.5
- init.xii[init.xii >= 1.0] <- 0.90 # Mean/var exists if xi < 1 / 0.5
+ init.sig[init.sig <= 0.0] <- 0.01 # sigma > 0
+ init.xii[init.xii <= -0.5] <- -0.40 # Fisher scoring works if xi > -0.5
+ init.xii[init.xii >= 1.0] <- 0.90 # Mean/var exists if xi < 1 / 0.5
if ( .lshape == "loge")
init.xii[init.xii <= 0.0] <- 0.05
@@ -1832,9 +1885,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <-
- c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lscale , ncoly),
+ rep_len( .lshape , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1845,9 +1897,6 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
misc$earg[[M1*ii ]] <- .eshape
}
- misc$M1 <- M1
- misc$expected <- TRUE
- misc$multipleResponses <- TRUE
misc$true.mu <- FALSE # @fitted is not a true mu
misc$percentiles <- .percentiles
@@ -1868,21 +1917,47 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <-
+ ll.elts <-
c(w) * dgpd(x = y, location = Threshold, scale = sigma,
- shape = Shape, tolshape0 = .tolshape0,
- giveWarning = .giveWarning,
- log = TRUE, oobounds.log = -1.0e04)
+ shape = Shape, tolshape0 = .tolshape0 ,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .tolshape0 = tolshape0, .giveWarning= giveWarning,
+ }, list( .tolshape0 = tolshape0,
.escale = escale, .eshape = eshape,
.lscale = lscale, .lshape = lshape ))),
vfamily = c("gpd", "vextremes"),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
+ Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+ Locat <- extra$threshold
+ okay1 <- all(is.finite(Locat)) &&
+ all(is.finite(sigma)) && all(sigma > 0) &&
+ all(is.finite(Shape))
+ okay.support <-
+ if (okay1) {
+ Boundary <- Locat - sigma / Shape
+ all((y > Locat) &
+ ((Shape < 0 & y < Boundary) ||
+ (Shape >= 0 & y < Inf)))
+ } else {
+ TRUE
+ }
+ if (!okay.support)
+ warning("current parameter estimates are at the boundary of ",
+ "the parameter space. ",
+ "This model needs attention.")
+ okay1 && okay.support
+ }, list( .tolshape0 = tolshape0,
+ .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
+
+
deriv = eval(substitute(expression({
M1 <- 2
sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
@@ -1899,12 +1974,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
"observations violating boundary constraints\n")
flush.console()
}
- if (any(is.zero <- (abs(Shape) < .tolshape0))) {
+ if (any(is.zero <- (abs(Shape) < .tolshape0 ))) {
}
igpd <- !is.zero & !bad
iexp <- is.zero & !bad
- dl.dShape <- dl.dsigma <- rep(0, length.out = length(y))
+ dl.dShape <- dl.dsigma <- rep_len(0, length(y))
dl.dsigma[igpd] <- ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
Shape[igpd] * ystar[igpd])-1) / sigma[igpd]
@@ -1916,13 +1991,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
dShape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
- myderiv <-
- c(w) * cbind(dl.dsigma * dsigma.deta,
- dl.dShape * dShape.deta)
+ myderiv <- c(w) * cbind(dl.dsigma * dsigma.deta,
+ dl.dShape * dShape.deta)
myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .tolshape0 = tolshape0,
- .lscale = lscale, .escale = escale,
- .lshape = lshape, .eshape = eshape ))),
+ .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape ))),
weight = eval(substitute(expression({
@@ -2056,11 +2130,14 @@ setMethod("guplot", "vlm",
- egumbel <- function(llocation = "identitylink",
- lscale = "loge",
- iscale = NULL,
- R = NA, percentiles = c(95, 99),
- mpv = FALSE, zero = NULL) {
+
+
+ gumbelff <- function(llocation = "identitylink",
+ lscale = "loge",
+ iscale = NULL,
+ R = NA, percentiles = c(95, 99),
+ zero = "scale", # Was NULL in egumbel()
+ mpv = FALSE) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -2073,17 +2150,17 @@ setMethod("guplot", "vlm",
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
+
if (length(percentiles) &&
(!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
-
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
new("vglmff",
- blurb = c("Gumbel distribution (univariate response)\n\n",
+ blurb = c("Gumbel distribution (multiple responses allowed)\n\n",
"Links: ",
namesof("location", llocat, earg = elocat, tag = TRUE), ", ",
namesof("scale", lscale, earg = escale, tag = TRUE), "\n",
@@ -2096,12 +2173,11 @@ setMethod("guplot", "vlm",
}), list( .zero = zero ))),
-
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
expected = TRUE,
- multipleResponses = FALSE,
+ multipleResponses = TRUE,
parameters.names = c("location", "scale"),
llocation = .llocat ,
lscale = .lscale ,
@@ -2114,20 +2190,37 @@ setMethod("guplot", "vlm",
initialize = eval(substitute(expression({
- y <- cbind(y)
- if (ncol(y) > 1)
- stop("Use gumbel() to handle multivariate responses")
- if (min(y) <= 0)
- stop("all response values must be positive")
+ temp16 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = FALSE,
+ Is.integer.y = FALSE,
+ ncol.w.max = Inf, # Differs from gumbel()!
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp16$w
+ y <- temp16$y
+
+
+ M1 <- extra$M1 <- 2
+ NOS <- ncoly <- ncol(y)
+ extra$ncoly <- ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly # Is now true!
+ mynames1 <- param.names("location", NOS)
+ mynames2 <- param.names("scale", NOS)
+ predictors.names <- c(
+ namesof(mynames1, .llocat , earg = .elocat , short = TRUE),
+ namesof(mynames2, .lscale , earg = .escale , short = TRUE))[
+ interleave.VGAM(M, M1 = M1)]
+
- predictors.names <-
- c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
extra$R <- .R
@@ -2135,65 +2228,112 @@ setMethod("guplot", "vlm",
extra$percentiles <- .percentiles
if (!length(etastart)) {
- sca.init <- if (is.Numeric( .iscale, positive = TRUE))
- .iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
- sca.init <- rep(sca.init, length.out = n)
+ locat.init <-
+ scale.init <- matrix(NA_real_, n, NOS)
EulerM <- -digamma(1)
- loc.init <- (y - sca.init * EulerM)
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+
+
+ scale.init.jay <- 1.5 * (0.01 + sqrt(6 * var(y[, jay]))) / pi
+ if (length( .iscale ))
+ scale.init.jay <- .iscale # iscale is on an absolute scale
+ scale.init[, jay] <- scale.init.jay
+
+ locat.init[, jay] <- (y[, jay] - scale.init[, jay] * EulerM)
+ } # NOS
+
+
etastart <-
- cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
- theta2eta(sca.init, .lscale , earg = .escale ))
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
+ etastart <-
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
.iscale = iscale,
.R = R, .mpv = mpv, .percentiles = percentiles ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+
EulerM <- -digamma(1)
- Percentiles <- extra$percentiles
+ pcent <- extra$percentiles
mpv <- extra$mpv
- LP <- length(Percentiles) # may be 0
- if (!LP) return(locat + sigma * EulerM)
- mu <- matrix(NA_real_, nrow(eta), LP + mpv)
+ LP <- length(pcent) # may be 0
+ if (!LP)
+ return(Locat + Scale * EulerM)
+
+ fv <- matrix(NA_real_, nrow(eta), (LP + mpv) * NOS)
+ dmn2 <- c(if (LP >= 1) paste(as.character(pcent), "%",
+ sep = "") else NULL,
+ if (mpv) "MPV" else NULL)
+
+ dmn2 <- rep_len(dmn2, ncol(fv))
Rvec <- extra$R
- if (1 <= LP)
- for (ii in 1:LP) {
- ci <- if (is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
- -log(Percentiles[ii] / 100)
- mu[,ii] <- locat - sigma * log(ci)
+
+ if (1 <= LP) {
+ icol <- (0:(NOS-1)) * (LP + mpv)
+
+ for (ii in 1:LP) {
+ icol <- icol + 1
+ use.p <- if (is.Numeric(Rvec))
+ exp(-Rvec * (1 - pcent[ii] / 100)) else
+ pcent[ii] / 100
+ fv[, icol] <- qgumbel(use.p, loc = Locat, scale = Scale)
}
- if (mpv)
- mu[, ncol(mu)] <- locat - sigma * log(log(2))
- dmn2 <- if (LP >= 1) paste(as.character(Percentiles), "%",
- sep = "") else NULL
- if (mpv)
- dmn2 <- c(dmn2, "MPV")
- dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
- mu
+ }
+
+ if (mpv) {
+ icol <- (0:(NOS-1)) * (LP + mpv)
+ icol <- icol + 1 + LP
+ fv[, icol] <- Locat - Scale * log(log(2))
+ }
+
+ dimnames(fv) <- list(dimnames(eta)[[1]], dmn2)
+ fv
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$link <- c(location = .llocat, scale = .lscale)
- misc$earg <- list(location = .elocat, scale = .escale)
- misc$true.mu <- !length( .percentiles) # @fitted is not a true mu
+ temp0303 <- c(rep_len( .llocat , NOS),
+ rep_len( .lscale , NOS))
+ names(temp0303) <- c(mynames1, mynames2)
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+
+ misc$link <- temp0303 # Already named
+
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:NOS) {
+ misc$earg[[M1*ii-1]] <- .elocat
+ misc$earg[[M1*ii ]] <- .escale
+ }
+
+
+ misc$true.mu <- !length( .percentiles ) # @fitted is not a true mu
misc$R <- .R
misc$mpv <- .mpv
- misc$percentiles = .percentiles
+ misc$percentiles <- .percentiles
}), list( .llocat = llocat, .lscale = lscale, .mpv = mpv,
.elocat = elocat, .escale = escale,
.R = R, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sca <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <- c(w) *
- dgumbel(x = y, location = loc, scale = sca, log = TRUE)
+ dgumbel(x = y, location = Locat, scale = Scale, log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -2202,32 +2342,48 @@ setMethod("guplot", "vlm",
}
}, list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
- vfamily = "egumbel",
+ vfamily = "gumbelff",
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ Locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+
+ okay1 <- all(is.finite(Locat)) &&
+ all(is.finite(Scale)) && all(Scale > 0)
+ okay1
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+
deriv = eval(substitute(expression({
- loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- sca <- eta2theta(eta[, 2], .lscale , earg = .escale )
- zedd <- (y-loc) / sca
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+ Locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+
+ zedd <- (y - Locat) / Scale
temp2 <- -expm1(-zedd)
- dl.dloc <- temp2 / sca
- dl.dsca <- -1/sca + temp2 * zedd / sca
- dloc.deta <- dtheta.deta(loc, .llocat , earg = .elocat)
- dsca.deta <- dtheta.deta(sca, .lscale , earg = .escale )
- c(w) * cbind(dl.dloc * dloc.deta,
- dl.dsca * dsca.deta)
+ dl.dlocat <- temp2 / Scale
+ dl.dscale <- -1/Scale + temp2 * zedd / Scale
+ dlocat.deta <- dtheta.deta(Locat, .llocat , earg = .elocat )
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+ ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ ans <- ans[, interleave.VGAM(M, M1 = M1)]
+ ans
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale ))),
- weight=expression({
+ weight = expression({
digamma1 <- digamma(1)
- ned2l.dsca2 <- ((2 + digamma1) * digamma1 + trigamma(1) + 1) / sca^2
- ned2l.dloc2 <- 1 / sca^2
- ned2l.dscaloc <- -(1 + digamma1) / sca^2
-
- wz = matrix(NA_real_, n, dimm(M = 2))
- wz[, iam(1, 1, M)] <- ned2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] <- ned2l.dsca2 * dsca.deta^2
- wz[, iam(1, 2, M)] <- ned2l.dscaloc * dloc.deta * dsca.deta
-
- c(w) * wz
+ ned2l.dscale2 <- ((2 + digamma1) * digamma1 + trigamma(1) + 1) / Scale^2
+ ned2l.dlocat2 <- 1 / Scale^2
+ ned2l.dlocsca <- -(1 + digamma1) / Scale^2
+
+ wz <- array( c(c(w) * ned2l.dlocat2 * dlocat.deta^2,
+ c(w) * ned2l.dscale2 * dscale.deta^2,
+ c(w) * ned2l.dlocsca * dlocat.deta * dscale.deta),
+ dim = c(n, NOS, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
+ wz
}))
}
@@ -2255,7 +2411,6 @@ setMethod("guplot", "vlm",
stop("valid percentiles values must be given when mean = FALSE")
-
new("vglmff",
blurb = c("Censored Gumbel distribution\n\n",
"Links: ",
@@ -2294,20 +2449,20 @@ setMethod("guplot", "vlm",
if (!length(extra$leftcensored))
- extra$leftcensored <- rep(FALSE, length.out = n)
+ extra$leftcensored <- rep_len(FALSE, n)
if (!length(extra$rightcensored))
- extra$rightcensored <- rep(FALSE, length.out = n)
+ extra$rightcensored <- rep_len(FALSE, n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale , earg = .escale , tag = FALSE))
+ c(namesof("location", .llocat, earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
if (!length(etastart)) {
sca.init <- if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * sqrt(var(y) * 6 ) / pi
- sca.init <- rep(sca.init, length.out = n)
+ sca.init <- rep_len(sca.init, n)
EulerM <- -digamma(1)
loc.init <- (y - sca.init * EulerM)
loc.init[loc.init <= 0] = min(y)
@@ -2440,12 +2595,12 @@ dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
rm(log)
L <- max(length(x), length(scale), length(shape), length(location))
- x <- rep(x, length.out = L)
- scale <- rep(scale, length.out = L)
- shape <- rep(shape, length.out = L)
- location <- rep(location, length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(location) != L) location <- rep_len(location, L)
- logdensity <- rep(log(0), length.out = L)
+ logdensity <- rep_len(log(0), L)
xok <- (x > location)
rzedd <- scale / (x - location)
logdensity[xok] <- log(shape[xok]) - (rzedd[xok]^shape[xok]) +
@@ -2615,11 +2770,11 @@ frechet.control <- function(save.weights = TRUE, ...) {
predictors.names <-
- c(namesof("scale", .lscale , earg = .escale, short = TRUE),
- namesof("shape", .lshape , earg = .eshape, short = TRUE))
+ c(namesof("scale", .lscale , earg = .escale , short = TRUE),
+ namesof("shape", .lshape , earg = .eshape , short = TRUE))
- extra$location <- rep( .location , length.out = n) # stored here
+ extra$location <- rep_len( .location , n) # stored here
if (!length(etastart)) {
@@ -2643,8 +2798,8 @@ frechet.control <- function(save.weights = TRUE, ...) {
abs.arg = TRUE)
shape.init <- if (length( .ishape ))
- rep( .ishape , length.out = n) else {
- rep(try.this , length.out = n) # variance exists if shape > 2
+ rep_len( .ishape , n) else {
+ rep_len(try.this , n) # variance exists if shape > 2
}
@@ -2654,11 +2809,11 @@ frechet.control <- function(save.weights = TRUE, ...) {
myfit <- lsfit(x = myquant, y = myobsns)
Scale.init <- if (length( .iscale ))
- rep( .iscale , length.out = n) else {
+ rep_len( .iscale , n) else {
if (all(shape.init > 1)) {
myfit$coef[2]
} else {
- rep(1.0, length.out = n)
+ rep_len(1.0, n)
}
}
@@ -2675,7 +2830,7 @@ frechet.control <- function(save.weights = TRUE, ...) {
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- ans <- rep(NA_real_, length.out = length(shape))
+ ans <- rep_len(NA_real_, length(shape))
ok <- shape > 1
ans[ok] <- loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
@@ -2803,9 +2958,8 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
new("vglmff",
blurb = c("Upper record values from a univariate normal distribution\n\n",
"Links: ",
- namesof("mean", lmean, emean, tag = TRUE), "; ",
- namesof("sd", lsdev, esdev, tag = TRUE),
- "\n",
+ namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+ namesof("sd", lsdev, earg = esdev, tag = TRUE), "\n",
"Variance: sd^2"),
constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -2817,7 +2971,7 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
- expected = TRUE,
+ expected = FALSE,
multipleResponses = FALSE,
parameters.names = c("mean", "sd"),
lmean = .lmean ,
@@ -2836,8 +2990,8 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
predictors.names <-
- c(namesof("mean", .lmean, .emean, tag = FALSE),
- namesof("sd", .lsdev, .esdev, tag = FALSE))
+ c(namesof("mean", .lmean, earg = .emean , tag = FALSE),
+ namesof("sd", .lsdev, earg = .esdev , tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -2849,17 +3003,16 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
- mean.init <- if (length( .imean )) rep( .imean ,
- length.out = n) else {
+ mean.init <- if (length( .imean )) rep_len( .imean , n) else {
if (.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
- sd.init <- if (length( .isdev)) rep( .isdev , length.out = n) else {
+ sd.init <- if (length( .isdev)) rep_len( .isdev , n) else {
if (.imethod == 1) 1*(sd(c(y))) else
if (.imethod == 2) 5*(sd(c(y))) else
.5*(sd(c(y)))
}
etastart <-
- cbind(theta2eta(rep(mean.init, len = n), .lmean, .emean ),
- theta2eta(rep(sd.init, len = n), .lsdev, .esdev ))
+ cbind(theta2eta(rep_len(mean.init, n), .lmean , .emean ),
+ theta2eta(rep_len(sd.init, n), .lsdev , .esdev ))
}
}), list( .lmean = lmean, .lsdev = lsdev,
.emean = emean, .esdev = esdev,
@@ -2868,12 +3021,10 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
linkinv = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta[, 1], .lmean, .emean )
}, list( .lmean = lmean, .emean = emean ))),
+
last = eval(substitute(expression({
misc$link <- c("mu" = .lmean , "sd" = .lsdev )
misc$earg <- list("mu" = .emean , "sd" = .esdev )
-
-
- misc$expected = FALSE
}), list( .lmean = lmean, .lsdev = lsdev,
.emean = emean, .esdev = esdev ))),
loglikelihood = eval(substitute(
@@ -2959,12 +3110,11 @@ rec.exp1.control <- function(save.weights = TRUE, ...) {
blurb = c("Upper record values from a ",
"1-parameter exponential distribution\n\n",
"Links: ",
- namesof("rate", lrate, erate, tag = TRUE),
- "\n",
+ namesof("rate", lrate, earg = erate, tag = TRUE), "\n",
"Variance: 1/rate^2"),
initialize = eval(substitute(expression({
predictors.names <-
- c(namesof("rate", .lrate , .erate , tag = FALSE))
+ c(namesof("rate", .lrate , earg = .erate , tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -2975,16 +3125,16 @@ rec.exp1.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
- rate.init <- if (length( .irate ))
- rep( .irate , len = n) else {
+ rate.init <- if (length( .irate )) rep_len( .irate , n) else {
init.rate <-
if (.imethod == 1) length(y) / y[length(y), 1] else
if (.imethod == 2) 1/mean(y) else 1/median(y)
if (.lrate == "loge") pmax(1/1024, init.rate) else
- init.rate}
+ init.rate
+ }
etastart <-
- cbind(theta2eta(rep(rate.init, len = n), .lrate , .erate ))
+ cbind(theta2eta(rep_len(rate.init, n), .lrate , .erate ))
}
}), list( .lrate = lrate,
.erate = erate,
@@ -3050,14 +3200,10 @@ dpois.points <- function(x, lambda, ostatistic,
L <- max(length(x), length(lambda),
length(ostatistic), length(dimension))
- if (length(x) != L)
- x <- rep(x, length.out = L)
- if (length(lambda) != L)
- lambda <- rep(lambda, length.out = L)
- if (length(ostatistic) != L)
- ostatistic <- rep(ostatistic, length.out = L)
- if (length(dimension) != L)
- dimension <- rep(dimension, length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
+ if (length(ostatistic) != L) ostatistic <- rep_len(ostatistic, L)
+ if (length(dimension) != L) dimension <- rep_len(dimension, L)
if (!all(dimension %in% c(2, 3)))
stop("argument 'dimension' must have values 2 and/or 3")
@@ -3119,6 +3265,17 @@ dpois.points <- function(x, lambda, ostatistic,
if (dimension == 2)
"Mean: gamma(s+0.5) / (gamma(s) * sqrt(density * pi))" else
"Mean: gamma(s+1/3) / (gamma(s) * (4*density*pi/3)^(1/3))"),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 1,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("density"),
+ link = .link )
+ }, list( .link = link ))),
+
+
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -3128,7 +3285,7 @@ dpois.points <- function(x, lambda, ostatistic,
predictors.names <-
- namesof("density", .link, earg = .earg, tag = FALSE)
+ namesof("density", .link, earg = .earg , tag = FALSE)
@@ -3139,15 +3296,15 @@ dpois.points <- function(x, lambda, ostatistic,
myratio <- exp(lgamma( .ostatistic + 0.5) -
lgamma( .ostatistic ))
density.init <- if (is.Numeric( .idensity ))
- rep( .idensity , len = n) else
- rep(myratio^2 / (pi * use.this^2), len = n)
+ rep_len( .idensity , n) else
+ rep_len(myratio^2 / (pi * use.this^2), n)
etastart <- theta2eta(density.init, .link , earg = .earg )
} else {
myratio <- exp(lgamma( .ostatistic + 1/3) -
lgamma( .ostatistic ))
density.init <- if (is.Numeric( .idensity ))
- rep( .idensity , len = n) else
- rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
+ rep_len( .idensity , n) else
+ rep_len(3 * myratio^3 / (4 * pi * use.this^3), n)
etastart <- theta2eta(density.init, .link , earg = .earg )
}
}
@@ -3168,10 +3325,9 @@ dpois.points <- function(x, lambda, ostatistic,
.ostatistic = ostatistic,
.dimension = dimension ))),
last = eval(substitute(expression({
- misc$link <- c("density" = .link)
- misc$earg <- list("density" = .earg)
+ misc$link <- c("density" = .link )
+ misc$earg <- list("density" = .earg )
- misc$expected <- TRUE
misc$ostatistic <- .ostatistic
misc$dimension <- .dimension
}), list( .link = link, .earg = earg,
diff --git a/R/family.functions.R b/R/family.functions.R
index e37a468..fdcc035 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -253,6 +253,7 @@ ima <- function(j, k, M) {
+
checkwz <- function(wz, M, trace = FALSE,
wzepsilon = .Machine$double.eps^0.75) {
if (wzepsilon > 0.5)
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 1cb92e7..3a9e2cb 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -87,15 +87,15 @@
mustart[, 2] * mustart[, 3] +
mustart[, 2] * mustart[, 5] +
mustart[, 3] * mustart[, 5], w)
- p1 <- if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
+ p1 <- if (is.numeric( .ip1 )) rep_len( .ip1 , n) else
weighted.mean(mustart[, 2] * mustart[, 3], w) / mydeterminant
- p2 <- if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
+ p2 <- if (is.numeric( .ip2 )) rep_len( .ip2 , n) else
weighted.mean(mustart[, 2] * mustart[, 5], w) / mydeterminant
- ff <- if (is.numeric( .iF )) rep( .iF , len = n) else
+ ff <- if (is.numeric( .iF )) rep_len( .iF , n) else
weighted.mean(abs(1 - mustart[, 2] / (2 * p1 * p2)), w)
- p1 <- rep(p1, len = n)
- p2 <- rep(p2, len = n)
- ff <- rep(ff, len = n)
+ p1 <- rep_len(p1, n)
+ p2 <- rep_len(p2, n)
+ ff <- rep_len(ff, n)
p1[p1 < 0.05] <- 0.05
p1[p1 > 0.99] <- 0.99
p2[p2 < 0.05] <- 0.05
@@ -253,12 +253,12 @@
namesof("nS", .link , earg = .earg , tag = FALSE))
if (is.null(etastart)) {
- ms <- if (is.numeric(.ims)) rep(.ims, n) else
+ ms <- if (is.numeric(.ims)) rep_len( .ims , n) else
c(sqrt(mustart[, 2]))
- ns <- c(sqrt(mustart[,6]))
- nS <- if (is.numeric(.inS)) rep(.inS, n) else
- c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
- mS <- if (is.numeric(.imS)) rep(.imS, n) else
+ ns <- c(sqrt(mustart[, 6]))
+ nS <- if (is.numeric(.inS)) rep_len( .inS , n) else
+ c(-ns + sqrt(ns^2 + mustart[, 5])) # Solve a quadratic eqn
+ mS <- if (is.numeric(.imS)) rep_len( .imS , n) else
1-ns-ms-nS
etastart <- cbind(theta2eta(mS, .link , earg = .earg ),
theta2eta(ms, .link , earg = .earg ),
@@ -391,12 +391,11 @@
mustart <- (y + mustart) / 2
if (!length(etastart)) {
- pO <- if (is.Numeric( .ipO )) rep( .ipO , len = n) else
- rep(c(sqrt( weighted.mean(mustart[, 4], w)) ), len = n)
- pA <- if (is.Numeric( .ipA )) rep( .ipA , len = n) else
- rep(c(1 - sqrt(weighted.mean(mustart[, 2] + mustart[, 4], w))),
- len = n)
- pB <- if (is.Numeric( .ipB )) rep( .ipB , len = n) else
+ pO <- if (is.Numeric( .ipO )) rep_len( .ipO , n) else
+ rep_len(c(sqrt( weighted.mean(mustart[, 4], w)) ), n)
+ pA <- if (is.Numeric( .ipA )) rep_len( .ipA , n) else
+ rep_len(c(1-sqrt(weighted.mean(mustart[, 2] + mustart[, 4], w))), n)
+ pB <- if (is.Numeric( .ipB )) rep_len( .ipB , n) else
abs(1 - pA - pO)
etastart <- cbind(theta2eta(pA, .link.pA , earg = .earg.pA ),
theta2eta(pB, .link.pB , earg = .earg.pB ))
@@ -510,8 +509,8 @@
mustart <- (y + mustart) / 2
if (is.null(etastart)) {
- p.init <- if (is.numeric( .init.p )) rep( .init.p , len = n) else
- rep(c(sqrt(4 * weighted.mean(mustart[, 4], w))), len = n)
+ p.init <- if (is.numeric( .init.p )) rep_len( .init.p , n) else
+ rep_len(c(sqrt(4 * weighted.mean(mustart[, 4], w))), n)
etastart <- cbind(theta2eta(p.init, .link , earg = .earg ))
etastart <- jitter(etastart)
@@ -648,9 +647,9 @@
if (is.null(etastart)) {
- pA <- if (is.numeric( .ipA )) rep( .ipA , len = n) else
- rep(c(sqrt( weighted.mean(mustart[, 1], w))), len = n)
- fp <- if (is.numeric( .ifp )) rep( .ifp , len = n) else
+ pA <- if (is.numeric( .ipA )) rep_len( .ipA , n) else
+ rep_len(c(sqrt( weighted.mean(mustart[, 1], w))), n)
+ fp <- if (is.numeric( .ifp )) rep_len( .ifp , n) else
runif(n) # 1- mustart[, 2]/(2*pA*(1-pA))
etastart <- cbind(theta2eta(pA, .linkp , earg = .eargp ),
if ( .inbreeding )
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 61f2859..248cf0e 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -149,7 +149,7 @@
if (NCOL(y) == 1) {
if (is.factor(y))
y <- (y != levels(y)[1])
- nvec <- rep(1, n)
+ nvec <- rep_len(1, n)
y[w == 0] <- 0
if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
@@ -217,7 +217,7 @@
dtheta.deta(mu, link = .link ,
earg = .earg )^2) # w cancel
if (.multiple.responses && ! .onedpar ) {
- dpar <- rep(NA_real_, len = M)
+ dpar <- rep_len(NA_real_, M)
temp87 <- cbind(temp87)
nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
for (ii in 1:M)
@@ -236,7 +236,7 @@
misc$bred <- .bred
misc$expected <- TRUE
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- if (M > 1) dn2 else new.name # Was old.name=="mu"
misc$earg <- vector("list", M)
@@ -501,7 +501,7 @@
temp <- c(w) * dmu.deta^2
dpar <- sum(c(w) * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
} else {
- dpar <- rep(0, len = M)
+ dpar <- rep_len(0, M)
for (spp in 1:M) {
temp <- c(w) * dmu.deta[, spp]^2
dpar[spp] <- sum(c(w) * (y[,spp]-mu[, spp])^2 * wz[, spp]/temp) / (
@@ -513,7 +513,7 @@
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- param.names("mu", M)
misc$earg <- vector("list", M)
@@ -632,7 +632,7 @@
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- param.names("mu", M)
misc$earg <- vector("list", M)
@@ -672,11 +672,11 @@ dinv.gaussian <- function(x, mu, lambda, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- LLL <- max(length(x), length(mu), length(lambda))
- x <- rep(x, len = LLL);
- mu <- rep(mu, len = LLL);
- lambda <- rep(lambda, len = LLL)
- logdensity <- rep(log(0), len = LLL)
+ L <- max(length(x), length(mu), length(lambda))
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mu) != L) mu <- rep_len(mu, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
+ logdensity <- rep_len(log(0), L)
xok <- (x > 0)
logdensity[xok] = 0.5 * log(lambda[xok] / (2 * pi * x[xok]^3)) -
@@ -688,16 +688,12 @@ dinv.gaussian <- function(x, mu, lambda, log = FALSE) {
}
+
pinv.gaussian <- function(q, mu, lambda) {
- if (any(mu <= 0))
- stop("mu must be positive")
- if (any(lambda <= 0))
- stop("lambda must be positive")
-
- LLL <- max(length(q), length(mu), length(lambda))
- q <- rep(q, len = LLL)
- mu <- rep(mu, len = LLL)
- lambda <- rep(lambda, len = LLL)
+ L <- max(length(q), length(mu), length(lambda))
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(mu) != L) mu <- rep_len(mu, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
ans <- q
ans[q <= 0] <- 0
@@ -705,21 +701,24 @@ pinv.gaussian <- function(q, mu, lambda) {
ans[bb] <- pnorm( sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] - 1)) +
exp(2*lambda[bb]/mu[bb]) *
pnorm(-sqrt(lambda[bb]/q[bb]) * (q[bb]/mu[bb] + 1))
+ ans[mu <= 0] <- NaN
+ ans[lambda <= 0] <- NaN
ans
}
+
rinv.gaussian <- function(n, mu, lambda) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- mu <- rep(mu, len = use.n);
- lambda <- rep(lambda, len = use.n)
+ mu <- rep_len(mu, use.n)
+ lambda <- rep_len(lambda, use.n)
u <- runif(use.n)
- Z <- rnorm(use.n)^2 # rchisq(use.n, df = 1)
+ Z <- rnorm(use.n)^2 # rchisq(use.n, df = 1)
phi <- lambda / mu
y1 <- 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
ans <- mu * ifelse((1+y1)*u > 1, 1/y1, y1)
@@ -738,6 +737,7 @@ rinv.gaussian <- function(n, mu, lambda) {
+
inv.gaussianff <- function(lmu = "loge", llambda = "loge",
imethod = 1, ilambda = NULL,
parallel = FALSE,
@@ -866,9 +866,8 @@ rinv.gaussian <- function(n, mu, lambda) {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <-
- c(rep( .lmu , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lmu , ncoly),
+ rep_len( .llambda , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -1100,7 +1099,7 @@ rinv.gaussian <- function(n, mu, lambda) {
temp87 <- (y-mu)^2 *
wz / (dtheta.deta(mu, link = .link , earg = .earg )^2) # w cancel
if (M > 1 && ! .onedpar ) {
- dpar <- rep(NA_real_, length = M)
+ dpar <- rep_len(NA_real_, M)
temp87 <- cbind(temp87)
nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
for (ii in 1:M)
@@ -1121,7 +1120,7 @@ rinv.gaussian <- function(n, mu, lambda) {
misc$bred <- .bred
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- if (M > 1) dn2 else new.name # Was old.name=="mu"
misc$earg <- vector("list", M)
@@ -1346,7 +1345,7 @@ rinv.gaussian <- function(n, mu, lambda) {
namesof("dispersion", link = .ldisp, earg = .edisp, short = TRUE))
init.mu <- pmax(y, 1/8)
- tmp2 <- rep( .idisp , length.out = n)
+ tmp2 <- rep_len( .idisp , n)
if (!length(etastart))
etastart <-
@@ -1477,7 +1476,7 @@ rinv.gaussian <- function(n, mu, lambda) {
if (is.factor(y)) y <- (y != levels(y)[1])
- nvec <- rep(1, n)
+ nvec <- rep_len(1, n)
y[w == 0] <- 0
if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
@@ -1514,7 +1513,7 @@ rinv.gaussian <- function(n, mu, lambda) {
c(namesof(dn2, .lmean , earg = .emean , short = TRUE),
namesof("dispersion", .ldisp , earg = .edisp , short = TRUE))
- tmp2 <- rep( .idisp , len = n)
+ tmp2 <- rep_len( .idisp , n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.mu, .lmean, earg = .emean),
@@ -1680,7 +1679,7 @@ rinv.gaussian <- function(n, mu, lambda) {
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
if (is.factor(y)) y = (y != levels(y)[1])
- nvec = rep(1, n)
+ nvec = rep_len(1, n)
y[w == 0] <- 0
if (!all(y == 0 | y == 1))
stop("response values 'y' must be 0 or 1")
@@ -1726,7 +1725,7 @@ rinv.gaussian <- function(n, mu, lambda) {
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- if (M > 1) dn2 else "mu"
misc$earg <- vector("list", M)
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 14da3d7..a1ac483 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -267,7 +267,7 @@
"111" = exp(u1+u2+u3+u12+u13+u23)) / denom
},
last = expression({
- misc$link <- rep("identitylink", length = M)
+ misc$link <- rep_len("identitylink", M)
names(misc$link) <- predictors.names
misc$earg <- list(u1 = list(), u2 = list(), u3 = list(),
diff --git a/R/family.math.R b/R/family.math.R
index 4fe2699..26eb6c8 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -106,8 +106,8 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
pgamma.deriv <- function(q, shape, tmax = 100) {
nnn <- max(length(q), length(shape))
- if (length(q) != nnn) q <- rep(q, length = nnn)
- if (length(shape) != nnn) shape <- rep(shape, length = nnn)
+ if (length(q) != nnn) q <- rep_len(q, nnn)
+ if (length(shape) != nnn) shape <- rep_len(shape, nnn)
if (!is.Numeric(q, positive = TRUE))
stop("bad input for argument 'q'")
@@ -169,7 +169,7 @@ expint <- function (x, deriv = 0) {
if (!is.Numeric(deriv, integer.valued = TRUE, positive = TRUE) ||
deriv > 3)
stop("Bad input for argument 'deriv'")
- answer <- rep(0, length(x))
+ answer <- rep_len(0, length(x))
if (deriv == 1) {
answer <- exp(x) / x
}
@@ -220,7 +220,7 @@ expint.E1 <- function (x, deriv = 0) {
if (!is.Numeric(deriv, integer.valued = TRUE, positive = TRUE) ||
deriv > 3)
stop("Bad input for argument 'deriv'")
- answer <- rep(0, length(x))
+ answer <- rep_len(0, length(x))
if (deriv == 1) {
answer <- exp(-x) / x
}
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 2faf74b..fa046d9 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -141,9 +141,9 @@ mix2normal.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu )
- init.phi <- rep(if (length( .iphi )) .iphi else 0.5, length = n)
- init.mu1 <- rep(if (length( .imu1 )) .imu1 else qy[1], length = n)
- init.mu2 <- rep(if (length( .imu2 )) .imu2 else qy[2], length = n)
+ init.phi <- rep_len(if (length( .iphi )) .iphi else 0.5, n)
+ init.mu1 <- rep_len(if (length( .imu1 )) .imu1 else qy[1], n)
+ init.mu2 <- rep_len(if (length( .imu2 )) .imu2 else qy[2], n)
ind.1 <- if (init.mu1[1] < init.mu2[1])
1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
@@ -151,10 +151,8 @@ mix2normal.control <- function(trace = TRUE, ...) {
round(n* init.phi[1]):n else
1:round(n* init.phi[1])
sorty <- sort(y)
- init.sd1 <- rep(if (length( .isd1 )) .isd1 else sd(sorty[ind.1]),
- len = n)
- init.sd2 <- rep(if (length( .isd2 )) .isd2 else sd(sorty[ind.2]),
- len = n)
+ init.sd1 <- rep_len(if (length( .isd1 )) .isd1 else sd(sorty[ind.1]), n)
+ init.sd2 <- rep_len(if (length( .isd2 )) .isd2 else sd(sorty[ind.2]), n)
if ( .eq.sd ) {
init.sd1 <-
init.sd2 <- (init.sd1 + init.sd2) / 2
@@ -394,9 +392,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if (length( .iphi )) .iphi else 0.5, length = n)
- init.lambda1 <- rep(if (length( .il1 )) .il1 else qy[1], length = n)
- init.lambda2 <- rep(if (length( .il2 )) .il2 else qy[2], length = n)
+ init.phi <- rep_len(if (length( .iphi )) .iphi else 0.5, n)
+ init.lambda1 <- rep_len(if (length( .il1 )) .il1 else qy[1], n)
+ init.lambda2 <- rep_len(if (length( .il2 )) .il2 else qy[2], n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
@@ -632,9 +630,9 @@ mix2exp.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 <- rep(if (length(.il1)) .il1 else 1/qy[1], length = n)
- init.lambda2 <- rep(if (length(.il2)) .il2 else 1/qy[2], length = n)
+ init.phi <- rep_len(if (length( .iphi )) .iphi else 0.5, n)
+ init.lambda1 <- rep_len(if (length( .il1 )) .il1 else 1/qy[1], n)
+ init.lambda2 <- rep_len(if (length( .il2 )) .il2 else 1/qy[2], n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
theta2eta(init.lambda1, .llambda , earg = .el1 ),
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index ab3287b..377cb6f 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -25,7 +25,7 @@ vnonlinear.control <- function(save.weights = TRUE, ...) {
subset.lohi <- function(xvec, yvec,
probs.x = c(0.15, 0.85),
type = c("median", "wtmean", "unwtmean"),
- wtvec = rep(1, len = length(xvec))) {
+ wtvec = rep_len(1, length(xvec))) {
if (!is.Numeric(probs.x, length.arg = 2))
@@ -212,8 +212,8 @@ micmen.control <- function(save.weights = TRUE, ...) {
if (length( .init2 )) init2 <- .init2
etastart <- cbind(
- rep(theta2eta(init1, .link1 , earg = .earg1 ), len = n),
- rep(theta2eta(init2, .link2 , earg = .earg2 ), len = n))
+ rep_len(theta2eta(init1, .link1 , earg = .earg1 ), n),
+ rep_len(theta2eta(init2, .link2 , earg = .earg2 ), n))
} else {
stop("cannot handle 'etastart' or 'mustart'")
}
@@ -552,8 +552,8 @@ skira.control <- function(save.weights = TRUE, ...) {
if (length( .init1 )) init1 <- .init1
if (length( .init2 )) init2 <- .init2
etastart <- cbind(
- rep(theta2eta(init1, .link1 , earg = .earg1 ), len = n),
- rep(theta2eta(init2, .link2 , earg = .earg2 ), len = n))
+ rep_len(theta2eta(init1, .link1 , earg = .earg1 ), n),
+ rep_len(theta2eta(init2, .link2 , earg = .earg2 ), n))
} else {
stop("cannot handle 'etastart' or 'mustart'")
}
diff --git a/R/family.normal.R b/R/family.normal.R
index 7812083..438e923 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -131,7 +131,7 @@ VGAM.weights.function <- function(w, M, n) {
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
- misc$link <- rep("identitylink", length = M)
+ misc$link <- rep_len("identitylink", M)
names(misc$link) <- predictors.names
misc$earg <- vector("list", M)
@@ -227,15 +227,16 @@ VGAM.weights.function <- function(w, M, n) {
+
dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
L <- max(length(x), length(mean), length(sd))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sd) != L) sd <- rep(sd, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sd) != L) sd <- rep_len(sd, L)
if (log.arg) {
ifelse(x < 0, log(0), dnorm(x, mean = mean, sd = sd, log = TRUE) -
@@ -382,7 +383,7 @@ if (FALSE)
con.use <- con.m
- for (klocal in 1:length(con.m)) {
+ for (klocal in seq_along(con.m)) {
con.use[[klocal]] <- interleave.cmat(con.m[[klocal]], con.s[[klocal]])
@@ -451,7 +452,7 @@ if (FALSE)
for (jay in 1:NOS) {
yvec <- y[, jay]
wvec <- w[, jay]
- if (any(is.na(init.me[, jay]))) {
+ if (anyNA(init.me[, jay])) {
init.me[, jay] <- if ( .imethod == 1) {
weighted.mean(yvec, wvec)
} else if ( .imethod == 2) {
@@ -460,7 +461,7 @@ if (FALSE)
median(yvec)
}
}
- if (any(is.na(init.sd[, jay])))
+ if (anyNA(init.sd[, jay]))
init.sd[, jay] <- sd(yvec)
@@ -516,8 +517,8 @@ if (FALSE)
.emean = emean, .esd = esd
))),
last = eval(substitute(expression({
- misc$link <- c(rep( .lmean , length = NOS),
- rep( .lsd , length = NOS))[interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .lmean , NOS),
+ rep_len( .lsd , NOS))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mean.names, sdev.names)
names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
@@ -736,9 +737,9 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
stop("bad input for argument 'd'")
L <- max(length(x), length(mean), length(sigma))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sigma) != L) sigma <- rep(sigma, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
hh <- 2 - d
@@ -765,9 +766,9 @@ ptikuv <- function(q, d, mean = 0, sigma = 1,
rm(log.p) # 20141231 KaiH
L <- max(length(q), length(mean), length(sigma))
- if (length(q) != L) q <- rep(q, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sigma) != L) sigma <- rep(sigma, len = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
zedd1 <- 0.5 * ((q - mean) / sigma)^2
ans <- q*0 + 0.5
@@ -810,10 +811,10 @@ qtikuv <- function(p, d, mean = 0, sigma = 1,
}
L <- max(length(p), length(mean), length(sigma))
- if (length(p) != L) p <- rep(p, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sigma) != L) sigma <- rep(sigma, len = L)
- ans <- rep(0.0, len = L)
+ if (length(p) != L) p <- rep_len(p, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
+ ans <- rep_len(0.0, L)
myfun <- function(x, d, mean = 0, sigma = 1, p)
@@ -860,7 +861,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans <- rep(0.0, len = use.n)
+ ans <- rep_len(0.0, use.n)
ptr1 <- 1; ptr2 <- 0
hh <- 2 - d
@@ -945,13 +946,13 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
if (!length(etastart)) {
- sigma.init <- if (length( .isigma )) rep( .isigma , length = n) else {
+ sigma.init <- if (length( .isigma )) rep_len( .isigma , n) else {
hh <- 2 - .d
KK <- 1 / (1 + 1/hh + 0.75/hh^2)
K2 <- 1 + 3/hh + 15/(4*hh^2)
- rep(sqrt(var(y) / (KK*K2)), len = n)
+ rep_len(sqrt(var(y) / (KK*K2)), n)
}
- mean.init <- rep(weighted.mean(y, w), len = n)
+ mean.init <- rep_len(weighted.mean(y, w), n)
etastart <-
cbind(theta2eta(mean.init, .lmean , earg = .emean ),
theta2eta(sigma.init, .lsigma , earg = .esigma ))
@@ -1119,12 +1120,12 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
}
L <- max(length(p), length(mean), length(sd), length(a1), length(a2))
- if (length(p) != L) p <- rep(p, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sd) != L) sd <- rep(sd, len = L)
- if (length(a1) != L) a1 <- rep(a1, len = L)
- if (length(a2) != L) a2 <- rep(a2, len = L)
- ans <- rep(0.0 , len = L)
+ if (length(p) != L) p <- rep_len(p, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sd) != L) sd <- rep_len(sd, L)
+ if (length(a1) != L) a1 <- rep_len(a1, L)
+ if (length(a2) != L) a2 <- rep_len(a2, L)
+ ans <- rep_len(0.0 , L)
myfun <- function(x, mean = 0, sd = 1, a1 = 1, a2 = 2, p)
pfoldnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
@@ -1250,19 +1251,19 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2 = 1) {
stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
Ahat <- m1d^2 / m2d
thetahat <- sqrt(max(1/Ahat -1, 0.1))
- mean.init <- rep(if (length( .imean)) .imean else
- thetahat * sqrt((stddev^2 + meany^2) * Ahat), len = n)
- sd.init <- rep(if (length( .isd)) .isd else
- sqrt((stddev^2 + meany^2) * Ahat), len = n)
+ mean.init <- rep_len(if (length( .imean)) .imean else
+ thetahat * sqrt((stddev^2 + meany^2) * Ahat), n)
+ sd.init <- rep_len(if (length( .isd)) .isd else
+ sqrt((stddev^2 + meany^2) * Ahat), n)
}
stddev <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
meany <- weighted.mean(y, w)
- mean.init <- rep(if (length( .imean )) .imean else
- {if ( .imethod == 1) median(y) else meany}, len = n)
- sd.init <- rep(if (length( .isd )) .isd else
- {if ( .imethod == 1) stddev else 1.2*sd(c(y))}, len = n)
+ mean.init <- rep_len(if (length( .imean )) .imean else
+ {if ( .imethod == 1) median(y) else meany}, n)
+ sd.init <- rep_len(if (length( .isd )) .isd else
+ {if ( .imethod == 1) stddev else 1.2*sd(c(y))}, n)
etastart <- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
theta2eta(sd.init, .lsd , earg = .esd ))
}
@@ -1438,11 +1439,11 @@ lqnorm <- function(qpower = 2,
if (!length(etastart)) {
meany <- weighted.mean(y, w)
- mean.init <- rep(if (length( .i.mu )) .i.mu else {
+ mean.init <- rep_len(if (length( .i.mu )) .i.mu else {
if ( .imethod == 2) median(y) else
if ( .imethod == 1) meany else
.ishrinkage * meany + (1 - .ishrinkage ) * y
- }, len = n)
+ }, n)
etastart <- theta2eta(mean.init, link = .link, earg = .earg)
}
}), list( .imethod = imethod, .i.mu = imu,
@@ -1456,7 +1457,7 @@ lqnorm <- function(qpower = 2,
dy <- dimnames(y)
if (!is.null(dy[[2]]))
dimnames(fit$fitted.values) = dy
- misc$link <- rep( .link, length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- predictors.names
misc$earg <- list(mu = .earg)
@@ -1501,11 +1502,11 @@ dtobit <- function(x, mean = 0, sd = 1,
L <- max(length(x), length(mean), length(sd),
length(Lower), length(Upper))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sd) != L) sd <- rep(sd, len = L)
- if (length(Lower) != L) Lower <- rep(Lower, len = L)
- if (length(Upper) != L) Upper <- rep(Upper, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sd) != L) sd <- rep_len(sd, L)
+ if (length(Lower) != L) Lower <- rep_len(Lower, L)
+ if (length(Upper) != L) Upper <- rep_len(Upper, L)
if (!all(Lower < Upper, na.rm = TRUE))
stop("all(Lower < Upper) is not TRUE")
@@ -1605,10 +1606,10 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
L <- use.n
- if (length(mean) != L) mean <- rep(mean, len = L)
- if (length(sd) != L) sd <- rep(sd, len = L)
- if (length(Lower) != L) Lower <- rep(Lower, len = L)
- if (length(Upper) != L) Upper <- rep(Upper, len = L)
+ if (length(mean) != L) mean <- rep_len(mean, L)
+ if (length(sd) != L) sd <- rep_len(sd, L)
+ if (length(Lower) != L) Lower <- rep_len(Lower, L)
+ if (length(Upper) != L) Upper <- rep_len(Upper, L)
if (!all(Lower < Upper, na.rm = TRUE))
stop("all(Lower < Upper) is not TRUE")
@@ -1771,7 +1772,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
use.i11 <- i11[, jay]
if (sum(!use.i11) < ncol(x)) {
- use.i11 <- rep(FALSE, length = n)
+ use.i11 <- rep_len(FALSE, n)
}
mylm <- lm.wfit(x = x[!use.i11, , drop = FALSE],
y = y[!use.i11, jay],
@@ -1857,8 +1858,8 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
.Lower = Lower, .Upper = Upper ))),
last = eval(substitute(expression({
- temp0303 <- c(rep( .lmu , length = ncoly),
- rep( .lsd , length = ncoly))
+ temp0303 <- c(rep_len( .lmu , ncoly),
+ rep_len( .lsd , ncoly))
names(temp0303) <- c(param.names("mu", ncoly),
param.names("sd", ncoly))
temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
@@ -2395,7 +2396,7 @@ moment.millsratio2 <- function(zedd) {
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
- misc$link <- rep( .lmean , length = M1 * ncoly)
+ misc$link <- rep_len( .lmean , M1 * ncoly)
misc$earg <- vector("list", M1 * ncoly)
names(misc$link) <- names(misc$earg) <- temp.names
for (ii in 1:ncoly) {
@@ -2577,7 +2578,8 @@ moment.millsratio2 <- function(zedd) {
imethod = 1,
icoefficients = NULL,
isd = NULL,
- zero = "sd") {
+ zero = "sd",
+ sd.inflation.factor = 2.50) {
@@ -2602,8 +2604,8 @@ moment.millsratio2 <- function(zedd) {
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
if (!is.logical(var.arg) || length(var.arg) != 1)
@@ -2659,72 +2661,72 @@ moment.millsratio2 <- function(zedd) {
- link.list.lengths <- unlist(lapply(asgn2, length))
+ link.list.lengths <- unlist(lapply(asgn2, length))
- link.list <- .link.list
- earg.list <- .earg.list
- if (FALSE) {
- if (length(link.list) > 0)
- if (length(nasgn2) != length(names(link.list)) ||
- !all(sort(nasgn2) == sort(names(link.list))))
- stop("names of 'link.list' do not match argument 'form2'")
- if (length(earg.list) > 0)
- if (length(nasgn2) != length(names(earg.list)) ||
- !all(sort(nasgn2) == sort(names(earg.list))))
- stop("names of 'earg.list' do not match argument 'form2'")
- }
+ link.list <- .link.list
+ earg.list <- .earg.list
+ if (FALSE) {
+ if (length(link.list) > 0)
+ if (length(nasgn2) != length(names(link.list)) ||
+ !all(sort(nasgn2) == sort(names(link.list))))
+ stop("names of 'link.list' do not match argument 'form2'")
+ if (length(earg.list) > 0)
+ if (length(nasgn2) != length(names(earg.list)) ||
+ !all(sort(nasgn2) == sort(names(earg.list))))
+ stop("names of 'earg.list' do not match argument 'form2'")
+ }
- link.list.ordered <- vector("list", ncol(Xm2))
- earg.list.ordered <- vector("list", ncol(Xm2))
+ link.list.ordered <- vector("list", ncol(Xm2))
+ earg.list.ordered <- vector("list", ncol(Xm2))
- if (sum(names(link.list) == "(Default)") > 1)
- stop("only one default allowed in argument 'link.list'!")
- if (sum(names(earg.list) == "(Default)") > 1)
- stop("only one default allowed in argument 'earg.list'!")
- default.link <- if (any(names(link.list) == "(Default)"))
- link.list[["(Default)"]] else "identitylink"
- default.earg <- if (any(names(earg.list) == "(Default)"))
- earg.list[["(Default)"]] else list()
+ if (sum(names(link.list) == "(Default)") > 1)
+ stop("only one default allowed in argument 'link.list'!")
+ if (sum(names(earg.list) == "(Default)") > 1)
+ stop("only one default allowed in argument 'earg.list'!")
+ default.link <- if (any(names(link.list) == "(Default)"))
+ link.list[["(Default)"]] else "identitylink"
+ default.earg <- if (any(names(earg.list) == "(Default)"))
+ earg.list[["(Default)"]] else list()
- names(link.list.ordered) <-
- names(earg.list.ordered) <- colnames(Xm2)
- i.ptr <- 1
- for (jlocal in 1:length(nasgn2)) {
- for (klocal in 1:link.list.lengths[jlocal]) {
- link.list.ordered[[i.ptr]] <-
- if (any(names(link.list) == nasgn2[jlocal]))
- link.list[[(nasgn2[jlocal])]] else
- default.link
- earg.list.ordered[[i.ptr]] <-
- if (any(names(earg.list) == nasgn2[jlocal]))
- earg.list[[(nasgn2[jlocal])]] else
- default.earg
- i.ptr <- i.ptr + 1
+ names(link.list.ordered) <-
+ names(earg.list.ordered) <- colnames(Xm2)
+ i.ptr <- 1
+ for (jlocal in seq_along(nasgn2)) {
+ for (klocal in 1:link.list.lengths[jlocal]) {
+ link.list.ordered[[i.ptr]] <-
+ if (any(names(link.list) == nasgn2[jlocal]))
+ link.list[[(nasgn2[jlocal])]] else
+ default.link
+ earg.list.ordered[[i.ptr]] <-
+ if (any(names(earg.list) == nasgn2[jlocal]))
+ earg.list[[(nasgn2[jlocal])]] else
+ default.earg
+ i.ptr <- i.ptr + 1
+ }
}
- }
- link.list <- link.list.ordered
- earg.list <- earg.list.ordered
- extra$link.list <- link.list
- extra$earg.list <- earg.list
+ link.list <- link.list.ordered
+ earg.list <- earg.list.ordered
+ extra$link.list <- link.list
+ extra$earg.list <- earg.list
- if (any(is.multilogit <- (unlist(link.list.ordered) == "multilogit"))) {
- if (sum(is.multilogit) < 2)
- stop("at least two 'multilogit' links need to be specified, ",
- "else none")
- col.index.is.multilogit <- (1:length(is.multilogit))[is.multilogit]
- extra$col.index.is.multilogit <- col.index.is.multilogit
- extra$is.multilogit <- is.multilogit
- }
+ if (any(is.multilogit <- (unlist(link.list.ordered) == "multilogit"))) {
+ if (sum(is.multilogit) < 2)
+ stop("at least two 'multilogit' links need to be specified, ",
+ "else none")
+ col.index.is.multilogit <- (seq_along(is.multilogit))[is.multilogit]
+ extra$col.index.is.multilogit <- col.index.is.multilogit
+ extra$is.multilogit <- is.multilogit
+ }
@@ -2754,19 +2756,19 @@ moment.millsratio2 <- function(zedd) {
- mynames1 <- paste("coeff:", colnames(Xm2), sep = "")
+ mynames1 <- paste("coeff:", colnames(Xm2), sep = "")
- for (jlocal in 1:length(mynames1)) {
- mynames1[jlocal] <- namesof(mynames1[jlocal],
- link = link.list[[jlocal]],
- earg = earg.list[[jlocal]], short = TRUE)
- }
- extra$all.mynames1 <- all.mynames1 <- mynames1
+ for (jlocal in seq_along(mynames1)) {
+ mynames1[jlocal] <- namesof(mynames1[jlocal],
+ link = link.list[[jlocal]],
+ earg = earg.list[[jlocal]], short = TRUE)
+ }
+ extra$all.mynames1 <- all.mynames1 <- mynames1
- if (LLL <- length(extra$is.multilogit)) {
- mynames1 <- mynames1[-max(extra$col.index.is.multilogit)]
- }
+ if (LLL <- length(extra$is.multilogit)) {
+ mynames1 <- mynames1[-max(extra$col.index.is.multilogit)]
+ }
mynames2 <- param.names(if ( .var.arg ) "var" else "sd", ncoly)
predictors.names <-
@@ -2786,12 +2788,12 @@ moment.millsratio2 <- function(zedd) {
if (icoefficients.given <- is.numeric( .icoefficients ))
- jfit.coeff <- rep( .icoefficients , length = length(jfit.coeff))
+ jfit.coeff <- rep_len( .icoefficients , length(jfit.coeff))
if (!icoefficients.given)
- for (jlocal in 1:length(nasgn2)) {
+ for (jlocal in seq_along(nasgn2)) {
if (link.list[[jlocal]] %in%
c("cauchit", "probit", "cloglog", "logit",
"logc", "golf", "polf", "nbolf") &&
@@ -2818,11 +2820,12 @@ moment.millsratio2 <- function(zedd) {
jfit.coeff[jlocal] <- 1/8
}
- if (!icoefficients.given)
- if (LLL <- length(extra$is.multilogit)) {
- raw.coeffs <- jfit.coeff[extra$col.index.is.multilogit]
- possum1 <- (0.01 + abs(raw.coeffs)) / sum(0.01 + abs(raw.coeffs))
- jfit.coeff[extra$is.multilogit] <- possum1
+ if (!icoefficients.given) {
+ if (LLL <- length(extra$is.multilogit)) {
+ raw.coeffs <- jfit.coeff[extra$col.index.is.multilogit]
+ possum1 <- (0.01 + abs(raw.coeffs)) / sum(0.01 + abs(raw.coeffs))
+ jfit.coeff[extra$is.multilogit] <- possum1
+ }
}
@@ -2850,23 +2853,22 @@ moment.millsratio2 <- function(zedd) {
}
- mean.init <- jfit$fitted
+ w.sum1 <- w / sum(w)
sdev.init <-
- if ( .imethod == 1) {
- sqrt( sum(w * (y - mean.init)^2) / sum(w) )
- } else if ( .imethod == 2) {
- if (jfit$df.resid > 0)
- sqrt( sum(w * jfit$resid^2) / jfit$df.resid ) else
- sqrt( sum(w * jfit$resid^2) / sum(w) )
- } else if ( .imethod == 3) {
- sqrt( sum(w * (y - mean.init)^1.5) / sum(w) )
- } else {
- sqrt( sum(w * abs(y - mean.init)) / sum(w) )
- }
+ if ( .imethod == 1) {
+ sqrt( sum(w.sum1 * jfit$resid^2) )
+ } else if ( .imethod == 2) {
+ sqrt( sum(w.sum1 * (abs(jfit$resid))^1.5) )
+ } else if ( .imethod == 3) {
+ sqrt( sum(w.sum1 * abs(jfit$resid)) )
+ } else {
+ wmean.init <- weighted.mean(y, w = w) # jfit$fitted
+ sqrt( sum(w.sum1 * (y - wmean.init)^2) )
+ }
- inflation.factor <- 1.5
- sdev.init <- sdev.init * inflation.factor
- sdev.init[sdev.init <= sqrt( .Machine$double.eps )] <- 0.01
+ sd.inflation.factor <- .sd.inflation.factor
+ sdev.init <- sdev.init * sd.inflation.factor
+ sdev.init <- pmax(sdev.init, ( .Machine$double.eps )^0.25) # Limit the smallness
if (length( .isdev )) {
sdev.init <- matrix( .isdev , n, ncoly, byrow = TRUE)
@@ -2886,10 +2888,29 @@ moment.millsratio2 <- function(zedd) {
.esd = esd, .evar = evar,
.orig.esd = orig.esd, .orig.evar = orig.evar,
.var.arg = var.arg,
+ .sd.inflation.factor = sd.inflation.factor,
.isdev = isd,
.icoefficients = icoefficients,
.imethod = imethod ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- ncol(eta)
+ NOS <- ncol(eta) / M1
+ sdev <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lsd , earg = .esd )
+
+ okay1 <- all(is.finite(sdev)) && all(sdev > 0) &&
+ all(is.finite(eta))
+ okay1
+ }, list( .link.list = link.list,
+ .earg.list = earg.list,
+ .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .orig.esd = orig.esd, .orig.evar = orig.evar,
+ .var.arg = var.arg ))),
+
+
linkinv = eval(substitute(function(eta, extra = NULL) {
M <- ncol(eta)
@@ -3147,7 +3168,7 @@ moment.millsratio2 <- function(zedd) {
dcoffs.deta[, indtw$col.index, drop = FALSE] *
ned2l.dmu2
- for (ilocal in 1:length(indtw$row.index))
+ for (ilocal in seq_along(indtw$row.index))
wz[, iam(indtw$row.index[ilocal],
indtw$col.index[ilocal], M = M)] <-
twz[, iam(indtw$row.index[ilocal],
@@ -3226,10 +3247,10 @@ moment.millsratio2 <- function(zedd) {
mylm <- lm.wfit(x = x, y = c(log(y)), w = c(w))
sdlog.y.est <- sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
etastart <- cbind(
- meanlog = rep(theta2eta(log(median(y)), .lmulog ,
- earg = .emulog ), length = n),
- sdlog = rep(theta2eta(sdlog.y.est, .lsdlog ,
- earg = .esdlog ), length = n))
+ meanlog = rep_len(theta2eta(log(median(y)), .lmulog ,
+ earg = .emulog ), n),
+ sdlog = rep_len(theta2eta(sdlog.y.est, .lsdlog ,
+ earg = .esdlog ), n))
}
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
@@ -3420,7 +3441,7 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
if (!length(etastart)) {
init.shape <- if (length( .ishape ))
- rep( .ishape , len = n) else {
+ rep_len( .ishape , n) else {
temp <- y
index <- abs(y) < sqrt(2/pi)-0.01
temp[!index] <- y[!index]
diff --git a/R/family.others.R b/R/family.others.R
index daa2e8b..89bc491 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -26,11 +26,11 @@ dexppois <- function(x, rate = 1, shape, log = FALSE) {
N <- max(length(x), length(shape), length(rate))
- x <- rep(x, len = N)
- shape <- rep(shape, len = N)
- rate <- rep(rate, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ if (length(rate) != N) rate <- rep_len(rate, N)
+ logdensity <- rep_len(log(0), N)
- logdensity <- rep(log(0), len = N)
xok <- (0 < x)
logdensity[xok] <- log(shape[xok]) + log(rate[xok]) -
@@ -217,15 +217,15 @@ rexppois <- function(n, rate = 1, shape) {
if (!length(etastart)) {
ratee.init <- if (length( .iratee ))
- rep( .iratee , len = n) else
+ rep_len( .iratee , n) else
stop("Need to input a value into argument 'iratee'")
shape.init <- if (length( .ishape ))
- rep( .ishape , len = n) else
+ rep_len( .ishape , n) else
(1/ratee.init - mean(y)) / ((y *
exp(-ratee.init * y))/n)
- ratee.init <- rep(weighted.mean(ratee.init, w = w), len = n)
+ ratee.init <- rep_len(weighted.mean(ratee.init, w = w), n)
etastart <-
cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
@@ -334,11 +334,11 @@ dgenray <- function(x, scale = 1, shape, log = FALSE) {
N <- max(length(x), length(shape), length(scale))
- x <- rep(x, len = N)
- shape <- rep(shape, len = N)
- scale <- rep(scale, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
+ logdensity <- rep_len(log(0), N)
- logdensity <- rep(log(0), len = N)
if (any(xok <- (x > 0))) {
temp1 <- x[xok] / scale[xok]
logdensity[xok] <- log(2) + log(shape[xok]) + log(x[xok]) -
@@ -533,12 +533,12 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
scale.init <- if (length( .iscale )) .iscale else
grid.search(scale.grid, objfun = genrayleigh.Loglikfun,
y = y, x = x, w = w)
- scale.init <- rep(scale.init, length = length(y))
+ scale.init <- rep_len(scale.init, length(y))
shape.init <- if (length( .ishape )) .ishape else
-1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)),
w = w)
- shape.init <- rep(shape.init, length = length(y))
+ shape.init <- rep_len(shape.init, length(y))
etastart <- cbind(theta2eta(scale.init, .lscale , earg = .escale ),
theta2eta(shape.init, .lshape , earg = .eshape ))
@@ -664,11 +664,11 @@ dexpgeom <- function(x, scale = 1, shape, log = FALSE) {
N <- max(length(x), length(scale), length(shape))
- x <- rep(x, len = N)
- scale <- rep(scale, len = N)
- shape <- rep(shape, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ logdensity <- rep_len(log(0), N)
- logdensity <- rep(log(0), len = N)
if (any(xok <- (x > 0))) {
temp1 <- -x[xok] / scale[xok]
logdensity[xok] <- -log(scale[xok]) + log1p(-shape[xok]) +
@@ -798,15 +798,15 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
- rep( .iscale , len = n)
+ rep_len( .iscale , n)
} else {
stats::sd(c(y)) # The papers scale parameter beta
}
shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
- rep( .ishape , len = n)
+ rep_len( .ishape , n)
} else {
- rep(2 - exp(median(y)/scale.init), len = n)
+ rep_len(2 - exp(median(y)/scale.init), n)
}
shape.init[shape.init >= 0.95] <- 0.95
shape.init[shape.init <= 0.05] <- 0.05
@@ -945,11 +945,11 @@ dexplog <- function(x, scale = 1, shape, log = FALSE) {
N <- max(length(x), length(scale), length(shape))
- x <- rep(x, len = N)
- scale <- rep(scale, len = N)
- shape <- rep(shape, len = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
- logdensity <- rep(log(0), len = N)
+ logdensity <- rep_len(log(0), N)
if (any(xok <- (x > 0))) {
temp1 <- -x[xok] / scale[xok]
logdensity[xok] <- -log(-log(shape[xok])) - log(scale[xok]) +
@@ -1086,15 +1086,15 @@ explogff.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
- rep( .iscale , len = n)
+ rep_len( .iscale , n)
} else {
stats::sd(c(y))
}
shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
- rep( .ishape , len = n)
+ rep_len( .ishape , n)
} else {
- rep((exp(median(y)/scale.init) - 1)^2, len = n)
+ rep_len((exp(median(y)/scale.init) - 1)^2, n)
}
shape.init[shape.init >= 0.95] <- 0.95
shape.init[shape.init <= 0.05] <- 0.05
@@ -1276,12 +1276,12 @@ dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
na.rm = TRUE))
stop("some parameters out of bound")
- LLL <- max(length(x), length(location), length(scale),
- length(skewpar))
- if (length(x) != LLL) x <- rep(x, length = LLL)
- if (length(location) != LLL) location <- rep(location, length = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length = LLL)
- if (length(skewpar) != LLL) skewpar <- rep(skewpar, length = LLL)
+ N <- max(length(x), length(location), length(scale),
+ length(skewpar))
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
+ if (length(location) != N) location <- rep_len(location, N)
+ if (length(skewpar) != N) skewpar <- rep_len(skewpar, N)
zedd <- (x - location) / scale
@@ -1311,7 +1311,7 @@ ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
s2 <- skewpar + (1 - skewpar) *
pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
-ans <- rep(0.0, length(zedd))
+ans <- rep_len(0.0, length(zedd))
ans[zedd <= 0] <- s1[zedd <= 0]
ans[zedd > 0] <- s2[zedd > 0]
@@ -1335,13 +1335,13 @@ qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
stop("some parameters out of bound")
# Recycle the vectors to equal lengths
LLL <- max(length(pp), length(location), length(scale),
- length(skewpar))
- if (length(pp) != LLL) pp <- rep(pp, length = LLL)
- if (length(location) != LLL) location <- rep(location, length = LLL)
- if (length(scale) != LLL) scale <- rep(scale, length = LLL)
- if (length(skewpar) != LLL) skewpar <- rep(skewpar, length = LLL)
+ length(skewpar))
+ if (length(pp) != LLL) pp <- rep_len(pp, LLL)
+ if (length(location) != LLL) location <- rep_len(location, LLL)
+ if (length(scale) != LLL) scale <- rep_len(scale, LLL)
+ if (length(skewpar) != LLL) skewpar <- rep_len(skewpar, LLL)
- qtpn <- rep(NA_real_, length(LLL))
+ qtpn <- rep_len(NA_real_, length(LLL))
qtpn <- qnorm(pp / (2 * skewpar), sd = 2 * skewpar)
qtpn[pp > skewpar] <- sqrt(8 * ( 1 - skewpar)^2 *
qgamma(pos( pp - skewpar) / (
@@ -1438,9 +1438,9 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
pmax(1/1024, y) else {
if ( .method.init == 3) {
- rep(weighted.mean(y, w), len = n)
+ rep_len(weighted.mean(y, w), n)
} else if ( .method.init == 2) {
- rep(median(rep(y, w)), len = n)
+ rep_len(median(rep(y, w)), n)
} else if ( .method.init == 1) {
junk$fitted
} else {
@@ -1620,9 +1620,9 @@ tpnff3 <- function(llocation = "identitylink",
scale.y.est <- sqrt(sum(c(w) * junk$resid^2) / junk$df.residual)
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .method.init == 3) {
- rep(weighted.mean(y, w), len = n)
+ rep_len(weighted.mean(y, w), n)
} else if ( .method.init == 2) {
- rep(median(rep(y, w)), len = n)
+ rep_len(median(rep(y, w)), n)
} else if ( .method.init == 1) {
junk$fitted
} else {
@@ -1740,23 +1740,20 @@ tpnff3 <- function(llocation = "identitylink",
-dozibeta <- function(x, shape1, shape2, pobs0 = 0,
+
+dzoabeta <- function(x, shape1, shape2, pobs0 = 0,
pobs1 = 0, log = FALSE, tol = .Machine$double.eps) {
log.arg <- log
rm(log)
LLL <- max(length(x), length(shape1),
length(shape2), length(pobs0), length(pobs1))
- if (LLL != length(x))
- x <- rep(x, length = LLL)
- if (LLL != length(shape1))
- shape1 <- rep(shape1, length = LLL)
- if (LLL != length(shape2))
- shape2 <- rep(shape2, length = LLL)
- if (LLL != length(pobs0))
- pobs0 <- rep(pobs0, length = LLL)
- if (LLL != length(pobs1))
- pobs1 <- rep(pobs1, length = LLL)
- ans <- rep(NA, length = LLL)
+ if (LLL != length(x)) x <- rep_len(x, LLL)
+ if (LLL != length(shape1)) shape1 <- rep_len(shape1, LLL)
+ if (LLL != length(shape2)) shape2 <- rep_len(shape2, LLL)
+ if (LLL != length(pobs0)) pobs0 <- rep_len(pobs0, LLL)
+ if (LLL != length(pobs1)) pobs1 <- rep_len(pobs1, LLL)
+ ans <- rep_len(NA_real_, LLL)
+
k1 <- (pobs0 < -tol | pobs1 < -tol |
(pobs0 + pobs1) > (1 + tol))
k4 <- is.na(pobs0) | is.na(pobs1)
@@ -1777,7 +1774,8 @@ dozibeta <- function(x, shape1, shape2, pobs0 = 0,
}
-rozibeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
+
+rzoabeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
tol = .Machine$double.eps) {
use.n <- if ((length.n <- length(n)) > 1) {
length.n
@@ -1789,16 +1787,16 @@ rozibeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
n
}
}
- shape1 <- rep(shape1, length.out = use.n)
- shape2 <- rep(shape2, length.out = use.n)
- pobs0 <- rep(pobs0, length.out = use.n)
- pobs1 <- rep(pobs1, length.out = use.n)
+ shape1 <- rep_len(shape1, use.n)
+ shape2 <- rep_len(shape2, use.n)
+ pobs0 <- rep_len(pobs0, use.n)
+ pobs1 <- rep_len(pobs1, use.n)
random.number <- runif(use.n)
- ans <- rep(NA, length = use.n)
+ ans <- rep_len(NA_real_, use.n)
k5 <- (pobs0 < -tol | pobs1 < -tol |
(pobs0 + pobs1) > (1 + tol))
k4 <- is.na(pobs0) | is.na(pobs1)
- ans[!k4] <- qozibeta(random.number[!k4], shape1 = shape1,
+ ans[!k4] <- qzoabeta(random.number[!k4], shape1 = shape1,
shape2 = shape2, pobs0 = pobs0,
pobs1 = pobs1)
if (any(k5 & !k4)) {
@@ -1809,25 +1807,21 @@ rozibeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
}
-pozibeta <- function(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
+
+pzoabeta <- function(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
lower.tail = TRUE, log.p = FALSE,
tol = .Machine$double.eps) {
LLL <- max(length(q), length(shape1),
length(shape2), length(pobs0), length(pobs1))
- if (LLL != length(q))
- q <- rep(q, length = LLL)
- if (LLL != length(shape1))
- shape1 <- rep(shape1, length = LLL)
- if (LLL != length(shape2))
- shape2 <- rep(shape2, length = LLL)
- if (LLL != length(pobs0))
- pobs0 <- rep(pobs0, length = LLL)
- if (LLL != length(pobs1))
- pobs1 <- rep(pobs1, length = LLL)
+ if (LLL != length(q)) q <- rep_len(q, LLL)
+ if (LLL != length(shape1)) shape1 <- rep_len(shape1, LLL)
+ if (LLL != length(shape2)) shape2 <- rep_len(shape2, LLL)
+ if (LLL != length(pobs0)) pobs0 <- rep_len(pobs0, LLL)
+ if (LLL != length(pobs1)) pobs1 <- rep_len(pobs1, LLL)
k3 <- (pobs0 < -tol | pobs1 < -tol |
- (pobs0 + pobs1) > (1 + tol))
+ (pobs0 + pobs1) > (1 + tol))
k4 <- is.na(pobs0) | is.na(pobs1)
- ans <- rep(NA, length = LLL)
+ ans <- rep_len(NA_real_, LLL)
ans[!k3 & !k4] <- pbeta(q[!k3 & !k4],
shape1[!k3 & !k4],
shape2[!k3 & !k4], log.p = TRUE) +
@@ -1855,25 +1849,21 @@ pozibeta <- function(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
}
-qozibeta <- function(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
+
+qzoabeta <- function(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
lower.tail = TRUE, log.p = FALSE,
tol = .Machine$double.eps) {
LLL <- max(length(p), length(shape1),
length(shape2), length(pobs0), length(pobs1))
- if (LLL != length(p))
- p <- rep(p, length = LLL)
- if (LLL != length(shape1))
- shape1 <- rep(shape1, length = LLL)
- if (LLL != length(shape2))
- shape2 <- rep(shape2, length = LLL)
- if (LLL != length(pobs0))
- pobs0 <- rep(pobs0, length = LLL)
- if (LLL != length(pobs1))
- pobs1 <- rep(pobs1, length = LLL)
+ if (LLL != length(p)) p <- rep_len(p, LLL)
+ if (LLL != length(shape1)) shape1 <- rep_len(shape1, LLL)
+ if (LLL != length(shape2)) shape2 <- rep_len(shape2, LLL)
+ if (LLL != length(pobs0)) pobs0 <- rep_len(pobs0, LLL)
+ if (LLL != length(pobs1)) pobs1 <- rep_len(pobs1, LLL)
k0 <- (pobs0 < -tol | pobs1 < -tol |
- (pobs0 + pobs1) > (1 + tol))
+ (pobs0 + pobs1) > (1 + tol))
k4 <- is.na(pobs0) | is.na(pobs1)
- ans <- rep(NA, length = LLL)
+ ans <- rep_len(NA_real_, LLL)
if (!lower.tail & log.p) {
p <- -expm1(p)
} else{
@@ -1924,25 +1914,19 @@ log1pexp <- function(x){
-dozibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
+dzoibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
pstrsize = 0, log = FALSE) {
log.arg <- log
rm(log)
LLL <- max(length(x), length(size), length(shape1),
length(shape2), length(pstr0), length(pstrsize))
- if (LLL != length(x))
- x <- rep(x, length = LLL)
- if (LLL != length(size))
- size <- rep(size, length = LLL)
- if (LLL != length(shape1))
- shape1 <- rep(shape1, length = LLL)
- if (LLL != length(shape2))
- shape2 <- rep(shape2, length = LLL)
- if (LLL != length(pstr0))
- pstr0 <- rep(pstr0, length = LLL)
- if (LLL != length(pstrsize))
- pstrsize <- rep(pstrsize, length = LLL)
- ans <- rep(NA, length = LLL)
+ if (LLL != length(x)) x <- rep_len(x, LLL)
+ if (LLL != length(size)) size <- rep_len(size, LLL)
+ if (LLL != length(shape1)) shape1 <- rep_len(shape1, LLL)
+ if (LLL != length(shape2)) shape2 <- rep_len(shape2, LLL)
+ if (LLL != length(pstr0)) pstr0 <- rep_len(pstr0, LLL)
+ if (LLL != length(pstrsize)) pstrsize <- rep_len(pstrsize, LLL)
+ ans <- rep_len(NA_real_, LLL)
k1 <- pstr0 < 0 | pstrsize < 0 |
(pstr0 + pstrsize) > 1
k <- is.na(size) | is.na(shape1) | is.na(shape2) |
@@ -1971,16 +1955,16 @@ dozibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
-dozibetabinom <- function(x, size, prob, rho = 0, pstr0 = 0,
+dzoibetabinom <- function(x, size, prob, rho = 0, pstr0 = 0,
pstrsize = 0, log = FALSE) {
- dozibetabinom.ab(x, size, shape1 = prob * (1 - rho) / rho,
+ dzoibetabinom.ab(x, size, shape1 = prob * (1 - rho) / rho,
shape2 = (1 - prob) * (1 - rho) / rho,
pstr0 = pstr0, pstrsize = pstrsize, log = log)
}
-rozibetabinom.ab <- function(n, size, shape1, shape2,
+rzoibetabinom.ab <- function(n, size, shape1, shape2,
pstr0 = 0, pstrsize = 0) {
use.n <- if ((length.n <- length(n)) > 1) {
length.n
@@ -1992,14 +1976,14 @@ rozibetabinom.ab <- function(n, size, shape1, shape2,
n
}
}
- size <- rep(size, length.out = use.n)
- shape1 <- rep(shape1, length.out = use.n)
- shape2 <- rep(shape2, length.out = use.n)
- pstr0 <- rep(pstr0, length.out = use.n)
- pstrsize <- rep(pstrsize, length.out = use.n)
+ size <- rep_len(size, use.n)
+ shape1 <- rep_len(shape1, use.n)
+ shape2 <- rep_len(shape2, use.n)
+ pstr0 <- rep_len(pstr0, use.n)
+ pstrsize <- rep_len(pstrsize, use.n)
+ ans <- rep_len(NA_real_, use.n)
k <- is.na(size) | is.na(shape1) | is.na(shape2) |
- is.na(pstr0) | is.na(pstrsize)
- ans <- rep(NA, length = use.n)
+ is.na(pstr0) | is.na(pstrsize)
k1 <- pstr0 < 0 | pstrsize < 0 |
(pstr0 + pstrsize) > 1
random.number <- runif(use.n)
@@ -2021,9 +2005,9 @@ rozibetabinom.ab <- function(n, size, shape1, shape2,
-rozibetabinom <- function(n, size, prob, rho = 0, pstr0 = 0,
+rzoibetabinom <- function(n, size, prob, rho = 0, pstr0 = 0,
pstrsize = 0) {
- rozibetabinom.ab(n, size, shape1 = prob * (1 - rho) / rho,
+ rzoibetabinom.ab(n, size, shape1 = prob * (1 - rho) / rho,
shape2 = (1 - prob) * (1 - rho) / rho,
pstr0 = pstr0,
pstrsize = pstrsize)
@@ -2031,26 +2015,20 @@ rozibetabinom <- function(n, size, prob, rho = 0, pstr0 = 0,
-pozibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
+pzoibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
pstrsize = 0, lower.tail = TRUE,
log.p = FALSE) {
LLL <- max(length(q), length(size), length(shape1),
length(shape2), length(pstr0), length(pstrsize))
- if (LLL != length(q))
- q <- rep(q, length = LLL)
- if (LLL != length(size))
- size <- rep(size, length = LLL)
- if (LLL != length(shape1))
- shape1 <- rep(shape1, length = LLL)
- if (LLL != length(shape2))
- shape2 <- rep(shape2, length = LLL)
- if (LLL != length(pstr0))
- pstr0 <- rep(pstr0, length = LLL)
- if (LLL != length(pstrsize))
- pstrsize <- rep(pstrsize, length = LLL)
+ if (LLL != length(q)) q <- rep_len(q, LLL)
+ if (LLL != length(size)) size <- rep_len(size, LLL)
+ if (LLL != length(shape1)) shape1 <- rep_len(shape1, LLL)
+ if (LLL != length(shape2)) shape2 <- rep_len(shape2, LLL)
+ if (LLL != length(pstr0)) pstr0 <- rep_len(pstr0, LLL)
+ if (LLL != length(pstrsize)) pstrsize <- rep_len(pstrsize, LLL)
+ ans <- rep_len(NA_real_, LLL)
k <- is.na(size) | is.na(shape1) | is.na(shape2) |
is.na(pstr0) | is.na(pstrsize) | is.na(q)
- ans <- rep(NA, length = LLL)
k1 <- pstr0 < 0 | pstrsize < 0 |
(pstr0 + pstrsize) > 1
if (sum(!k1 & !k) > 0)
@@ -2083,10 +2061,10 @@ pozibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
}
-pozibetabinom <- function(q, size, prob, rho,
+pzoibetabinom <- function(q, size, prob, rho,
pstr0 = 0, pstrsize = 0,
lower.tail = TRUE, log.p = FALSE) {
- pozibetabinom.ab(q, size, shape1 = prob * (1 - rho) / rho,
+ pzoibetabinom.ab(q, size, shape1 = prob * (1 - rho) / rho,
shape2 = (1 - prob) * (1 - rho) / rho,
pstr0 = pstr0, pstrsize = pstrsize,
lower.tail = lower.tail, log.p = log.p)
@@ -2104,3 +2082,224 @@ pozibetabinom <- function(q, size, prob, rho,
+
# Approximate expected information matrices (EIMs) for AR(1) processes,
# one per column ("species") of the input matrix, built by a recursive
# scheme (the tolerance arguments are named after Porat; presumably the
# Porat-Friedlander recursion for Gaussian time series -- TODO confirm
# against the reference).
#
# Arguments (as used below):
#   x         -- nn x NOS numeric matrix; one time series per column.
#   var.arg   -- logical; switches the form of the derivative vector s1
#                (TRUE: variance parameterization, FALSE: sd).
#   p.drift   -- drift parameter(s); recycled to NOS values.
#   WNsd      -- white-noise sd(s); coerced to an nn x NOS matrix.
#   ARcoeff1  -- AR(1) coefficient(s); coerced to an nn x NOS matrix.
#   eps.porat -- scalar tolerance in [0, 1e-2] controlling when the
#                recursion is truncated and extrapolated linearly.
#
# Returns an nn x NOS x (M + (M-1) + (M-2)) array (M = 3): per row and
# response, slots 1:M hold the diagonal of the per-observation EIM
# increment, the remaining slots its upper off-diagonals.
AR1EIM<- function(x = NULL,
                  var.arg = NULL,
                  p.drift = NULL,
                  WNsd = NULL,
                  ARcoeff1 = NULL,
                  eps.porat = 1e-2) {

  if (!is.matrix(x))
    stop("Argument 'x' must be a matrix.")

  yy <- x          # keep the full data; 'x' is reused per column below
  M <- 3           # number of parameters: scale, AR coefficient, drift
  nn <- nrow(x)
  nn0 <- numeric(0)  # truncation point of the recursion (empty = none)
  NOS <- ncol(x)

  # Recycle scalar/vector inputs into nn x NOS matrices.
  if (!is.matrix(WNsd))
    WNsd <- matrix(WNsd, nrow = nn, ncol = NOS, byrow = TRUE)

  if (!is.matrix(ARcoeff1))
    ARcoeff1 <- matrix(ARcoeff1, nrow = nn, ncol = NOS, byrow = TRUE)

  if (!is.Numeric(eps.porat, length.arg = 1) || eps.porat < 0 ||
      eps.porat > 1e-2)
    stop("Bad input for argument 'eps.porat'.")

  # Column means give one (sd, AR coefficient) value per response.
  # NOTE(review): 'sdTSR', 'Aux11' and (below) 'cAux2' are assigned but
  # never used afterwards.
  sdTSR <- colMeans(WNsd)
  sdTSv <- colMeans(WNsd)
  drift.v <- rep(p.drift, NOS)[1:NOS]
  Aux11 <- (NOS > 1)
  the1v <- colMeans(ARcoeff1)
  JFin <- array(0.0, dim = c(nn, NOS, M + (M - 1) + (M - 2) ))

  for (spp in 1:NOS) {

    # Work on one response at a time.
    x <- yy[, spp]
    the1 <- the1v[spp]
    drift.p <- drift.v[spp]
    sdTS <- sdTSv[spp]

    # Sample autocovariances r_0, ..., r_{nn-1}; the last slot is
    # recycled from lag 0 (r has length nn but only nn-1 lags exist).
    r <- numeric(nn)
    r <- AR1.gammas(x = x, lags = nn - 1)
    r[nn] <- r[1]

    # s0, s1, s2: derivatives of the autocovariance sequence w.r.t. the
    # three parameters (s0 for the drift is identically zero here).
    s0 <- numeric(nn)
    s1 <- numeric(nn)
    s1 <- if (var.arg) (the1^(0:(nn - 1))) / (1 - the1^2) else
      2 * (the1^(0:(nn - 1))) * sdTS / (1 - the1^2)

    s2 <- numeric(nn)
    help1 <- c(0:(nn - 1))
    s2 <- help1 * (the1^(help1 - 1)) * (sdTS^2) / (1 - the1^2) +
      2 * (sdTS^2) * (the1^(help1 + 1)) / (1 - the1^2)^2
    sMat <- cbind(s0, s1, s2)

    # J[, , t]: cumulative information matrix after t observations;
    # Jp[, , t]: its per-observation increment.
    J <- array(NA_real_,
               dim = c(length(the1) + 2, length(the1) + 2, nn))
    Jp <- array(NA_real_,
                dim = c(length(the1) + 2, length(the1) + 2, nn))

    # Levinson-Durbin-style state: prediction coefficients 'alpha',
    # prediction-error variance 'delta', and auxiliary sequences
    # 'eta'/'psi' carrying parameter derivatives.
    alpha <- numeric(nn)
    alpha[1] <- 1
    delta <- r[1]
    eta <- matrix(NA_real_, nrow = nn, ncol = M)
    eta[1, ] <- cbind(s0[1], s1[1], s2[1])

    psi <- matrix(0, nrow = nn, ncol = length(the1) + 2)
    psi[1, ] <- cbind(s0[1], s1[1], s2[1]) / r[1]

    # Mean-derivative terms.
    # NOTE(review): u0 uses the1v[1], not the1v[spp] -- for NOS > 1
    # every response reuses the first response's AR coefficient here;
    # looks like a bug, verify against the reference.
    u0 <- rep(1/(1 - sign(the1v[1]) * min(0.975, abs(the1v[1]))), nn )
    u1 <- rep(drift.p/(1 - the1)^2, nn)
    uMat <- cbind(u0, rep(0, nn), u1)

    # Initial information matrix from the first observation.
    aux1 <- matrix(sMat[1, ],
                   nrow = 2 + length(the1),
                   ncol = 2 + length(the1), byrow = TRUE)
    diag(aux1) <- sMat[1, ]
    J[, , 1] <- Jp[, , 1] <- aux1 * t(aux1) / (2 * r[1]^2)
    J[1, 1, 1] <- Jp[1, 1, 1] <- 1 / sdTS^2
    JFin[1, spp, 1:M] <- Jp[, , 1][row(Jp[, , 1]) == col(Jp[, , 1])]
    # Threshold below which entries are abs()'d at the end (tiny;
    # the 1.819 factor's provenance is unclear -- TODO confirm).
    Neps.porat <- 1.819*eps.porat*(1e-10)

    dk <- matrix(NA_real_, nrow = 1, ncol = length(the1) + 2)
    eR <- matrix(NA_real_, nrow = 1, ncol = length(the1) + 2)
    cAux2 <- d55 <- numeric(nn); d55[1] <- 0.1

    for (jay in 1:(nn - 1)) {

      # Reflection coefficient of the order-jay predictor.
      cAux <- as.numeric(alpha[1:jay] %*%
                r[2:(jay + 1)][length(r[2:(jay + 1)]):1])/delta

      dk <- alpha[1:jay] %*%
        sMat[2:(jay + 1), , drop = FALSE][length(sMat[2:(jay + 1)]):1, ]

      delta <- delta * (1 - cAux^2)
      d55[jay + 1] <- cAux^2

      # Truncate once the reflection coefficient is negligible (or
      # after 10 steps); remaining increments are extrapolated below.
      if ((d55[jay + 1] < eps.porat*1e-2) || (jay > 1e1)) {
        nn0 <- jay
        break
      }

      # Levinson-style order update of alpha and the derivative
      # sequences eta and psi (each combined with its own reversal).
      eta[jay + 1, ] <- dk
      tAux <- numeric(jay + 1)
      tAux <- alpha[1:(jay + 1)] -
        cAux * alpha[1:(jay + 1)][(jay + 1):1]
      alpha[1:(jay + 1)] <- tAux[1:(jay + 1)]

      eR <- alpha[1:(jay + 1)][(jay + 1):1] %*%
        eta[1:(jay + 1), , drop = FALSE]

      tAux <- eta[1:(jay + 1), ] -
        cAux * eta[1:(jay + 1), ][(jay + 1):1, ]

      eta[1:(jay + 1), ] <- tAux

      AuxE <- matrix(eR, nrow = jay + 1, ncol = M, byrow = TRUE)
      Aux3 <- matrix(alpha[1:(jay + 1)][(jay + 1):1],
                     nrow = jay + 1, ncol = M, byrow = FALSE)
      Aux4 <- matrix(alpha[1:(jay + 1)],
                     nrow = jay + 1, ncol = M, byrow = FALSE)
      tAux <- psi[1:(jay + 1), ] -
        cAux * psi[1:(jay + 1), ][(jay + 1):1, ] +
        AuxE * (Aux3 - cAux * Aux4) / delta

      # NOTE(review): any() collapses each dim() to one logical, so
      # this compares two scalars and can never catch a genuine shape
      # mismatch; probably identical(dim(...), dim(...)) was intended.
      if (any(dim(psi[1:(jay + 1), ])) != any(dim(tAux)) )
        stop("Invalids 'psi' and 'tAux'.")

      psi[1:(jay + 1), ] <- tAux
      fk <- alpha[1:(jay + 1)] %*% eta[1:(jay + 1), ]
      gk <- alpha[1:(jay + 1)][(jay + 1):1] %*% uMat[1:(jay + 1), ]

      # Accumulate the information matrix for observation jay + 1.
      Auxf <- matrix(fk, nrow = M, ncol = M, byrow = FALSE)
      Auxg <- matrix(gk, nrow = M, ncol = M, byrow = FALSE)
      J[, , jay + 1] <-
        J[, , jay] + t(eta[1:(jay + 1), ]) %*% psi[1:(jay + 1), ] /
        delta - 0.5 * Auxf * t(Auxf) / delta^2 +
        Auxg * t(Auxg) / delta

      Jp[, , jay + 1] <- J[, , jay + 1] - J[, , jay]
      # Store the diagonal of the increment ...
      JFin[jay + 1, spp , 1:M ] <-
        Jp[, , jay + 1][col(Jp[, , jay + 1]) == row(Jp[, , jay + 1])]

      # ... then its upper off-diagonals, band by band (kk = 1, 2).
      helpC <- numeric(0)
      for (kk in 1:(M - 1)) {
        TF1 <- ( col(Jp[, , jay + 1]) >= row(Jp[, , jay + 1]) )
        TF2 <- (abs(col(Jp[, , jay + 1]) - row(Jp[, , jay + 1])) == kk )
        helpC <- c(helpC, Jp[, , jay + 1][TF1 & TF2])
      }
      rm(TF1, TF2)

      JFin[jay + 1, spp , -(1:M) ] <- helpC
    }

    # If the recursion was truncated at nn0, extrapolate the remaining
    # cumulative matrices linearly with the last computed increment.
    if (length(nn0))
      for (kk in nn0:(nn - 1)) {
        J[, , kk + 1] <- J[, , nn0] + (kk - nn0 + 1) * Jp[, , nn0]
        Jp[, , kk + 1] <- J[, , kk + 1] - J[, , kk]

        JFin[kk + 1, spp , 1:M ] <-
          Jp[, , kk + 1][col(Jp[, , kk + 1]) == row(Jp[, , kk + 1])]

        helpC <- numeric(0)
        for (ll in 1:(M - 1)) {
          TF1 <- ( col(Jp[, , kk + 1]) >= row(Jp[, , kk + 1]) )
          TF2 <- (abs(col(Jp[, , kk + 1]) - row(Jp[, , kk + 1])) == ll)
          helpC <- c(helpC, Jp[, , kk + 1][TF1 & TF2])
        }
        rm(TF1, TF2)
        JFin[kk + 1, spp , -(1:M) ] <- helpC
      }
    # Replace entries at or below the tiny threshold (including small
    # negatives from rounding) by their absolute values.
    JFin[which(JFin <= Neps.porat)] <-
      abs( JFin[which(JFin <= Neps.porat)])
  }

  JFin

} # End
+
+
+
+
+
+
# Sample (cross-)covariances of a time series at lags 0, 1, ..., lags.
#
# x:    numeric vector (or one-column matrix), the primary series.
# y:    optional second series of the same length; when supplied, the
#       lag-k entry is cov(x_{k+1..n}, y_{1..n-k}) rather than the
#       autocovariance of 'x'.
# lags: single non-negative integer, the maximum lag.
#
# Returns a numeric vector of length lags + 1, lag 0 first.
AR1.gammas <- function(x, y = NULL, lags = 1) {
  xx <- matrix(x, ncol = 1)
  nx <- nrow(xx)

  # Lag 0 has always been accepted (see the 'lags > 0' guard below), so
  # the condition and message say "non-negative" rather than the old,
  # inaccurate "positive".  The length-1 check also rejects vector
  # 'lags', which previously slipped past and broke numeric(lags + 1).
  if (!is.numeric(lags) || length(lags) != 1 || is.na(lags) ||
      lags < 0 || lags != round(lags))
    stop("'lags' must be a non-negative integer.")

  if (length(y)) {
    yy <- matrix(y, ncol = 1)
    ny <- nrow(yy)
    if (nx != ny)
      stop("Number of rows differs.")
    n <- nx
  } else {
    yy <- xx
    n <- nrow(xx)
  }

  myD <- numeric(lags + 1)
  myD[1] <- if (length(y)) cov(xx, yy) else cov(xx, xx)  # i.e., var(xx)
  if (lags > 0)
    for (ii in 1:lags)
      myD[ii + 1] <- cov(xx[-(1:ii), 1], yy[1:(n - ii), 1])

  myD
}
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.positive.R b/R/family.positive.R
index 0f49c8e..c1425a1 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -45,8 +45,8 @@ N.hat.posbernoulli <-
jay.index <-
switch(model.type,
- "0" = rep(1, length = tau),
- "b" = rep(1, length = tau), # Subset: 2 out of 1:2
+ "0" = rep_len(1, tau),
+ "b" = rep_len(1, tau), # Subset: 2 out of 1:2
"t" = 1:tau, # All of them
"tb" = 1:tau) # Subset: first tau of them out of M = 2*tau-2
@@ -101,14 +101,14 @@ N.hat.posbernoulli <-
ncol.X.vlm <- nrow(R)
rinv <- diag(ncol.X.vlm)
rinv <- backsolve(R, rinv)
- rowlen <- drop(((rinv^2) %*% rep(1, ncol.X.vlm))^0.5)
+ rowlen <- drop(((rinv^2) %*% rep_len(1, ncol.X.vlm))^0.5)
covun <- rinv %*% t(rinv)
vecTF <- FALSE
for (jay in 1:tau) {
linpred.index <- jay.index[jay]
vecTF <- vecTF | (Hmatrices[linpred.index, ] != 0)
}
- vecTF.index <- (1:length(vecTF))[vecTF]
+ vecTF.index <- (seq_along(vecTF))[vecTF]
covun <- covun[vecTF.index, vecTF.index, drop = FALSE]
dvect <- dvect[vecTF.index, drop = FALSE]
}
@@ -228,7 +228,7 @@ rposbern <-
if (is.null(Xmatrix)) {
- Xmatrix <- cbind(x1 = rep(1.0, len = use.n))
+ Xmatrix <- cbind(x1 = rep_len(1.0, use.n))
if (pvars > 1)
Xmatrix <- cbind(Xmatrix,
matrix(runif(n = use.n * (pvars-1)),
@@ -243,8 +243,8 @@ rposbern <-
lin.pred.baseline <- lin.pred.baseline +
Xmatrix[, 2:pvars, drop = FALSE] %*%
xcoeff[2:pvars]
- sumrowy <- rep(0, length = use.n)
- cap.effect <- rep(cap.effect.orig, length = use.n)
+ sumrowy <- rep_len(0, use.n)
+ cap.effect <- rep_len(cap.effect.orig, use.n)
for (jlocal in 1:nTimePts) {
CHmatrix[, jlocal] <- as.numeric(sumrowy > 0)
@@ -349,9 +349,9 @@ dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
LLL <- max(length(x), length(prob), length(size))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(prob) != LLL) prob <- rep(prob, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
index0 <- (x == 0)
@@ -366,6 +366,10 @@ dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
size = size[!index0], prob = prob[!index0],
lower.tail = FALSE)
}
+
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
+
ans
}
@@ -379,17 +383,18 @@ pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
prob <- size / (size + munb)
}
L <- max(length(q), length(prob), length(size))
- if (length(q) != L)
- q <- rep(q, length.out = L)
- if (length(prob) != L)
- prob <- rep(prob, length.out = L)
- if (length(size) != L)
- size <- rep(size, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(prob) != L) prob <- rep_len(prob, L)
+ if (length(size) != L) size <- rep_len(size, L)
- ifelse(q < 1, 0,
- (pnbinom(q, size = size, prob = prob) -
- dnbinom(0, size = size, prob = prob))
+ ans <- ifelse(q < 1, 0, (pnbinom(q, size = size, prob = prob) -
+ dnbinom(0, size = size, prob = prob))
/ pnbinom(0, size = size, prob = prob, lower.tail = FALSE))
+
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
+
+ ans
}
@@ -407,9 +412,13 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
lower.tail = FALSE) * p +
dnbinom(x = 0, size = size, prob = prob),
size = size, prob = prob)
- ans[p > 1] <- NaN
- ans[p < 0] <- NaN
ans[p == 1] <- Inf
+
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
+
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
ans
}
@@ -531,6 +540,14 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
# Weighted log-likelihood of the positive negative binomial
# distribution at (munb = munbval, size = sizeval), used as the
# objective function for grid.search2() when initializing
# posnegbinomial().  Arguments 'x' and 'extraargs' are part of the
# grid.search2() calling convention and are not used here.
posNBD.Loglikfun2 <- function(munbval, sizeval,
                              y, x, w, extraargs) {
  logdens <- dposnegbin(x = y, munb = munbval,
                        size = sizeval, log = TRUE)
  sum(c(w) * logdens)
}
+
+
+
posnegbinomial.control <- function(save.weights = TRUE, ...) {
list(save.weights = save.weights)
}
@@ -541,6 +558,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
function(
zero = "size",
type.fitted = c("mean", "munb", "prob0"),
+ mds.min = 1e-3,
nsimEIM = 500,
cutoff.prob = 0.999, # higher is better for large 'size'
eps.trig = 1e-7,
@@ -549,13 +567,19 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
lmunb = "loge", lsize = "loge",
imethod = 1,
imunb = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
+ iprobs.y = NULL, # 0.35,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
isize = NULL,
- gsize.mux = exp((-12:6)/2)) {
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
@@ -589,7 +613,6 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
namesof("size", lsize, earg = esize ), "\n",
"Mean: munb / (1 - (size / (size + munb))^size)"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -598,6 +621,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
list(M1 = 2,
Q1 = 1,
expected = TRUE,
+ mds.min = .mds.min ,
multipleResponses = TRUE,
parameters.names = c("munb", "size"),
nsimEIM = .nsimEIM ,
@@ -611,14 +635,15 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
}, list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
.emunb = emunb, .esize = esize,
.zero = zero, .nsimEIM = nsimEIM,
- .ishrinkage = ishrinkage, .eps.trig = eps.trig,
+ .eps.trig = eps.trig,
.imethod = imethod,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted,
+ .mds.min = mds.min))),
initialize = eval(substitute(expression({
M1 <- 2
- temp5 <-
+ temp12 <-
w.y.check(w = w, y = y,
Is.integer.y = TRUE,
Is.positive.y = TRUE,
@@ -627,46 +652,65 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp12$w
+ y <- temp12$y
M <- M1 * ncol(y)
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$type.fitted <- .type.fitted
- extra$dimnamesy <- dimnames(y)
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
predictors.names <- c(
namesof(param.names("munb", NOS), .lmunb , earg = .emunb , tag = FALSE),
namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- probs.y = .probs.y )
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun <- function(kval, y, x, w, extraargs) {
- munb <- extraargs
- sum(c(w) * dposnegbin(x = y, mu = munb, size = kval, log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- size.init[, jay] <-
- grid.search(size.grid,
- objfun = posnegbinomial.Loglikfun,
- y = y[, jay], # x = x,
- w = w[, jay],
- extraargs = munb.init[, jay])
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(y[, jay], probs = gprobs.y) - 1/2 # + 1/16
+ } else {
+ weighted.mean(y[, jay], w = w[, jay]) - 1/2
}
- }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(y[, jay], w = w[, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = posNBD.Loglikfun2,
+ y = y[, jay], w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+
@@ -676,10 +720,11 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
theta2eta(size.init, .lsize , earg = .esize ))
etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
- }), list( .lmunb = lmunb, .lsize = lsize,
+ }), list( .lmunb = lmunb, .lsize = lsize,
.imunb = imunb, .isize = isize,
- .emunb = emunb, .esize = esize, .gsize.mux = gsize.mux,
- .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .emunb = emunb, .esize = esize,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+ .iprobs.y = iprobs.y,
.imethod = imethod,
.type.fitted = type.fitted ))),
@@ -697,12 +742,11 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
munb <- eta2theta(eta[, TF, drop = FALSE], .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, !TF, drop = FALSE], .lsize , earg = .esize )
-
tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
prob0 <- tempk^kmat
oneminusf0 <- 1 - prob0
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
oneminusf0[big.size] <- -expm1(-munb[big.size])
@@ -725,10 +769,11 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
}
ans
}, list( .lsize = lsize, .lmunb = lmunb,
- .esize = esize, .emunb = emunb ))),
+ .esize = esize, .emunb = emunb,
+ .mds.min = mds.min ))),
last = eval(substitute(expression({
- temp0303 <- c(rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))
+ temp0303 <- c(rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS))
names(temp0303) <- c(param.names("munb", NOS),
param.names("size", NOS))
temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
@@ -746,13 +791,11 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
- misc$ishrinkage <- .ishrinkage
misc$multipleResponses <- TRUE
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize,
.cutoff.prob = cutoff.prob,
.max.chunk.MB = max.chunk.MB,
- .ishrinkage = ishrinkage,
.nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
@@ -797,25 +840,25 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
.emunb = emunb, .esize = esize ))),
- validparams = eval(substitute(function(eta, extra = NULL) {
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.lmunb , earg = .emunb )
size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.lsize , earg = .esize )
- smallval <- 1e-2
- ans <- all(is.finite(munb)) && all(munb > 0) &&
- all(is.finite(size)) && all(size > 0) &&
- (overdispersion <- all(munb / size > smallval))
+ smallval <- .mds.min # .munb.div.size
+ okay1 <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0)
+ overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
if (!overdispersion)
- warning("parameter 'size' has very large values; ",
- "replacing them by an arbitrary large value within ",
- "the parameter space. Try fitting a positive-Poisson ",
- "model instead.")
- ans
+ warning("parameter 'size' has very large values; ",
+ "try fitting a positive-Poisson ",
+ "model instead.")
+ okay1 && overdispersion
}, list( .lmunb = lmunb, .emunb = emunb,
- .lsize = lsize, .esize = esize))),
-
+ .lsize = lsize, .esize = esize,
+ .mds.min = mds.min))),
deriv = eval(substitute(expression({
@@ -827,8 +870,9 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
kmat <- eta2theta(eta[, !TFvec, drop = FALSE], .lsize , earg = .esize )
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
"try fitting a positive-Poisson ",
"model instead")
@@ -870,7 +914,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
smallno <- 1e-6
- if (FALSE && all(near.boundary <- oneminusf0 < smallno)) {
+ if (TRUE && any(near.boundary <- oneminusf0 < smallno)) {
warning("solution near the boundary; either there is no need ",
"to fit a positive NBD or the distribution is centred ",
"on the value 1")
@@ -889,7 +933,6 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
if (any(big.size)) {
- dl.dsize[big.size] <- 1e-8 # A small number
}
@@ -898,7 +941,8 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
dl.dsize * dsize.deta)
myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmunb = lmunb, .lsize = lsize,
- .emunb = emunb, .esize = esize ))),
+ .emunb = emunb, .esize = esize,
+ .mds.min = mds.min ))),
weight = eval(substitute(expression({
@@ -1039,16 +1083,16 @@ dposgeom <- function(x, prob, log = FALSE) {
pposgeom <- function(q, prob) {
- if (!is.Numeric(prob, positive = TRUE))
- stop("bad input for argument 'prob'")
L <- max(length(q), length(prob))
- if (length(q) != L) q <- rep(q, length.out = L)
- if (length(prob) != L) prob <- rep(prob, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(prob) != L) prob <- rep_len(prob, L)
+
+ ans <- ifelse(q < 1, 0, (pgeom(q, prob) - dgeom(0, prob))
+ / pgeom(0, prob, lower.tail = FALSE))
+ ans[prob == 1] <- NaN
+ ans[prob == 0] <- NaN
+ ans
- ifelse(q < 1, 0,
- (pgeom(q, prob) -
- dgeom(0, prob))
- / pgeom(0, prob, lower.tail = FALSE))
}
@@ -1058,19 +1102,25 @@ qposgeom <- function(p, prob) {
- ans <- qgeom(pgeom(0, prob, lower.tail = FALSE) * p +
- dgeom(0, prob),
- prob = prob)
- ans[p > 1] <- NaN
- ans[p < 0] <- NaN
+ ans <- qgeom(pgeom(0, prob, lower.tail = FALSE) * p + dgeom(0, prob),
+ prob)
+
ans[p == 1] <- Inf
+
+ ans[p <= 0] <- NaN
+ ans[1 < p] <- NaN
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
ans
}
rposgeom <- function(n, prob) {
- qgeom(p = runif(n, min = dgeom(0, prob)), prob)
+ ans <- qgeom(p = runif(n, min = dgeom(0, prob)), prob)
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
+ ans
}
@@ -1081,17 +1131,16 @@ rposgeom <- function(n, prob) {
+
dpospois <- function(x, lambda, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(lambda, positive = TRUE))
- stop("bad input for argument 'lambda'")
L <- max(length(x), length(lambda))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(lambda) != L) lambda <- rep(lambda, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
ans <- if (log.arg) {
ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
@@ -1099,24 +1148,25 @@ dpospois <- function(x, lambda, log = FALSE) {
} else {
ifelse(x == 0, 0, -dpois(x, lambda) / expm1(-lambda))
}
+ ans[lambda <= 0] <- NaN
ans
}
+
ppospois <- function(q, lambda) {
- if (!is.Numeric(lambda, positive = TRUE))
- stop("bad input for argument 'lambda'")
L <- max(length(q), length(lambda))
- if (length(q) != L) q <- rep(q, length.out = L)
- if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
- ifelse(q < 1, 0,
- (ppois(q, lambda) -
- dpois(0, lambda))
- / ppois(0, lambda, lower.tail = FALSE))
+ ans <- ifelse(q < 1, 0, (ppois(q, lambda) - dpois(0, lambda))
+ / ppois(0, lambda, lower.tail = FALSE))
+ ans[lambda <= 0] <- NaN
+ ans
}
+
qpospois <- function(p, lambda) {
@@ -1124,9 +1174,11 @@ qpospois <- function(p, lambda) {
dpois(0, lambda),
lambda = lambda)
- ans[p > 1] <- NaN
- ans[p < 0] <- NaN
ans[p == 1] <- Inf
+
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
+ ans[lambda <= 0] <- NaN
ans
}
@@ -1134,15 +1186,20 @@ qpospois <- function(p, lambda) {
rpospois <- function(n, lambda) {
- qpois(p = runif(n, min = dpois(0, lambda)), lambda)
+ ans <- qpois(p = runif(n, min = dpois(0, lambda)), lambda)
+ ans[lambda <= 0] <- NaN
+ ans
}
rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
- if (!is.null(munb)) {
+ ans <- if (!is.null(munb)) {
if (!is.null(prob))
stop("'prob' and 'mu' both specified")
+
+ prob <- size / (size + munb)
+
qnbinom(p = runif(n,
min = dnbinom(0, size, mu = munb)),
size, mu = munb)
@@ -1151,6 +1208,9 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
min = dnbinom(0, size, prob = prob )),
size, prob = prob )
}
+ ans[prob == 0] <- NaN
+ ans[prob == 1] <- NaN
+ ans
}
@@ -1264,7 +1324,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , len = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -1340,6 +1400,37 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
+
+dposbinom <- function(x, size, prob, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ L <- max(length(x), length(size), length(prob))
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(size) != L) size <- rep_len(size, L)
+ if (length(prob) != L) prob <- rep_len(prob, L)
+
+ answer <- NaN * x
+ is0 <- (x == 0)
+ ok2 <- (prob > 0) & (prob <= 1) &
+ (size == round(size)) & (size > 0)
+
+ answer <- dbinom(x = x, size = size, prob = prob, log = TRUE) -
+ log1p(-dbinom(x = 0, size = size, prob = prob))
+ answer[!ok2] <- NaN
+ if (log.arg) {
+ answer[is0 & ok2] <- log(0.0)
+ } else {
+ answer <- exp(answer)
+ answer[is0 & ok2] <- 0.0
+ }
+ answer
+}
+
+
+
pposbinom <- function(q, size, prob
) {
@@ -1347,14 +1438,13 @@ pposbinom <- function(q, size, prob
if (!is.Numeric(prob, positive = TRUE))
stop("no zero or non-numeric values allowed for argument 'prob'")
L <- max(length(q), length(size), length(prob))
- if (length(q) != L) q <- rep(q, length.out = L)
- if (length(size) != L) size <- rep(size, length.out = L)
- if (length(prob) != L) prob <- rep(prob, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(size) != L) size <- rep_len(size, L)
+ if (length(prob) != L) prob <- rep_len(prob, L)
ifelse(q < 1, 0,
- (pbinom(q = q, size = size, prob = prob) -
- dbinom(x = 0, size = size, prob = prob))
- / pbinom(q = 0, size = size, prob = prob, lower.tail = FALSE))
+ (pbinom(q = q, size, prob) - dbinom(x = 0, size, prob))
+ / pbinom(q = 0, size, prob, lower.tail = FALSE))
}
@@ -1363,14 +1453,17 @@ qposbinom <- function(p, size, prob
-
ans <- qbinom(pbinom(0, size, prob, lower.tail = FALSE) * p +
dbinom(0, size, prob),
- size = size, prob = prob)
+ size, prob)
- ans[p > 1] <- NaN
- ans[p < 0] <- NaN
ans[p == 1] <- size[p == 1]
+
+ ans[p == 0] <- 1
+ ans[prob == 0] <- NaN
+
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
ans
}
@@ -1382,38 +1475,6 @@ rposbinom <- function(n, size, prob) {
-dposbinom <- function(x, size, prob, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
-
-
- L <- max(length(x), length(size), length(prob))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(size) != L) size <- rep(size, len = L)
- if (length(prob) != L) prob <- rep(prob, len = L)
-
- answer <- NaN * x
- is0 <- (x == 0)
- ok2 <- (prob > 0) & (prob <= 1) &
- (size == round(size)) & (size > 0)
-
- answer <- dbinom(x = x, size = size, prob = prob, log = TRUE) -
- log1p(-dbinom(x = 0, size = size, prob = prob))
- answer[!ok2] <- NaN
- if (log.arg) {
- answer[is0 & ok2] <- log(0.0)
- } else {
- answer <- exp(answer)
- answer[is0 & ok2] <- 0.0
- }
- answer
-}
-
-
-
-
-
posbinomial <-
@@ -1453,7 +1514,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
c(namesof("prob1", link, earg = earg, tag = FALSE),
",...,",
namesof("probM", link, earg = earg, tag = FALSE)) else
- namesof("prob", link, earg = earg, tag = FALSE),
+ namesof("prob", link, earg = earg, tag = FALSE),
"\n"),
constraints = eval(substitute(expression({
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
@@ -1574,7 +1635,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
extra$w <- NULL # Kill it off
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- if (M > 1) dn2 else "prob"
misc$earg <- vector("list", M)
@@ -1687,7 +1748,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
deriv = eval(substitute(expression({
use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else
- rep(1, n)
+ rep_len(1, n)
nvec <- if ( .multiple.responses ) {
w
@@ -1877,7 +1938,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
extra$w <- NULL # Kill it off
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- if (M > 1) dn2 else "prob"
misc$earg <- vector("list", M)
@@ -2449,8 +2510,8 @@ dposbinom <- function(x, size, prob, log = FALSE) {
cm1.b <-
cmk.b <- rbind(matrix(0, tau, tau-1), diag(tau-1))
- con.b <- cm.VGAM(matrix(c(rep(0, len = tau ),
- rep(1, len = tau-1)), M, 1), x = x,
+ con.b <- cm.VGAM(matrix(c(rep_len(0, tau ),
+ rep_len(1, tau-1)), M, 1), x = x,
bool = .parallel.b , # Same as .parallel.b
constraints = constraints.orig,
apply.int = .apply.parint.b , # FALSE,
@@ -2458,7 +2519,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
cm.intercept.default = cm1.b)
con.use <- con.b
- for (klocal in 1:length(con.b)) {
+ for (klocal in seq_along(con.b)) {
con.use[[klocal]] <-
cbind(if (any(con.d[[klocal]] == 1)) NULL else con.b[[klocal]],
con.t[[klocal]])
@@ -2563,7 +2624,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
if (length( .iprob ))
matrix( .iprob , n, M, byrow = TRUE) else
if (length(mustart.orig))
- matrix(rep(mustart.orig, length = n * M), n, M) else
+ matrix(rep_len(mustart.orig, n * M), n, M) else
mustart # Already n x M
} else {
matrix(runif(n * M), n, M)
@@ -2645,7 +2706,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
extra$w <- NULL # Kill it off
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- c(dn2.cap, dn2.recap)
misc$earg <- vector("list", M)
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 1ce841f..8879d42 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -459,8 +459,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
dy.dpsi.yeojohnson <- function(psi, lambda) {
L <- max(length(psi), length(lambda))
- if (length(psi) != L) psi <- rep(psi, length.out = L)
- if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
+ if (length(psi) != L) psi <- rep_len(psi, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
ifelse(psi > 0, (1 + psi * lambda)^(1/lambda - 1),
(1 - (2-lambda) * psi)^((lambda - 1) / (2-lambda)))
@@ -469,8 +469,8 @@ dy.dpsi.yeojohnson <- function(psi, lambda) {
dyj.dy.yeojohnson <- function(y, lambda) {
L <- max(length(y), length(lambda))
- if (length(y) != L) y <- rep(y, length.out = L)
- if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
+ if (length(y) != L) y <- rep_len(y, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
ifelse(y>0, (1 + y)^(lambda - 1), (1 - y)^(1 - lambda))
}
@@ -489,10 +489,8 @@ dyj.dy.yeojohnson <- function(y, lambda) {
if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE))
stop("argument 'epsilon' must be a single positive number")
L <- max(length(lambda), length(y))
- if (length(y) != L)
- y <- rep(y, length.out = L)
- if (length(lambda) != L)
- lambda <- rep(lambda, length.out = L) # lambda may be of length 1
+ if (length(y) != L) y <- rep_len(y, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
if (inverse) {
if (derivative != 0)
@@ -549,10 +547,10 @@ dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
stop("argument 'smallno' must be a single positive number")
L <- max(length(psi), length(lambda), length(mymu), length(sigma))
- if (length(psi) != L) psi <- rep(psi, length.out = L)
- if (length(lambda) != L) lambda <- rep(lambda, length.out = L)
- if (length(mymu) != L) mymu <- rep(mymu, length.out = L)
- if (length(sigma) != L) sigma <- rep(sigma, length.out = L)
+ if (length(psi) != L) psi <- rep_len(psi, L)
+ if (length(lambda) != L) lambda <- rep_len(lambda, L)
+ if (length(mymu) != L) mymu <- rep_len(mymu, L)
+ if (length(sigma) != L) sigma <- rep_len(sigma, L)
answer <- matrix(NA_real_, L, derivative+1)
CC <- psi >= 0
@@ -797,7 +795,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
y = y.tx, w = w, df = .idf.mu )
c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
- rep(weighted.mean(y, w), length.out = n)
+ rep_len(weighted.mean(y, w), n)
}
sigma.init <- if (!is.Numeric(.isigma)) {
@@ -1016,7 +1014,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
y = y.tx, w = w, df = .idf.mu )
fv.init <- c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
- fv.init <- rep(weighted.mean(y, w), length.out = n)
+ fv.init <- rep_len(weighted.mean(y, w), n)
}
sigma.init <- if (!is.Numeric( .isigma )) {
@@ -1198,7 +1196,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
UU <- pmax(discontinuity, 0)
if (FALSE) {
AA <- (UU-LL)/2
- for (kk in 1:length(gleg.wts)) {
+ for (kk in seq_along(gleg.wts)) {
temp1 <- AA * gleg.wts[kk]
abscissae <- (UU+LL)/2 + AA * gleg.abs[kk]
psi <- mymu + sqrt(2) * sigma * abscissae
@@ -1228,7 +1226,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
- for (kk in 1:length(sgh.wts)) {
+ for (kk in seq_along(sgh.wts)) {
abscissae <- sign(-discontinuity) * sgh.abs[kk]
psi <- mymu + sqrt(2) * sigma * abscissae # abscissae = z
@@ -1243,7 +1241,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
}
temp1 <- exp(-discontinuity^2)
- for (kk in 1:length(glag.wts)) {
+ for (kk in seq_along(glag.wts)) {
abscissae <- sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
psi <- mymu + sqrt(2) * sigma * abscissae
temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
@@ -1390,9 +1388,9 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
if (!length(etastart)) {
mean.init <-
if ( .imethod == 1)
- rep(median(y), length = n) else
+ rep_len(median(y), n) else
if ( .imethod == 2 || .imethod == 3)
- rep(weighted.mean(y, w), length = n) else {
+ rep_len(weighted.mean(y, w), n) else {
junk <- lm.wfit(x = x, y = c(y), w = c(w))
junk$fitted
}
@@ -1419,7 +1417,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
ans
}, list( .lexpectile = lexpectile, .eexpectile = eexpectile ))),
last = eval(substitute(expression({
- misc$link <- rep(.lexpectile , length = M)
+ misc$link <- rep_len(.lexpectile , M)
names(misc$link) <- extra$y.names
misc$earg <- vector("list", M)
@@ -1550,9 +1548,9 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
if (!length(etastart)) {
mean.init <- if ( .imethod == 2)
- rep(median(y), length = n) else
+ rep_len(median(y), n) else
if ( .imethod == 1)
- rep(weighted.mean(y, w), length = n) else {
+ rep_len(weighted.mean(y, w), n) else {
junk = lm.wfit(x = x, y = c(y), w = c(w))
abs(junk$fitted)
}
@@ -1574,7 +1572,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
misc$parallel <- .parallel
- misc$link <- rep(.link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- extra$y.names
misc$earg <- vector("list", M)
@@ -1692,7 +1690,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
if (NCOL(y) == 1) {
if (is.factor(y)) y <- y != levels(y)[1]
- nn <- rep(1, n)
+ nn <- rep_len(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
if (!length(mustart) && !length(etastart))
@@ -1740,7 +1738,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
mu.ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep(.link , length = M)
+ misc$link <- rep_len(.link , M)
names(misc$link) <- extra$y.names
misc$earg <- vector("list", M)
@@ -1880,9 +1878,9 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
if (!length(etastart)) {
mean.init <- if ( .imethod == 1)
- rep(median(y), length = n) else
+ rep_len(median(y), n) else
if ( .imethod == 2)
- rep(weighted.mean(y, w), length = n) else {
+ rep_len(weighted.mean(y, w), n) else {
1 / (y + 1)
}
etastart <- matrix(theta2eta(mean.init, .link , earg = .earg ),
@@ -1902,7 +1900,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
misc$expected <- TRUE
misc$parallel <- .parallel
- misc$link <- rep(.link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- extra$y.names
misc$earg <- vector("list", M)
@@ -1966,11 +1964,11 @@ dalap <- function(x, location = 0, scale = 1, tau = 0.5,
NN <- max(length(x), length(location), length(scale), length(kappa),
length(tau))
- if (length(x) != NN) x <- rep(x, length.out = NN)
- if (length(location) != NN) location <- rep(location, length.out = NN)
- if (length(scale) != NN) scale <- rep(scale, length.out = NN)
- if (length(kappa) != NN) kappa <- rep(kappa, length.out = NN)
- if (length(tau) != NN) tau <- rep(tau, length.out = NN)
+ if (length(x) != NN) x <- rep_len(x, NN)
+ if (length(location) != NN) location <- rep_len(location, NN)
+ if (length(scale) != NN) scale <- rep_len(scale, NN)
+ if (length(kappa) != NN) kappa <- rep_len(kappa, NN)
+ if (length(tau) != NN) tau <- rep_len(tau, NN)
logconst <- 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
exponent <- -(sqrt(2) / scale) * abs(x - location) *
@@ -1990,10 +1988,10 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- location <- rep(location, length.out = use.n);
- scale <- rep(scale, length.out = use.n)
- tau <- rep(tau, length.out = use.n);
- kappa <- rep(kappa, length.out = use.n);
+ location <- rep_len(location, use.n)
+ scale <- rep_len(scale, use.n)
+ tau <- rep_len(tau, use.n)
+ kappa <- rep_len(kappa, use.n)
ans <- location + scale *
log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
@@ -2015,11 +2013,11 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
NN <- max(length(q), length(location), length(scale), length(kappa),
length(tau))
- if (length(q) != NN) q <- rep(q, length.out = NN)
- if (length(location) != NN) location <- rep(location, length.out = NN)
- if (length(scale) != NN) scale <- rep(scale, length.out = NN)
- if (length(kappa) != NN) kappa <- rep(kappa, length.out = NN)
- if (length(tau) != NN) tau <- rep(tau, length.out = NN)
+ if (length(q) != NN) q <- rep_len(q, NN)
+ if (length(location) != NN) location <- rep_len(location, NN)
+ if (length(scale) != NN) scale <- rep_len(scale, NN)
+ if (length(kappa) != NN) kappa <- rep_len(kappa, NN)
+ if (length(tau) != NN) tau <- rep_len(tau, NN)
exponent <- -(sqrt(2) / scale) * abs(q - location) *
ifelse(q >= location, kappa, 1/kappa)
@@ -2065,11 +2063,11 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
NN <- max(length(p), length(location), length(scale), length(kappa),
length(tau))
- if (length(p) != NN) p <- rep(p, length.out = NN)
- if (length(location) != NN) location <- rep(location, length.out = NN)
- if (length(scale) != NN) scale <- rep(scale, length.out = NN)
- if (length(kappa) != NN) kappa <- rep(kappa, length.out = NN)
- if (length(tau) != NN) tau <- rep(tau, length.out = NN)
+ if (length(p) != NN) p <- rep_len(p, NN)
+ if (length(location) != NN) location <- rep_len(location, NN)
+ if (length(scale) != NN) scale <- rep_len(scale, NN)
+ if (length(kappa) != NN) kappa <- rep_len(kappa, NN)
+ if (length(tau) != NN) tau <- rep_len(tau, NN)
@@ -2126,16 +2124,17 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
+
rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- location.ald <- rep(location.ald, length.out = use.n);
- scale.ald <- rep(scale.ald, length.out = use.n);
- tau <- rep(tau, length.out = use.n);
- kappa <- rep(kappa, length.out = use.n);
+ location.ald <- rep_len(location.ald, use.n)
+ scale.ald <- rep_len(scale.ald, use.n)
+ tau <- rep_len(tau, use.n)
+ kappa <- rep_len(kappa, use.n)
ans <- exp(location.ald) *
(runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
@@ -2157,11 +2156,11 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
NN <- max(length(x), length(location),
length(scale), length(kappa), length(tau))
- if (length(x) != NN) x <- rep(x, length.out = NN)
- if (length(location) != NN) location <- rep(location, length.out = NN)
- if (length(scale) != NN) scale <- rep(scale, length.out = NN)
- if (length(kappa) != NN) kappa <- rep(kappa, length.out = NN)
- if (length(tau) != NN) tau <- rep(tau, length.out = NN)
+ if (length(x) != NN) x <- rep_len(x, NN)
+ if (length(location) != NN) location <- rep_len(location, NN)
+ if (length(scale) != NN) scale <- rep_len(scale, NN)
+ if (length(kappa) != NN) kappa <- rep_len(kappa, NN)
+ if (length(tau) != NN) tau <- rep_len(tau, NN)
Alpha <- sqrt(2) * kappa / scale.ald
@@ -2192,11 +2191,11 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
NN <- max(length(p), length(location.ald), length(scale.ald),
length(kappa))
- p <- rep(p, length.out = NN)
- location <- rep(location.ald, length.out = NN)
- scale <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN)
- tau <- rep(tau, length.out = NN)
+ p <- rep_len(p, NN)
+ location <- rep_len(location.ald, NN)
+ scale <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ tau <- rep_len(tau, NN)
Alpha <- sqrt(2) * kappa / scale.ald
@@ -2257,11 +2256,11 @@ ploglap <- function(q, location.ald = 0, scale.ald = 1,
NN <- max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location <- rep(location.ald, length.out = NN)
- scale <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN)
- q <- rep(q, length.out = NN)
- tau <- rep(tau, length.out = NN)
+ location <- rep_len(location.ald, NN)
+ scale <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ q <- rep_len(q, NN)
+ tau <- rep_len(tau, NN)
Alpha <- sqrt(2) * kappa / scale.ald
Beta <- sqrt(2) / (scale.ald * kappa)
@@ -2324,11 +2323,11 @@ dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
NN <- max(length(x), length(location.ald),
length(scale.ald), length(kappa))
- location <- rep(location.ald, length.out = NN);
- scale <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN);
- x <- rep(x, length.out = NN)
- tau <- rep(tau, length.out = NN)
+ location <- rep_len(location.ald, NN)
+ scale <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ x <- rep_len(x, NN)
+ tau <- rep_len(tau, NN)
Alpha <- sqrt(2) * kappa / scale.ald
Beta <- sqrt(2) / (scale.ald * kappa)
@@ -2364,11 +2363,12 @@ qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
plogitlap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN <- max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald <- rep(location.ald, length.out = NN);
- scale.ald <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN); q <- rep(q, length.out = NN)
- tau <- rep(tau, length.out = NN);
+ length(kappa))
+ location.ald <- rep_len(location.ald, NN)
+ scale.ald <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ q <- rep_len(q, NN)
+ tau <- rep_len(tau, NN)
indexTF <- (q > 0) & (q < 1)
qqq <- logit(q[indexTF]) # earg = earg
@@ -2408,10 +2408,11 @@ dprobitlap <-
NN <- max(length(x), length(location.ald), length(scale.ald),
length(kappa))
- location.ald <- rep(location.ald, length.out = NN);
- scale.ald <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN); x = rep(x, length.out = NN)
- tau <- rep(tau, length.out = NN)
+ location.ald <- rep_len(location.ald, NN)
+ scale.ald <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ x <- rep_len(x, NN)
+ tau <- rep_len(tau, NN)
logdensity <- x * NaN
index1 <- (x > 0) & (x < 1)
@@ -2470,12 +2471,12 @@ qprobitlap <- function(p, location.ald = 0, scale.ald = 1,
pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN <- max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald <- rep(location.ald, length.out = NN);
- scale.ald <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN);
- q <- rep(q, length.out = NN)
- tau <- rep(tau, length.out = NN);
+ length(kappa))
+ location.ald <- rep_len(location.ald, NN)
+ scale.ald <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ q <- rep_len(q, NN)
+ tau <- rep_len(tau, NN)
indexTF <- (q > 0) & (q < 1)
qqq <- probit(q[indexTF]) # earg = earg
@@ -2512,11 +2513,11 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
NN <- max(length(x), length(location.ald), length(scale.ald),
length(kappa))
- location.ald <- rep(location.ald, length.out = NN)
- scale.ald <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN)
- x <- rep(x, length.out = NN)
- tau <- rep(tau, length.out = NN)
+ location.ald <- rep_len(location.ald, NN)
+ scale.ald <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ x <- rep_len(x, NN)
+ tau <- rep_len(tau, NN)
logdensity <- x * NaN
index1 <- (x > 0) & (x < 1)
@@ -2574,12 +2575,13 @@ qclogloglap <- function(p, location.ald = 0, scale.ald = 1,
pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN <- max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald <- rep(location.ald, length.out = NN);
- scale.ald <- rep(scale.ald, length.out = NN)
- kappa <- rep(kappa, length.out = NN);
- q <- rep(q, length.out = NN)
- tau <- rep(tau, length.out = NN);
+ length(kappa))
+ location.ald <- rep_len(location.ald, NN)
+ scale.ald <- rep_len(scale.ald, NN)
+ kappa <- rep_len(kappa, NN)
+ q <- rep_len(q, NN)
+ tau <- rep_len(tau, NN)
+
indexTF <- (q > 0) & (q < 1)
qqq <- cloglog(q[indexTF]) # earg = earg
@@ -2616,6 +2618,7 @@ alaplace2.control <- function(maxit = 100, ...) {
ishrinkage = 0.95,
parallel.locat = TRUE ~ 0,
+
parallel.scale = FALSE ~ 0,
digt = 4,
@@ -2693,6 +2696,8 @@ alaplace2.control <- function(maxit = 100, ...) {
constraints = eval(substitute(expression({
+ print("Mdiv2")
+ print( Mdiv2 )
onemat <- matrix(1, Mdiv2, 1)
constraints.orig <- constraints
@@ -2702,10 +2707,11 @@ alaplace2.control <- function(maxit = 100, ...) {
con.locat <- cm.VGAM(cmk.locat,
x = x, bool = .parallel.locat ,
constraints = constraints.orig,
- apply.int = .apply.parint.locat ,
cm.default = cm1.locat,
cm.intercept.default = cm1.locat)
-
+ print("con.locat[[1]]")
+ print( con.locat[[1]] )
+
cm1.scale <- kronecker(diag(Mdiv2), rbind(0, 1))
@@ -2716,12 +2722,16 @@ alaplace2.control <- function(maxit = 100, ...) {
apply.int = .apply.parint.scale ,
cm.default = cm1.scale,
cm.intercept.default = cm1.scale)
+ print("con.scale[[1]],,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
+ print( con.scale[[1]] )
con.use <- con.scale
- for (klocal in 1:length(con.scale)) {
+ for (klocal in seq_along(con.scale)) {
con.use[[klocal]] <- cbind(con.locat[[klocal]],
con.scale[[klocal]])
}
+ print("con.use[[1]],,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
+ print( con.use[[1]] )
constraints <- con.use
@@ -2729,6 +2739,8 @@ alaplace2.control <- function(maxit = 100, ...) {
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
+ print("names(constraints),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
+ print( names(constraints) )
}), list( .parallel.locat = parallel.locat,
.parallel.scale = parallel.scale,
.zero = zero,
@@ -2848,13 +2860,14 @@ alaplace2.control <- function(maxit = 100, ...) {
.ilocat = ilocat, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Mdiv2 <- extra$Mdiv2
- locat <- eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
+ M1 <- 2
+ locat <- eta2theta(eta[, M1 * (1:Mdiv2) - 1, drop = FALSE],
.llocat , earg = .elocat )
dimnames(locat) <- list(dimnames(eta)[[1]], extra$y.names)
myans <- if ( .fittedMean ) {
kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2,
byrow = TRUE)
- Scale <- eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
+ Scale <- eta2theta(eta[, M1 * (1:Mdiv2) , drop = FALSE],
.lscale , earg = .escale )
locat + Scale * (1/kappamat - kappamat)
} else {
@@ -2869,8 +2882,8 @@ alaplace2.control <- function(maxit = 100, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
- tmp34 <- c(rep( .llocat , length = Mdiv2),
- rep( .lscale , length = Mdiv2))
+ tmp34 <- c(rep_len( .llocat , Mdiv2),
+ rep_len( .lscale , Mdiv2))
names(tmp34) <- c(mynames1, mynames2)
tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
misc$link <- tmp34 # Already named
@@ -3159,8 +3172,8 @@ alaplace1.control <- function(maxit = 100, ...) {
extra$M <- M <- max(length( .Scale.arg ),
ncoly,
length( .kappa )) # Recycle
- extra$Scale <- rep( .Scale.arg , length = M)
- extra$kappa <- rep( .kappa , length = M)
+ extra$Scale <- rep_len( .Scale.arg , M)
+ extra$kappa <- rep_len( .kappa , M)
extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
extra$n <- n
@@ -3236,7 +3249,7 @@ alaplace1.control <- function(maxit = 100, ...) {
misc$M1 <- M1
misc$multipleResponses <- TRUE
- tmp34 <- c(rep( .llocat , length = M))
+ tmp34 <- c(rep_len( .llocat , M))
names(tmp34) <- mynames1
misc$link <- tmp34 # Already named
@@ -3423,8 +3436,8 @@ alaplace3.control <- function(maxit = 100, ...) {
if (!length(etastart)) {
kappa.init <- if (length( .ikappa ))
- rep( .ikappa, length.out = n) else
- rep( 1.0, length.out = n)
+ rep_len( .ikappa , n) else
+ rep_len( 1.0 , n)
if ( .imethod == 1) {
locat.init <- median(y)
scale.init <- sqrt(var(y) / 2)
@@ -3432,12 +3445,12 @@ alaplace3.control <- function(maxit = 100, ...) {
locat.init <- y
scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init <- if (length( .ilocat))
- rep( .ilocat, length.out = n) else
- rep(locat.init, length.out = n)
- scale.init <- if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(scale.init, length.out = n)
+ locat.init <- if (length( .ilocat ))
+ rep_len( .ilocat , n) else
+ rep_len(locat.init, n)
+ scale.init <- if (length( .iscale ))
+ rep_len( .iscale , n) else
+ rep_len(scale.init, n)
etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ),
@@ -3561,9 +3574,9 @@ plaplace <- function(q, location = 0, scale = 1,
stop("bad input for argument 'log.p'")
L <- max(length(q), length(location), length(scale))
- if (length(q) != L) q <- rep(q, length.out = L)
- if (length(location) != L) location <- rep(location, length.out = L)
- if (length(scale) != L) scale <- rep(scale, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(location) != L) location <- rep_len(location, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
if (lower.tail) {
@@ -3595,9 +3608,9 @@ qlaplace <- function(p, location = 0, scale = 1,
L <- max(length(p), length(location), length(scale))
- if (length(p) != L) p <- rep(p, length.out = L)
- if (length(location) != L) location <- rep(location, length.out = L)
- if (length(scale) != L) scale <- rep(scale, length.out = L)
+ if (length(p) != L) p <- rep_len(p, L)
+ if (length(location) != L) location <- rep_len(location, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
if (lower.tail) {
@@ -3636,8 +3649,8 @@ rlaplace <- function(n, location = 0, scale = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("'scale' must be positive")
- location <- rep(location, length.out = use.n)
- scale <- rep(scale, length.out = use.n)
+ location <- rep_len(location, use.n)
+ scale <- rep_len(scale, use.n)
rrrr <- runif(use.n)
@@ -3725,12 +3738,12 @@ rlaplace <- function(n, location = 0, scale = 1) {
locat.init <- median(y)
scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init <- if (length( .ilocat))
- rep( .ilocat, length.out = n) else
- rep(locat.init, length.out = n)
- scale.init <- if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(scale.init, length.out = n)
+ locat.init <- if (length( .ilocat ))
+ rep_len( .ilocat , n) else
+ rep_len(locat.init, n)
+ scale.init <- if (length( .iscale ))
+ rep_len( .iscale , n) else
+ rep_len(scale.init, n)
etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
@@ -3879,11 +3892,11 @@ fff.control <- function(save.weights = TRUE, ...) {
df1.init <- 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
}
df1.init <- if (length( .idf1 ))
- rep( .idf1 , length.out = n) else
- rep(df1.init, length.out = n)
+ rep_len( .idf1 , n) else
+ rep_len(df1.init, n)
df2.init <- if (length( .idf2 ))
- rep( .idf2 , length.out = n) else
- rep(1, length.out = n)
+ rep_len( .idf2 , n) else
+ rep_len(1, n)
etastart <- cbind(theta2eta(df1.init, .link , earg = .earg ),
theta2eta(df2.init, .link , earg = .earg ))
}
@@ -4002,7 +4015,7 @@ fff.control <- function(save.weights = TRUE, ...) {
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
if (is.factor(y)) y <- y != levels(y)[1]
- nn <- rep(1, length.out = n)
+ nn <- rep_len(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
mustart <- (0.5 + w * y) / (1 + w)
@@ -4027,7 +4040,7 @@ fff.control <- function(save.weights = TRUE, ...) {
extra$Nunknown <- length(extra$Nvector) == 0
if (!length(etastart)) {
init.prob <- if (length( .iprob))
- rep( .iprob, length.out = n) else
+ rep_len( .iprob, n) else
mustart
etastart <- matrix(init.prob, n, ncol(cbind(y )))
@@ -4138,11 +4151,11 @@ dbenini <- function(x, y0, shape, log = FALSE) {
N <- max(length(x), length(shape), length(y0))
- if (length(x) != N) x <- rep(x, length.out = N)
- if (length(shape) != N) shape <- rep(shape, length.out = N)
- if (length(y0) != N) y0 <- rep(y0, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ if (length(y0) != N) y0 <- rep_len(y0, N)
- logdensity <- rep(log(0), length.out = N)
+ logdensity <- rep_len(log(0), N)
xok <- (x > y0)
tempxok <- log(x[xok]/y0[xok])
logdensity[xok] <- log(2*shape[xok]) - shape[xok] * tempxok^2 +
@@ -4166,9 +4179,9 @@ pbenini <- function(q, y0, shape, lower.tail = TRUE, log.p = FALSE) {
stop("bad input for argument 'log.p'")
N <- max(length(q), length(shape), length(y0))
- if (length(q) != N) q <- rep(q, length.out = N)
- if (length(shape) != N) shape <- rep(shape, length.out = N)
- if (length(y0) != N) y0 <- rep(y0, length.out = N)
+ if (length(q) != N) q <- rep_len(q, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ if (length(y0) != N) y0 <- rep_len(y0, N)
ans <- y0 * 0
ok <- q > y0
@@ -4327,7 +4340,7 @@ rbenini <- function(n, y0, shape) {
}, list( .lshape = lshape, .eshape = eshape ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .lshape , length = ncoly))
+ misc$link <- c(rep_len( .lshape , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -4434,7 +4447,7 @@ ppolono <- function(q, meanlog = 0, sdlog = 1,
isOne = 1 - sqrt( .Machine$double.eps ), ...) {
- .cumprob <- rep(0, length(q))
+ .cumprob <- rep_len(0, length(q))
.cumprob[q == Inf] <- 1 # special case
@@ -4479,14 +4492,14 @@ dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
N <- max(length(x), length(theta), length(lower), length(upper))
- if (length(x) != N) x <- rep(x, length.out = N)
- if (length(theta) != N) theta <- rep(theta, length.out = N)
- if (length(lower) != N) lower <- rep(lower, length.out = N)
- if (length(upper) != N) upper <- rep(upper, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(theta) != N) theta <- rep_len(theta, N)
+ if (length(lower) != N) lower <- rep_len(lower, N)
+ if (length(upper) != N) upper <- rep_len(upper, N)
denom1 <- ((upper-lower)*(theta-lower))
denom2 <- ((upper-lower)*(upper-theta))
- logdensity <- rep(log(0), length.out = N)
+ logdensity <- rep_len(log(0), N)
xok.neg <- (lower < x) & (x <= theta)
xok.pos <- (theta <= x) & (x < upper)
logdensity[xok.neg] =
@@ -4519,9 +4532,9 @@ rtriangle <- function(n, theta, lower = 0, upper = 1) {
stop("lower < theta < upper values are required")
N <- use.n
- lower <- rep(lower, length.out = N)
- upper <- rep(upper, length.out = N)
- theta <- rep(theta, length.out = N)
+ lower <- rep_len(lower, N)
+ upper <- rep_len(upper, N)
+ theta <- rep_len(theta, N)
t1 <- sqrt(runif(n))
t2 <- sqrt(runif(n))
ifelse(runif(n) < (theta - lower) / (upper - lower),
@@ -4541,10 +4554,10 @@ qtriangle <- function(p, theta, lower = 0, upper = 1,
stop("bad input for argument 'log.p'")
N <- max(length(p), length(theta), length(lower), length(upper))
- if (length(p) != N) p <- rep(p, length.out = N)
- if (length(theta) != N) theta <- rep(theta, length.out = N)
- if (length(lower) != N) lower <- rep(lower, length.out = N)
- if (length(upper) != N) upper <- rep(upper, length.out = N)
+ if (length(p) != N) p <- rep_len(p, N)
+ if (length(theta) != N) theta <- rep_len(theta, N)
+ if (length(lower) != N) lower <- rep_len(lower, N)
+ if (length(upper) != N) upper <- rep_len(upper, N)
ans <- NA_real_ * p
if (lower.tail) {
@@ -4597,10 +4610,10 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
lower.tail = TRUE, log.p = FALSE) {
N <- max(length(q), length(theta), length(lower), length(upper))
- if (length(q) != N) q <- rep(q, length.out = N)
- if (length(theta) != N) theta <- rep(theta, length.out = N)
- if (length(lower) != N) lower <- rep(lower, length.out = N)
- if (length(upper) != N) upper <- rep(upper, length.out = N)
+ if (length(q) != N) q <- rep_len(q, N)
+ if (length(theta) != N) theta <- rep_len(theta, N)
+ if (length(lower) != N) lower <- rep_len(lower, N)
+ if (length(upper) != N) upper <- rep_len(upper, N)
if (!is.logical(lower.tail) || length(lower.tail ) != 1)
stop("bad input for argument 'lower.tail'")
@@ -4726,8 +4739,8 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
- extra$lower <- rep( .lower , length.out = n)
- extra$upper <- rep( .upper , length.out = n)
+ extra$lower <- rep_len( .lower , n)
+ extra$upper <- rep_len( .upper , n)
if (any(y <= extra$lower | y >= extra$upper))
stop("some y values in [lower,upper] detected")
@@ -4740,7 +4753,7 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
Theta.init <- if (length( .itheta )) .itheta else {
weighted.mean(y, w)
}
- Theta.init <- rep(Theta.init, length = n)
+ Theta.init <- rep_len(Theta.init, n)
etastart <- theta2eta(Theta.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .itheta=itheta,
@@ -4939,8 +4952,8 @@ loglaplace1.control <- function(maxit = 300, ...) {
initialize = eval(substitute(expression({
extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale <- rep( .Scale.arg , length = M)
- extra$kappa <- rep( .kappa , length = M)
+ extra$Scale <- rep_len( .Scale.arg , M)
+ extra$kappa <- rep_len( .kappa , M)
extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -4996,9 +5009,9 @@ loglaplace1.control <- function(maxit = 300, ...) {
use.this <- weighted.mean(y, w)
locat.init <- (1- .ishrinkage )*y + .ishrinkage * use.this
}
- locat.init <- if (length( .ilocat))
- rep( .ilocat, length.out = M) else
- rep(locat.init, length.out = M)
+ locat.init <- if (length( .ilocat ))
+ rep_len( .ilocat , M) else
+ rep_len(locat.init, M)
locat.init <- matrix(locat.init, n, M, byrow = TRUE)
if ( .llocat == "loge")
locat.init <- abs(locat.init)
@@ -5048,7 +5061,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
extra$percentile <- numeric(length(misc$kappa))
locat.y <- as.matrix(locat.y)
- for (ii in 1:length(misc$kappa))
+ for (ii in seq_along(misc$kappa))
extra$percentile[ii] <- 100 * weighted.mean(y <= locat.y[, ii], w)
}), list( .elocat = elocat, .llocat = llocat,
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
@@ -5129,6 +5142,7 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
list(save.weights = save.weights)
}
+
loglaplace2 <- function(tau = NULL,
llocation = "loge", lscale = "loge",
ilocation = NULL, iscale = NULL,
@@ -5288,12 +5302,12 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
locat.init.y <- if (length( .ilocat ))
- rep( .ilocat , length.out = n) else
- rep(locat.init.y, length.out = n)
+ rep_len( .ilocat , n) else
+ rep_len(locat.init.y, n)
locat.init.y <- matrix(locat.init.y, n, M/2)
- scale.init <- if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(scale.init, length.out = n)
+ scale.init <- if (length( .iscale ))
+ rep_len( .iscale , n) else
+ rep_len(scale.init, n)
scale.init <- matrix(scale.init, n, M/2)
etastart <-
cbind(theta2eta(locat.init.y, .llocat , earg = .elocat ),
@@ -5335,7 +5349,7 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
misc$rep0 <- .rep0
extra$percentile <- numeric(length(misc$kappa))
locat <- as.matrix(locat.y)
- for (ii in 1:length(misc$kappa))
+ for (ii in seq_along(misc$kappa))
extra$percentile[ii] <- 100 *
weighted.mean(y <= locat.y[, ii], w)
}), list( .elocat = elocat, .llocat = llocat,
@@ -5550,8 +5564,8 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
initialize = eval(substitute(expression({
extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale <- rep( .Scale.arg, length = M)
- extra$kappa <- rep( .kappa, length = M)
+ extra$Scale <- rep_len( .Scale.arg , M)
+ extra$kappa <- rep_len( .kappa , M)
extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -5605,8 +5619,8 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
locat.init <- if (length( .ilocat ))
- rep( .ilocat , length.out = M) else
- rep(locat.init, length.out = M)
+ rep_len( .ilocat , M) else
+ rep_len(locat.init, M)
locat.init <- matrix(locat.init, n, M, byrow = TRUE)
locat.init <- abs(locat.init)
etastart <-
@@ -5649,7 +5663,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
extra$percentile <- numeric(length(misc$kappa))
locat.y <- eta2theta(eta, .llocat , earg = .elocat )
locat.y <- as.matrix(locat.y)
- for (ii in 1:length(misc$kappa))
+ for (ii in seq_along(misc$kappa))
extra$percentile[ii] <- 100 *
weighted.mean(y <= locat.y[, ii], w)
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 5842e06..eddda91 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -186,7 +186,7 @@
for (ii in cindex) {
temp6.mat <- modmat.col[, ii, drop = FALSE]
Hlist[[paste(cprefix, ii, sep = "")]] <- temp6.mat
- .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
+ .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep_len(1, nrow(y))
}
@@ -271,7 +271,7 @@
kmat1[which.linpred, 1] <- 1
kmat0 <- (diag(M1))[, -which.linpred, drop = FALSE]
- for (ii in 1:length(Hlist)) {
+ for (ii in seq_along(Hlist)) {
Hlist[[ii]] <- kronecker(Hlist[[ii]], kmat1)
}
if (has.intercept)
@@ -299,7 +299,7 @@
constraints = Hlist,
offset = offset.matrix,
weights = if (length(weights))
- weights else rep(1, length = nrow(y)),
+ weights else rep_len(1, nrow(y)),
...,
control = mycontrol, data = .rcim.df )
} else {
@@ -309,7 +309,7 @@
constraints = Hlist,
offset = offset.matrix,
weights = if (length(weights))
- weights else rep(1, length = nrow(y)),
+ weights else rep_len(1, nrow(y)),
...,
control = mycontrol, data = .rcim.df )
}
@@ -521,7 +521,7 @@ setMethod("summary", "rcim",
axes = FALSE, col = rcol, main = rmain,
sub = rsub, xlab = rxlab, ylab = rylab, ...)
- axis(1, at = 1:length(raxisl),
+ axis(1, at = seq_along(raxisl),
cex.lab = rcex.lab,
cex.axis = rcex.axis,
labels = raxisl)
@@ -537,7 +537,7 @@ setMethod("summary", "rcim",
axes = FALSE, col = ccol, main = cmain, # lwd = 2, xpd = FALSE,
sub = csub, xlab = cxlab, ylab = cylab, ...)
- axis(1, at = 1:length(caxisl),
+ axis(1, at = seq_along(caxisl),
cex.lab = ccex.lab,
cex.axis = ccex.axis,
labels = caxisl)
@@ -752,7 +752,7 @@ Confint.nb1 <- function(nb1, level = 0.95) {
- myvec <- cbind(c(-1, 1, rep(0, len = nrow(myvcov) - 2)))
+ myvec <- cbind(c(-1, 1, rep_len(0, nrow(myvcov) - 2)))
(se.mydiff <- sqrt(t(myvec) %*% myvcov %*% myvec))
ci.mydiff <- mydiff + c(-1, 1) * qnorm(1 - (1 - level)/2) * se.mydiff
@@ -1059,7 +1059,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
covmat[jlocal, jlocal] -
covmat[ilocal, jlocal] * 2
- diag(allvcov) <- rep(1.0, length = LLL) # Any positive value should do
+ diag(allvcov) <- rep_len(1.0, LLL) # Any positive value should do
wmat <- matrix(1.0, LLL, LLL)
@@ -1090,7 +1090,7 @@ WorstErrors <- function(qv.object) {
reducedForm <- function(covmat, qvmat) {
nlevels <- dim(covmat)[1]
firstRow <- covmat[1, ]
- ones <- rep(1, nlevels)
+ ones <- rep_len(1, nlevels)
J <- outer(ones, ones)
notzero <- 2:nlevels
r.covmat <- covmat + (firstRow[1]*J) -
@@ -1124,7 +1124,7 @@ IndentPrint <- function(object, indent = 4, ...) {
try(print(object, ...))
sink()
close(tc)
- indent <- paste(rep(" ", indent), sep = "", collapse = "")
+ indent <- paste(rep_len(" ", indent), sep = "", collapse = "")
cat(paste(indent, zz, sep = ""), sep = "\n")
}
@@ -1154,7 +1154,7 @@ summary.qvar <- function(object, ...) {
is.matrix(object at extra$attributes.y$estimates))
names( estimates) <- rownames(object at extra$attributes.y$estimates)
if (!length(names(estimates)))
- names( estimates) <- paste("Level", 1:length(estimates), sep = "")
+ names( estimates) <- paste("Level", seq_along(estimates), sep = "")
regularVar <- c(object at extra$attributes.y$regularVar)
@@ -1284,7 +1284,7 @@ qvplot <- function(object,
if (length(level.names) == length(estimates)) {
names(estimates) <- level.names
} else if (!length(names(estimates)))
- names(estimates) <- paste("Level", 1:length(estimates),
+ names(estimates) <- paste("Level", seq_along(estimates),
sep = "")
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index 1c14b50..316a5b4 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -81,12 +81,12 @@ rcqo <- function(n, p, S,
if (!is.logical(Crow1positive)) {
stop("bad input for argument 'Crow1positive)'")
} else {
- Crow1positive <- rep(Crow1positive, len = Rank)
+ Crow1positive <- rep_len(Crow1positive, Rank)
}
- Shape <- rep(Shape, len = S)
- sd.latvar <- rep(sd.latvar, len = Rank)
- sd.optimums <- rep(sd.optimums, len = Rank)
- sd.tolerances <- rep(sd.tolerances, len = Rank)
+ Shape <- rep_len(Shape, S)
+ sd.latvar <- rep_len(sd.latvar, Rank)
+ sd.optimums <- rep_len(sd.optimums, Rank)
+ sd.tolerances <- rep_len(sd.tolerances, Rank)
AA <- sd.optimums / 3^0.5
if (Rank > 1 && any(diff(sd.latvar) > 0))
stop("argument 'sd.latvar)' must be a vector with decreasing values")
@@ -197,7 +197,7 @@ rcqo <- function(n, p, S,
ynames <- paste("y", 1:S, sep = "")
- Kvector <- rep(Kvector, len = S)
+ Kvector <- rep_len(Kvector, S)
names(Kvector) <- ynames
latvarnames <- if (Rank == 1) "latvar" else
paste("latvar", 1:Rank, sep = "")
@@ -397,7 +397,7 @@ getInitVals <- function(gvals, llfun, ...) {
LLFUN <- match.fun(llfun)
ff <- function(myx, ...) LLFUN(myx, ...)
objFun <- gvals
- for (ii in 1:length(gvals))
+ for (ii in seq_along(gvals))
objFun[ii] <- ff(myx = gvals[ii], ...)
try.this <- gvals[objFun == max(objFun)] # Usually scalar, maybe vector
try.this
diff --git a/R/family.robust.R b/R/family.robust.R
index 5817bf7..8b7f37d 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -55,7 +55,7 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- myl <- rep(0.0, len = use.n)
+ myl <- rep_len(0.0, use.n)
lowlim <- 1
upplim <- 0
@@ -72,15 +72,15 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
upplim <- upplim + sumyok
if (upplim > use.n)
- myl <- rep(myl, len = upplim)
+ myl <- rep_len(myl, upplim)
myl[lowlim:upplim] <- y[yok]
lowlim <- lowlim + sumyok
}
}
- myl <- rep(myl, len = use.n) # Prune to right length
+ myl <- rep_len(myl, use.n) # Prune to right length
- rep(mu + sigma * myl, len = use.n)
+ rep_len(mu + sigma * myl, use.n)
}
@@ -255,9 +255,9 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
scale.y.est <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .imethod == 3) {
- rep(weighted.mean(y, w), len = n)
+ rep_len(weighted.mean(y, w), n)
} else if ( .imethod == 2) {
- rep(median(rep(y, w)), len = n)
+ rep_len(median(rep(y, w)), n)
} else if ( .imethod == 1) {
junk$fitted
} else {
@@ -404,9 +404,9 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
junk <- lm.wfit(x = x, y = c(y), w = c(w))
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .imethod == 3) {
- rep(weighted.mean(y, w), len = n)
+ rep_len(weighted.mean(y, w), n)
} else if ( .imethod == 2) {
- rep(median(rep(y, w)), len = n)
+ rep_len(median(rep(y, w)), n)
} else if ( .imethod == 1) {
junk$fitted
} else {
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 542915b..88c31ea 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -69,7 +69,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
Criterion = c("ResSS", "coefficients"),
- Crow1positive = rep(TRUE, length.out = Rank),
+ Crow1positive = rep_len(TRUE, Rank),
colx1.index,
Linesearch = FALSE,
Maxit = 20,
@@ -194,7 +194,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
direction1 <- (xnew - xold) # / sqrt(1 + sum((xnew-xold)^2))
ftemp <- fit$ResSS # Most recent objective function
use.alpha <- 0 # The current step relative to (xold, yold)
- for (itter in 1:length(Alphavec)) {
+ for (itter in seq_along(Alphavec)) {
CC <- xold + Alphavec[itter] * direction1
try.latvar.mat <- x[, colx2.index, drop = FALSE] %*% CC
@@ -304,7 +304,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
asx <- attr(x, "assign")
asx <- vector("list", ncol(new.latvar.model.matrix))
names(asx) <- names(clist2)
- for (ii in 1:length(names(asx))) {
+ for (ii in seq_along(names(asx))) {
asx[[ii]] <- ii
}
attr(new.latvar.model.matrix, "assign") <- asx
@@ -367,7 +367,7 @@ valt.1iter <- function(x, z, U, Hlist, C, control,
clist2 <- NULL # for vlm.wfit
- i5 <- rep(0, length.out = MSratio)
+ i5 <- rep_len(0, MSratio)
for (ii in 1:NOS) {
i5 <- i5 + 1:MSratio
@@ -625,8 +625,8 @@ rrr.end.expression <- expression({
}
mu <- family at linkinv(eta, extra)
- if (any(is.na(mu)))
- warning("there are NAs in mu")
+ if (anyNA(mu))
+ warning("there are NAs in mu")
deriv.mu <- eval(family at deriv)
wz <- eval(family at weight)
@@ -712,8 +712,8 @@ rrr.derivative.expression <- expression({
method = which.optimizer,
control = list(fnscale = 1,
trace = as.integer(control$trace),
- parscale = rep(control$Parscale,
- length.out=length(Cmat)),
+ parscale = rep_len(control$Parscale,
+ length(Cmat)),
maxit = 250),
etamat = eta, xmat = x, ymat = y, wvec = w,
X.vlm.1save = if (nice31) NULL else X.vlm.1save,
@@ -948,7 +948,7 @@ Coef.qrrvglm <-
lp.names <- object at misc$predictors.names
if (!length(lp.names)) lp.names <- NULL
- dzero.vector <- rep(FALSE, length = M)
+ dzero.vector <- rep_len(FALSE, M)
if (length(Dzero))
dzero.vector[Dzero] <- TRUE
names(dzero.vector) <- ynames
@@ -989,7 +989,7 @@ Coef.qrrvglm <-
Cmat <- object at extra$Cmat # p2 x Rank
Dmat <- object at extra$Dmat #
B1 <- object at extra$B1 #
- bellshaped <- rep(FALSE, length = M)
+ bellshaped <- rep_len(FALSE, M)
if (is.character(refResponse)) {
refResponse <- (1:NOS)[refResponse == ynames]
@@ -1127,7 +1127,7 @@ Coef.qrrvglm <-
mymax <- object at family@linkinv(rbind(eta.temp), extra = object at extra)
c(mymax) # Convert from matrix to vector
} else {
- 5 * rep(NA_real_, length.out = M) # Make "numeric"
+ 5 * rep_len(NA_real_, M) # Make "numeric"
}
names(maximum) <- ynames
@@ -1371,7 +1371,7 @@ predictqrrvglm <-
if (nrow(X) != nrow(newdata)) {
as.save <- attr(X, "assign")
- X <- X[rep(1, nrow(newdata)),, drop = FALSE]
+ X <- X[rep_len(1, nrow(newdata)),, drop = FALSE]
dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") <- as.save # Restored
}
@@ -1413,7 +1413,7 @@ predictqrrvglm <-
etamat <- as.matrix(X1mat %*% Coefs at B1 + latvarmat %*% t(Coefs at A))
which.species <- 1:NOS # Do it all for all species
- for (sppno in 1:length(which.species)) {
+ for (sppno in seq_along(which.species)) {
thisSpecies <- which.species[sppno]
Dmat <- matrix(Coefs at D[,,thisSpecies], Rank, Rank)
etamat[, thisSpecies] <- etamat[, thisSpecies] +
@@ -1489,7 +1489,7 @@ show.rrvglm <- function(x, ...) {
if (any(nas <- is.na(vecOfBetas))) {
if (is.null(names(vecOfBetas)))
names(vecOfBetas) <- paste("b",
- 1:length(vecOfBetas), sep = "")
+ seq_along(vecOfBetas), sep = "")
cat("\nCoefficients: (", sum(nas),
" not defined because of singularities)\n", sep = "")
} else
@@ -1756,7 +1756,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
if (p1) {
ooo <- fit at assign
bb <- NULL
- for (ii in 1:length(ooo)) {
+ for (ii in seq_along(ooo)) {
if (any(ooo[[ii]][1] == colx1.index))
bb <- c(bb, names(ooo)[ii])
}
@@ -1948,7 +1948,7 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
temp <- m2a(temp, M = p2 * r) # Note M != M here!
G <- solve(rowSums(temp, dims = 2)) # p2*r by p2*r
- dc.da <- array(NA, c(p2, r, M, r)) # different from other functions
+ dc.da <- array(NA_real_, c(p2, r, M, r)) # different from other functions
if (length(Index.corner) == M)
stop("cannot handle full rank models yet")
cbindex <- (1:M)[-Index.corner] # complement of Index.corner
@@ -2033,7 +2033,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
temp <- m2a(temp, M = r * pp) # Note M != M here!
G <- solve(rowSums(temp, dims = 2))
- dc.da <- array(NA, c(pp,r,M,r)) # different from other functions
+ dc.da <- array(NA_real_, c(pp, r, M, r)) # different from other functions
cbindex <- (1:M)[-Index.corner]
resid2 <- mux22(t(wz),
z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
@@ -2045,7 +2045,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
temp2 <- kronecker(I.col(s, r), rowSums(fred))
- temp4 <- rep(0,pp)
+ temp4 <- rep_len(0, pp)
for (k in 1:r) {
Wiak <- mux22(t(wz),
matrix(Aimat[, k], nn, M, byrow = TRUE),
@@ -2173,7 +2173,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
temp <- m2a(temp, M = r * pp) # Note M != M here!
G <- solve(rowSums(temp, dims = 2))
- dc.da <- array(NA,c(pp,r,r,M))
+ dc.da <- array(NA_real_, c(pp, r, r, M))
cbindex <- (1:M)[-Index.corner]
resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE),
M = M,
@@ -2185,7 +2185,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
temp2 <- kronecker(I.col(s, r), rowSums(fred))
- temp4 <- rep(0,pp)
+ temp4 <- rep_len(0, pp)
for (k in 1:r) {
Wiak <- mux22(t(wz),
matrix(Aimat[, k], nn, M, byrow = TRUE),
@@ -2300,7 +2300,7 @@ biplot.qrrvglm <- function(x, ...) {
type <- match.arg(type, c("fitted.values", "predictors"))[1]
if (is.numeric(OriginC))
- OriginC <- rep(OriginC, length.out = 2) else {
+ OriginC <- rep_len(OriginC, 2) else {
if (mode(OriginC) != "character" && mode(OriginC) != "name")
OriginC <- as.character(substitute(OriginC))
OriginC <- match.arg(OriginC, c("origin","mean"))[1]
@@ -2348,22 +2348,22 @@ biplot.qrrvglm <- function(x, ...) {
- pch <- rep(pch, length = ncol(r.curves))
- pcol <- rep(pcol, length = ncol(r.curves))
- pcex <- rep(pcex, length = ncol(r.curves))
- llty <- rep(llty, length = ncol(r.curves))
- lcol <- rep(lcol, length = ncol(r.curves))
- llwd <- rep(llwd, length = ncol(r.curves))
- elty <- rep(elty, length = ncol(r.curves))
- ecol <- rep(ecol, length = ncol(r.curves))
- elwd <- rep(elwd, length = ncol(r.curves))
- adj.arg <- rep(adj.arg, length = ncol(r.curves))
+ pch <- rep_len(pch, ncol(r.curves))
+ pcol <- rep_len(pcol, ncol(r.curves))
+ pcex <- rep_len(pcex, ncol(r.curves))
+ llty <- rep_len(llty, ncol(r.curves))
+ lcol <- rep_len(lcol, ncol(r.curves))
+ llwd <- rep_len(llwd, ncol(r.curves))
+ elty <- rep_len(elty, ncol(r.curves))
+ ecol <- rep_len(ecol, ncol(r.curves))
+ elwd <- rep_len(elwd, ncol(r.curves))
+ adj.arg <- rep_len(adj.arg, ncol(r.curves))
if ( C ) {
- Clwd <- rep(Clwd, length = nrow(Cmat))
- Clty <- rep(Clty, length = nrow(Cmat))
- Ccol <- rep(Ccol, length = nrow(Cmat))
- Ccex <- rep(Ccex, length = nrow(Cmat))
- Cadj.arg <- rep(Cadj.arg, length = nrow(Cmat))
+ Clwd <- rep_len(Clwd, nrow(Cmat))
+ Clty <- rep_len(Clty, nrow(Cmat))
+ Ccol <- rep_len(Ccol, nrow(Cmat))
+ Ccex <- rep_len(Ccex, nrow(Cmat))
+ Cadj.arg <- rep_len(Cadj.arg, nrow(Cmat))
}
if (Rank == 1) {
@@ -2420,8 +2420,8 @@ biplot.qrrvglm <- function(x, ...) {
cutpoint <- object at family@linkfun(Coef.list at Maximum[i],
extra = object at extra) - cutpoint
if (is.finite(cutpoint) && cutpoint > 0) {
- Mmat <- diag(rep(ifelse(object at control$Crow1positive, 1, -1),
- length.out = Rank))
+ Mmat <- diag(rep_len(ifelse(object at control$Crow1positive, 1, -1),
+ Rank))
etoli <- eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
A <- ifelse(etoli$val[1]>0, sqrt(2*cutpoint*etoli$val[1]), Inf)
B <- ifelse(etoli$val[2]>0, sqrt(2*cutpoint*etoli$val[2]), Inf)
@@ -2464,7 +2464,7 @@ biplot.qrrvglm <- function(x, ...) {
if (sites) {
text(nustar[, 1], nustar[, 2], adj = 0.5,
labels = if (is.null(spch)) dimnames(nustar)[[1]] else
- rep(spch, length = nrow(nustar)), col = scol,
+ rep_len(spch, nrow(nustar)), col = scol,
cex = scex, font = sfont)
}
}
@@ -2534,13 +2534,13 @@ lvplot.rrvglm <- function(object,
xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
if (A) {
- Aadj <- rep(Aadj, length.out = length(index.nosz))
- Acex <- rep(Acex, length.out = length(index.nosz))
- Acol <- rep(Acol, length.out = length(index.nosz))
+ Aadj <- rep_len(Aadj, length(index.nosz))
+ Acex <- rep_len(Acex, length(index.nosz))
+ Acol <- rep_len(Acol, length(index.nosz))
if (length(Alabels) != M)
stop("'Alabels' must be of length ", M)
if (length(Apch)) {
- Apch <- rep(Apch, length.out = length(index.nosz))
+ Apch <- rep_len(Apch, length(index.nosz))
for (i in index.nosz)
points(Amat[i, 1],
Amat[i, 2],
@@ -2555,12 +2555,12 @@ lvplot.rrvglm <- function(object,
if (C) {
p2 <- nrow(Cmat)
- gapC <- rep(gapC, length.out = p2)
- Cadj <- rep(Cadj, length.out = p2)
- Ccex <- rep(Ccex, length.out = p2)
- Ccol <- rep(Ccol, length.out = p2)
- Clwd <- rep(Clwd, length.out = p2)
- Clty <- rep(Clty, length.out = p2)
+ gapC <- rep_len(gapC, p2)
+ Cadj <- rep_len(Cadj, p2)
+ Ccex <- rep_len(Ccex, p2)
+ Ccol <- rep_len(Ccol, p2)
+ Clwd <- rep_len(Clwd, p2)
+ Clty <- rep_len(Clty, p2)
if (length(Clabels) != p2)
stop("'length(Clabels)' must be equal to ", p2)
for (ii in 1:p2) {
@@ -2576,13 +2576,12 @@ lvplot.rrvglm <- function(object,
if (scores) {
ugrp <- unique(groups)
nlev <- length(ugrp) # number of groups
- clty <- rep(clty, length.out = nlev)
- clwd <- rep(clwd, length.out = nlev)
- ccol <- rep(ccol, length.out = nlev)
- if (length(spch))
- spch <- rep(spch, length.out = n)
- scol <- rep(scol, length.out = n)
- scex <- rep(scex, length.out = n)
+ clty <- rep_len(clty, nlev)
+ clwd <- rep_len(clwd, nlev)
+ ccol <- rep_len(ccol, nlev)
+ if (length(spch)) spch <- rep_len(spch, n)
+ scol <- rep_len(scol, n)
+ scex <- rep_len(scex, n)
for (ii in ugrp) {
gp <- groups == ii
if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
@@ -2841,7 +2840,7 @@ setMethod("show", "Coef.rrvglm", function(object)
for (ii in 2:ncol(y)) {
cms[[paste("Col.", ii, sep = "")]] <-
modmat.col[,ii, drop = FALSE]
- .grc.df[[paste("Col.", ii, sep = "")]] <- rep(1, nrow(y))
+ .grc.df[[paste("Col.", ii, sep = "")]] <- rep_len(1, nrow(y))
}
for (ii in 2:nrow(y)) {
cms[[yn1[ii]]] <- diag(ncol(y))
@@ -2906,9 +2905,9 @@ trplot.qrrvglm <-
cex = par()$cex,
col = 1:(nos*(nos-1)/2),
log = "",
- lty = rep(par()$lty, length.out = nos*(nos-1)/2),
- lwd = rep(par()$lwd, length.out = nos*(nos-1)/2),
- tcol = rep(par()$col, length.out = nos*(nos-1)/2),
+ lty = rep_len(par()$lty, nos*(nos-1)/2),
+ lwd = rep_len(par()$lwd, nos*(nos-1)/2),
+ tcol = rep_len(par()$col, nos*(nos-1)/2),
xlab = NULL, ylab = NULL,
main = "", # "Trajectory plot",
type = "b",
@@ -2922,7 +2921,7 @@ trplot.qrrvglm <-
M <- object at misc$M #
nn <- nrow(fv) # Number of sites
if (length(sitenames))
- sitenames <- rep(sitenames, length.out = nn)
+ sitenames <- rep_len(sitenames, nn)
sppNames <- dimnames(object at y)[[2]]
if (!length(which.species)) {
which.species <- sppNames[1:NOS]
@@ -2968,14 +2967,14 @@ trplot.qrrvglm <-
ylab = myylab, main = main, ...)
}
- lwd <- rep(lwd, length.out = nos*(nos-1)/2)
- col <- rep(col, length.out = nos*(nos-1)/2)
- lty <- rep(lty, length.out = nos*(nos-1)/2)
- tcol <- rep(tcol, length.out = nos*(nos-1)/2)
+ lwd <- rep_len(lwd, nos*(nos-1)/2)
+ col <- rep_len(col, nos*(nos-1)/2)
+ lty <- rep_len(lty, nos*(nos-1)/2)
+ tcol <- rep_len(tcol, nos*(nos-1)/2)
oo <- order(coef.obj at latvar) # Sort by the latent variable
ii <- 0
- col <- rep(col, length = nos*(nos-1)/2)
+ col <- rep_len(col, nos*(nos-1)/2)
species.names <- NULL
if (show.plot)
for (i1 in seq(which.species.numer)) {
@@ -3017,12 +3016,13 @@ vcovrrvglm <- function(object, ...) {
-vcovqrrvglm <- function(object,
- I.tolerances = object at control$eq.tolerances,
- MaxScale = c("predictors", "response"),
- dispersion = rep(if (length(sobj at dispersion))
- sobj at dispersion else 1,
- length.out = M), ...) {
+vcovqrrvglm <-
+ function(object,
+ I.tolerances = object at control$eq.tolerances,
+ MaxScale = c("predictors", "response"),
+ dispersion = rep_len(if (length(sobj at dispersion))
+ sobj at dispersion else 1, M),
+ ...) {
stop("this function is not yet completed")
if (mode(MaxScale) != "character" && mode(MaxScale) != "name")
@@ -3034,7 +3034,7 @@ vcovqrrvglm <- function(object,
sobj <- summary(object)
cobj <- Coef(object, I.tolerances = I.tolerances, ...)
M <- nrow(cobj at A)
- dispersion <- rep(dispersion, length.out = M)
+ dispersion <- rep_len(dispersion, M)
if (cobj at Rank != 1)
stop("object must be a rank 1 model")
@@ -3052,7 +3052,7 @@ vcovqrrvglm <- function(object,
"eq.tolerances = FALSE")
answer <- NULL
- Cov.unscaled <- array(NA, c(3, 3, M), dimnames = list(
+ Cov.unscaled <- array(NA_real_, c(3, 3, M), dimnames = list(
c("(Intercept)", "latvar", "latvar^2"),
c("(Intercept)", "latvar", "latvar^2"), dimnames(cobj at D)[[3]]))
for (spp in 1:M) {
@@ -3075,7 +3075,7 @@ vcovqrrvglm <- function(object,
if (nchar(link.function))
paste(link.function, "(Maximum)", sep = "") else
"Maximum"))
- NAthere <- is.na(answer %*% rep(1, length.out = 3))
+ NAthere <- is.na(answer %*% rep_len(1, 3))
answer[NAthere,] <- NA # NA in tolerance means NA everywhere else
new(Class = "vcov.qrrvglm",
Cov.unscaled = Cov.unscaled,
@@ -3151,14 +3151,12 @@ perspqrrvglm <-
NOS <- ncol(fv) # Number of species
M <- object at misc$M #
- xlim <- rep(if (length(xlim)) xlim else
- range(coef.obj at latvar[, 1]), length = 2)
+ xlim <- rep_len(if (length(xlim)) xlim else range(coef.obj at latvar[, 1]), 2)
if (!length(oylim)) {
- ylim <- if (Rank == 1)
- c(0, max(fv) * stretch) else
- rep(range(coef.obj at latvar[, 2]), length = 2)
+ ylim <- if (Rank == 1) c(0, max(fv) * stretch) else
+ rep_len(range(coef.obj at latvar[, 2]), 2)
}
- gridlength <- rep(gridlength, length = Rank)
+ gridlength <- rep_len(gridlength, Rank)
latvar1 <- seq(xlim[1], xlim[2], length = gridlength[1])
if (Rank == 1) {
m <- cbind(latvar1)
@@ -3204,14 +3202,14 @@ perspqrrvglm <-
if (!length(oylim))
ylim <- c(0, max(fitvals[,which.species.numer]) *
stretch) # A revision
- col <- rep(col, length.out = length(which.species.numer))
- llty <- rep(llty, leng = length(which.species.numer))
- llwd <- rep(llwd, leng = length(which.species.numer))
+ col <- rep_len(col, length(which.species.numer))
+ llty <- rep_len(llty, length(which.species.numer))
+ llwd <- rep_len(llwd, length(which.species.numer))
if (!add1)
matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
type = "n",
main = main, xlim = xlim, ylim = ylim, ...)
- for (jloc in 1:length(which.species.numer)) {
+ for (jloc in seq_along(which.species.numer)) {
ptr2 <- which.species.numer[jloc] # points to species column
lines(latvar1, fitvals[, ptr2],
col = col[jloc],
@@ -3520,7 +3518,7 @@ is.bell.vlm <-
is.bell.rrvglm <- function(object, ...) {
M <- object at misc$M
ynames <- object at misc$ynames
- ans <- rep(FALSE, length.out = M)
+ ans <- rep_len(FALSE, M)
if (length(ynames)) names(ans) <- ynames
ans
}
diff --git a/R/family.sur.R b/R/family.sur.R
index 63f67ac..6ec9324 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -159,7 +159,7 @@
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .lmean , length = ncoly))
+ misc$link <- c(rep_len( .lmean , ncoly))
temp.names <- predictors.names
names(misc$link) <- temp.names
@@ -200,7 +200,7 @@
stop("argument 'divisor' unmatched"))
Sigma.elts <- Sigma.elts * ratio.df
} else {
- ratio.df <- rep(1, length = M*(M+1)/2)
+ ratio.df <- rep_len(1, M*(M+1)/2)
}
Sigma.mat <- matrix(0, M, M)
@@ -230,7 +230,7 @@
if (length( .Varcov )) {
Sigma.mat <- if ( .matrix.arg ) .Varcov else {
- temp.vec <- rep( .Varcov , len = M*(M+1)/2)
+ temp.vec <- rep_len( .Varcov , M*(M+1)/2)
temp.mat <- matrix(0, M, M)
temp.mat[cbind(iam.indices$col.index,
iam.indices$row.index)] <- temp.vec
diff --git a/R/family.survival.R b/R/family.survival.R
index 41e130c..c892b14 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -81,8 +81,8 @@
junk <- lm.wfit(x = x, y = c(y), w = c(w))
1.25 * sqrt( sum(w * junk$resid^2) / junk$df.residual )
}
- yyyy.est <- rep(yyyy.est , len = n)
- sd.y.est <- rep(sd.y.est , len = n)
+ yyyy.est <- rep_len(yyyy.est , n)
+ sd.y.est <- rep_len(sd.y.est , n)
etastart <- cbind(mu = theta2eta(yyyy.est, .lmu , earg =.emu ),
sd = theta2eta(sd.y.est, .lsd , earg =.esd ))
}
@@ -186,11 +186,11 @@ dbisa <- function(x, scale = 1, shape, log = FALSE) {
L <- max(length(x), length(shape), length(scale))
- if (length(x) != L) x <- rep(x, len = L)
- if (length(shape) != L) shape <- rep(shape, len = L)
- if (length(scale) != L) scale <- rep(scale, len = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
- logdensity <- rep(log(0), len = L)
+ logdensity <- rep_len(log(0), L)
xok <- (x > 0)
xifun <- function(x) {
@@ -358,16 +358,16 @@ rbisa <- function(n, scale = 1, shape) {
namesof("shape", .lshape , earg = .eshape , tag = FALSE))
if (!length(etastart)) {
- scale.init <- rep( .iscale , len = n)
- shape.init <- if (is.Numeric( .ishape)) rep( .ishape , len = n) else {
+ scale.init <- rep_len( .iscale , n)
+ shape.init <- if (is.Numeric( .ishape)) rep_len( .ishape , n) else {
if ( .imethod == 1) {
- ybar <- rep(weighted.mean(y, w), len = n)
- ybarr <- rep(1 / weighted.mean(1/y, w), len = n) # Reqrs y > 0
+ ybar <- rep_len(weighted.mean(y, w), n)
+ ybarr <- rep_len(1 / weighted.mean(1/y, w), n) # Reqrs y > 0
sqrt(ybar / scale.init + scale.init / ybarr - 2)
} else if ( .imethod == 2) {
sqrt(2*( pmax(y, scale.init+0.1) / scale.init - 1))
} else {
- ybar <- rep(weighted.mean(y, w), len = n)
+ ybar <- rep_len(weighted.mean(y, w), n)
sqrt(2*(pmax(ybar, scale.init + 0.1) / scale.init - 1))
}
}
diff --git a/R/family.ts.R b/R/family.ts.R
index 74fb898..0975e7b 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,6 +9,7 @@
+
rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
index <- cumsum(c(aa, MM*Ranks.))
ans <- matrix(coeffs[(index[i]+1):index[i+1]],
@@ -184,7 +185,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
new.coeffs <- .coefstart # Needed for iter = 1 of $weight
new.coeffs <- if (length(new.coeffs))
- rep(new.coeffs, len = aa+sum(Ranks.)*MM) else
+ rep_len(new.coeffs, aa+sum(Ranks.)*MM) else
runif(aa+sum(Ranks.)*MM)
temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
aa, uu, nn, new.coeffs)
@@ -339,8 +340,8 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
new.coeffs <- .coefstart # Needed for iter = 1 of @weight
new.coeffs <- if (length(new.coeffs))
- rep(new.coeffs, len = p.lm + plag) else
- c(rnorm(p.lm, sd = 0.1), rep(0, plag))
+ rep_len(new.coeffs, p.lm + plag) else
+ c(rnorm(p.lm, sd = 0.1), rep_len(0, plag))
if (!length(etastart)) {
etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p.lm]
@@ -439,7 +440,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
x[, p.lm+ii] <- temp - x.save[tt.index-ii, 1:p.lm, drop = FALSE] %*%
new.coeffs[1:p.lm]
}
- class(x) <- "matrix" # Added 27/2/02; 26/2/04
+ class(x) <- "matrix" # Added 20020227; 20040226
if (iter == 1)
old.coeffs <- new.coeffs
@@ -518,492 +519,6 @@ setMethod("show", "Coef.rrar",
-if (FALSE)
- AR1.control <- function(criterion = "loglikelihood",
- stepsize = 1,
- maxit = 30, ...) {
- list(criterion = criterion,
- stepsize = stepsize,
- maxit = maxit)
-}
-
-
-
-if (TRUE)
- AR1.control <-
- function(half.stepsizing = FALSE, # Avoids jittering very near the solution
- ...) {
- list(half.stepsizing = half.stepsizing
- )
-}
-
-
-
- AR1 <-
- function(ldrift = "identitylink",
- lsd = "loge",
- lvar = "loge",
- lrho = "rhobit",
- idrift = NULL,
- isd = NULL,
- ivar = NULL,
- irho = NULL,
- imethod = 1,
- ishrinkage = 1, # 0.90; unity means a constant
- type.likelihood = c("exact", "conditional"),
- var.arg = FALSE, # TRUE,
- nodrift = FALSE, # TRUE,
- almost1 = 0.99,
- zero = c(if (var.arg) "var" else "sd", "rho") # "ARcoef1"
- ) {
- type.likelihood <- match.arg(type.likelihood,
- c("exact", "conditional"))[1]
-
- if (!is.Numeric(almost1, length.arg = 1) || almost1 < 0.9 ||
- almost1 >= 1)
- stop("Bad input for argument 'almost1'")
-
- if (length(isd) && !is.Numeric(isd, positive = TRUE))
- stop("Bad input for argument 'isd'")
-
- if (length(ivar) && !is.Numeric(ivar, positive = TRUE))
- stop("Bad input for argument 'ivar'")
-
- if (length(irho) &&
- (!is.Numeric(irho) || any(abs(irho) > 1.0)))
- stop("Bad input for argument 'irho'")
-
-
-
- if (!is.logical(nodrift) ||
- length(nodrift) != 1)
- stop("argument 'nodrift' must be a single logical")
-
- if (!is.logical(var.arg) ||
- length(var.arg) != 1)
- stop("argument 'var.arg' must be a single logical")
-
- ismn <- idrift
- lsmn <- as.list(substitute(ldrift))
- esmn <- link2list(lsmn)
- lsmn <- attr(esmn, "function.name")
-
- lsdv <- as.list(substitute(lsd))
- esdv <- link2list(lsdv)
- lsdv <- attr(esdv, "function.name")
-
- lvar <- as.list(substitute(lvar))
- evar <- link2list(lvar)
- lvar <- attr(evar, "function.name")
-
- lrho <- as.list(substitute(lrho))
- erho <- link2list(lrho)
- lrho <- attr(erho, "function.name")
-
- n.sc <- if (var.arg) "var" else "sd"
- l.sc <- if (var.arg) lvar else lsdv
- e.sc <- if (var.arg) evar else esdv
-
-
- new("vglmff",
- blurb = c(ifelse(nodrift, "Two", "Three"),
- "-parameter autoregressive process of order-1\n\n",
- "Links: ",
- if (nodrift) "" else
- paste(namesof("drift", lsmn, earg = esmn), ", ", sep = ""),
- namesof(n.sc , l.sc, earg = e.sc), ", ",
- namesof("rho", lrho, earg = erho), "\n",
- "Model: Y_t = drift + rho * Y_{t-1} + error_{t},", "\n",
- " where 'error_{2:n}' ~ N(0, sigma^2) independently",
- if (nodrift) ", and drift = 0" else "",
- "\n",
- "Mean: drift / (1 - rho)", "\n",
- "Correlation: rho = ARcoef1", "\n",
- "Variance: sd^2 / (1 - rho^2)"),
- constraints = eval(substitute(expression({
-
- M1 <- 3 - .nodrift
- dotzero <- .zero
- eval(negzero.expression.VGAM)
- }), list( .zero = zero,
- .nodrift = nodrift ))),
- infos = eval(substitute(function(...) {
- list(M1 = 3 - nodrift,
- Q1 = 1,
- expected = TRUE,
- multipleResponse = TRUE,
- type.likelihood = .type.likelihood ,
- ldrift = if ( .nodrift ) NULL else .lsmn ,
- edrift = if ( .nodrift ) NULL else .esmn ,
- lvar = .lvar ,
- lsd = .lsdv ,
- evar = .evar ,
- esd = .esdv ,
- lrho = .lrho ,
- erho = .erho ,
- almost1 = .almost1 ,
- zero = .zero )
- }, list( .lsmn = lsmn, .lvar = lvar, .lsdv = lsdv, .lrho = lrho,
- .esmn = esmn, .evar = evar, .esdv = esdv, .erho = erho,
- .type.likelihood = type.likelihood,
- .nodrift = nodrift,
- .almost1 = almost1, .zero = zero))),
- initialize = eval(substitute(expression({
- extra$M1 <- M1 <- 3 - .nodrift
- check <- w.y.check(w = w, y = y,
- Is.positive.y = FALSE,
- ncol.w.max = Inf,
- ncol.y.max = Inf,
- out.wy = TRUE,
- colsyperw = 1,
- maximize = TRUE)
- w <- check$w
- y <- check$y
- if ( .type.likelihood == "conditional")
- w[1, ] <- 1.0e-8 # 1.0e-6
-
-
- NOS <- ncoly <- ncol(y)
- M <- M1*NOS
- var.names <- param.names("var", NOS)
- sdv.names <- param.names("sd", NOS)
- smn.names <- if ( .nodrift ) NULL else
- param.names("drift", NOS)
- rho.names <- param.names("rho", NOS)
-
- mynames1 <- smn.names
- mynames2 <- if ( .var.arg ) var.names else sdv.names
- mynames3 <- rho.names
-
- predictors.names <-
- c(if ( .nodrift ) NULL else
- namesof(smn.names, .lsmn , earg = .esmn , tag = FALSE),
- if ( .var.arg )
- namesof(var.names, .lvar , earg = .evar , tag = FALSE) else
- namesof(sdv.names, .lsdv , earg = .esdv , tag = FALSE),
- namesof(rho.names, .lrho , earg = .erho , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
-
- if (!length(etastart)) {
- init.smn <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .ismn , ishrinkage = .ishrinkage ,
- pos.only = FALSE)
-
-
-
-
- init.rho <- matrix(if (length( .irho )) .irho else 0.1, # Dummy value
- n, NOS, byrow = TRUE)
- init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0, # Dummy value
- n, NOS, byrow = TRUE)
- init.var <- matrix(if (length( .ivar )) .ivar else 1.0, # Dummy value
- n, NOS, byrow = TRUE)
- for (jay in 1: NOS) {
- mycor <- cor(y[-1, jay], y[-n, jay])
- init.smn[-1, jay] <- init.smn[-1, jay] * (1 - mycor)
- if (!length( .irho ))
- init.rho[, jay] <- sign(mycor) * min(0.95, abs(mycor))
- if (!length( .ivar ))
- init.var[, jay] <- var(y[, jay]) * (1 - mycor^2)
- if (!length( .isdv ))
- init.sdv[, jay] <- sqrt(init.var[, jay])
- } # for
-
- etastart <-
- cbind(if ( .nodrift ) NULL else
- theta2eta(init.smn, .lsmn , earg = .esmn ),
- if ( .var.arg )
- theta2eta(init.var, .lvar , earg = .evar ) else
- theta2eta(init.sdv, .lsdv , earg = .esdv ),
- theta2eta(init.rho, .lrho , earg = .erho ))
- etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
- } # end of etastart
- }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
- .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
- .ismn = ismn, .irho = irho, .isdv = isd , .ivar = ivar,
- .type.likelihood = type.likelihood, .ishrinkage = ishrinkage,
- .var.arg = var.arg,
- .nodrift = nodrift,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- M1 <- 3 - .nodrift
- NOS <- ncol(eta)/M1
- ar.smn <- if ( .nodrift ) 0 else
- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
- .lsmn , earg = .esmn )
- ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lrho , earg = .erho )
- ar.smn / (1 - ar.rho)
- }, list ( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
- .var.arg = var.arg, .type.likelihood = type.likelihood,
- .nodrift = nodrift,
- .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
- last = eval(substitute(expression({
- if (any(abs(ar.rho) > 1))
- warning("Regularity conditions are violated at the final",
- "IRLS iteration, since 'abs(rho) > 1")
-
- M1 <- extra$M1
-
- temp.names <- c(mynames1, mynames2, mynames3)
- temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
-
- misc$link <- rep( .lrho , length = M1 * ncoly)
- misc$earg <- vector("list", M1 * ncoly)
- names(misc$link) <-
- names(misc$earg) <- temp.names
- for (ii in 1:ncoly) {
- if ( !( .nodrift ))
- misc$link[ M1*ii-2 ] <- .lsmn
- misc$link[ M1*ii-1 ] <- if ( .var.arg ) .lvar else .lsdv
- misc$link[ M1*ii ] <- .lrho
- if ( !( .nodrift ))
- misc$earg[[M1*ii-2]] <- .esmn
- misc$earg[[M1*ii-1]] <- if ( .var.arg ) .evar else .esdv
- misc$earg[[M1*ii ]] <- .erho
- }
-
- misc$type.likelihood <- .type.likelihood
- misc$var.arg <- .var.arg
- misc$M1 <- M1
- misc$expected <- TRUE
- misc$imethod <- .imethod
- misc$multipleResponses <- TRUE
- misc$nodrift <- .nodrift
-
-
- }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
- .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
- .irho = irho, .isdv = isd , .ivar = ivar,
- .nodrift = nodrift,
- .var.arg = var.arg, .type.likelihood = type.likelihood,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals= FALSE, eta,
- extra = NULL, summation = TRUE) {
- M1 <- 3 - .nodrift
- NOS <- ncol(eta)/M1
-
- if ( .var.arg ) {
- ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lvar , earg = .evar )
- } else {
- ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lsdv , earg = .esdv )
- ar.var <- ar.sdv^2
- }
- ar.smn <- if ( .nodrift ) 0 else
- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
- .lsmn , earg = .esmn )
- ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lrho , earg = .erho )
-
- if (residuals) {
- stop("Loglikelihood not implemented yet to handle",
- "residuals.")
- } else {
- loglik.terms <- c(w) * dAR1(x = y,
- drift = ar.smn,
- var.error = ar.var,
- type.likelihood = .type.likelihood ,
- ARcoef1 = ar.rho, log = TRUE)
- loglik.terms <- as.matrix(loglik.terms)
- if (summation) {
- sum(if ( .type.likelihood == "exact") loglik.terms else
- loglik.terms[-1, ])
- } else {
- loglik.terms
- }
- }
-
- }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
- .var.arg = var.arg, .type.likelihood = type.likelihood,
- .nodrift = nodrift,
- .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
-
- vfamily = c("AR1"),
-
-
- simslot = eval(substitute(
- function(object, nsim) {
-
- pwts <- if (length(pwts <- object at prior.weights) > 0)
- pwts else weights(object, type = "prior")
- if (any(pwts != 1))
- warning("ignoring prior weights")
- eta <- predict(object)
- fva <- fitted(object)
- M1 <- 3 - .nodrift
- NOS <- ncol(eta)/M1
-
- if ( .var.arg ) {
- ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lvar , earg = .evar )
- } else {
- ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lsdv , earg = .esdv )
- ar.var <- ar.sdv^2
- }
- ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
- .lsmn , earg = .esmn )
- ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lrho , earg = .erho )
-
- ans <- array(0, c(nrow(eta), NOS, nsim))
- for (jay in 1:NOS) {
- ans[1, jay, ] <- rnorm(nsim, m = fva[1, jay], # zz
- sd = sqrt(ar.var[1, jay]))
- for (ii in 2:nrow(eta))
- ans[ii, jay, ] <- ar.smn[ii, jay] +
- ar.rho[ii, jay] * ans[ii-1, jay, ] +
- rnorm(nsim, sd = sqrt(ar.var[ii, jay]))
- }
- ans <- matrix(c(ans), c(nrow(eta) * NOS, nsim))
- ans
- }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
- .var.arg = var.arg, .type.likelihood = type.likelihood,
- .nodrift = nodrift,
- .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
-
-
-
-
- deriv = eval(substitute(expression({
- M1 <- 3 - .nodrift
- NOS <- ncol(eta)/M1
- ncoly <- ncol(as.matrix(y))
-
- if ( .var.arg ) {
- ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lvar , earg = .evar )
- } else {
- ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lsdv , earg = .esdv )
- ar.var <- ar.sdv^2
- }
-
- ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
- .lsmn , earg = .esmn )
- ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lrho , earg = .erho )
-
-
- temp1 <- y[-1, , drop = FALSE] -
- ar.smn[-1, , drop = FALSE] -
- y[-n, , drop = FALSE] * ar.rho[-1, , drop = FALSE]
- temp5 <- 1 - ar.rho^2
-
- dl.dsmn <- rbind(rep(0, length = ncoly),
- temp1 / ar.var[-1, , drop = FALSE])
- if ( .var.arg ) {
- dl.dvar <- rbind(rep(0, length = ncoly),
- 0.5 * (temp1 / ar.var[-1, , drop = FALSE])^2 -
- 0.5 / ar.var[-1, , drop = FALSE])
- } else {
- dl.dsdv <- rbind(rep(0, length = ncoly),
- temp1^2 / (ar.sdv[-1, , drop = FALSE])^3 -
- 1 / ar.sdv[-1, , drop = FALSE])
- }
- dl.drho <- rbind(rep(0, length = ncoly),
- y[-n, , drop = FALSE] *
- temp1 / ar.var[-1, , drop = FALSE])
- dl.dsmn[1, ] <- (y[1, ] - mu[1, ]) * (1 + ar.rho[1, ]) / ar.var[1, ]
- dl.drho[1, ] <- ar.rho[1, ] * (y[1, ] - mu[1, ])^2 / ar.var[1, ] -
- ar.rho[1, ] / temp5[1, ]
- if ( .var.arg ) {
- dl.dvar[1, ] <- -0.5 / ar.var[1, ] +
- 0.5 * temp5[1, ] *
- ((y[1, ] - mu[1, ]) / ar.var[1, ])^2
- } else {
- dl.dsdv[1, ] <- -1 / ar.sdv[1, ] +
- temp5[1, ] * (y[1, ] - mu[1, ])^2 / (ar.sdv[1, ])^3
- }
-
-
-
-
- dsmn.deta <- dtheta.deta(ar.smn, .lsmn , earg = .esmn )
- drho.deta <- dtheta.deta(ar.rho, .lrho , earg = .erho )
- if ( .var.arg ) {
- dvar.deta <- dtheta.deta(ar.var, .lvar , earg = .evar )
- } else {
- dsdv.deta <- dtheta.deta(ar.sdv, .lsdv , earg = .esdv )
- }
- myderiv <-
- c(w) * cbind(if ( .nodrift ) NULL else dl.dsmn * dsmn.deta,
- if ( .var.arg ) dl.dvar * dvar.deta else
- dl.dsdv * dsdv.deta,
- dl.drho * drho.deta)
-
- myderiv[, interleave.VGAM(M, M1 = M1)]
- }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
- .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
- .nodrift = nodrift,
- .var.arg = var.arg, .type.likelihood = type.likelihood ))),
- weight = eval(substitute(expression({
- if ( .var.arg ) {
- ned2l.dvar <- 0.5 / ar.var^2
- ned2l.drhovar <- matrix(0, n, ncoly)
- ned2l.drhovar[1, ] <- .almost1 * ar.rho[1, ] / (ar.var[1, ] *
- temp5[1, ])
- } else {
- ned2l.dsdv <- 2 / ar.var
- ned2l.drhosdv <- matrix(0, n, ncoly)
- ned2l.drhosdv[1, ] <- 2 *
- .almost1 * ar.rho[1, ] / (ar.sdv[1, ] *
- temp5[1, ])
- }
-
- if ( !( .nodrift )) {
- ned2l.dsmn <- 1 / ar.var
- ned2l.dsmn[1, ] <- (1 + ar.rho[1, ]) / ((1 - ar.rho[1, ]) *
- ar.var[1, ])
- ned2l.dsmnrho <- mu / ar.var
- ned2l.dsmnrho[1, ] <- 0
- }
-
- ned2l.drho <- (( mu[-n, , drop = FALSE])^2 +
- ar.var[-n, , drop = FALSE] /
- temp5[-n, , drop = FALSE]) / ar.var[-1, , drop = FALSE]
- ned2l.drho <- rbind(0, ned2l.drho)
- ned2l.drho[1, ] <- 2 * (ar.rho[1, ] / temp5[1, ])^2
-
- ncol.wz <- M + (M - 1) + ifelse( .nodrift , 0, M - 2)
- wz <- matrix(0, n, ncol.wz)
-
- if ( !( .nodrift ))
- wz[, M1*(1:NOS) - 2] <- ned2l.dsmn * dsmn.deta^2
- wz[, M1*(1:NOS) - 1] <-
- if ( .var.arg ) ned2l.dvar * dvar.deta^2 else
- ned2l.dsdv * dsdv.deta^2
- wz[, M1*(1:NOS) ] <- ned2l.drho * drho.deta^2
-
- if ( !( .nodrift ))
- wz[, M1*(1:NOS) + M + (M - 1) - M1 + 1] <- ned2l.dsmnrho *
- dsmn.deta * drho.deta
- wz[, M1*(1:NOS) + M - 1] <-
- if ( .var.arg ) ned2l.drhovar * drho.deta * dvar.deta else
- ned2l.drhosdv * drho.deta * dsdv.deta
-
- wz <- w.wz.merge(w = w, wz = wz, n = n, M = ncol.wz, ndepy = NOS)
-
- wz
- }), list( .var.arg = var.arg, .type.likelihood = type.likelihood,
- .nodrift = nodrift,
- .almost1 = almost1)))
- )
-} # End of function 'AR1'
-
-
-
-
-
-
-
-
dAR1 <- function(x,
@@ -1011,12 +526,12 @@ dAR1 <- function(x,
var.error = 1, ARcoef1 = 0.0,
type.likelihood = c("exact", "conditional"),
log = FALSE) {
-
+
type.likelihood <- match.arg(type.likelihood,
c("exact", "conditional"))[1]
-
+
is.vector.x <- is.vector(x)
-
+
x <- as.matrix(x)
drift <- as.matrix(drift)
var.error <- as.matrix(var.error)
@@ -1024,28 +539,29 @@ dAR1 <- function(x,
LLL <- max(nrow(x), nrow(drift), nrow(var.error), nrow(ARcoef1))
UUU <- max(ncol(x), ncol(drift), ncol(var.error), ncol(ARcoef1))
x <- matrix(x, LLL, UUU)
- drift <- matrix(drift, LLL, UUU)
+ drift <- matrix(drift, LLL, UUU)
var.error <- matrix(var.error, LLL, UUU)
rho <- matrix(ARcoef1, LLL, UUU)
-
+
if (any(abs(rho) > 1))
warning("Values of argument 'ARcoef1' are greater ",
"than 1 in absolute value")
-
+
if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("Bad input for argument 'log'.")
+ stop("Bad input for argument 'log'")
rm(log)
-
+
ans <- matrix(0.0, LLL, UUU)
+
var.noise <- var.error / (1 - rho^2)
-
+
ans[ 1, ] <- dnorm(x = x[1, ],
mean = drift[ 1, ] / (1 - rho[1, ]),
sd = sqrt(var.noise[1, ]), log = log.arg)
ans[-1, ] <- dnorm(x = x[-1, ],
mean = drift[-1, ] + rho[-1, ] * x[-nrow(x), ],
- sd = sqrt(var.noise[-1, ]), log = log.arg)
-
+ sd = sqrt(var.error[-1, ]), log = log.arg)
+
if (type.likelihood == "conditional")
ans[1, ] <- NA
@@ -1054,6 +570,537 @@ dAR1 <- function(x,
+if (FALSE)
+AR1.control <- function(epsilon = 1e-6,
+ maxit = 30,
+ stepsize = 1,...){
+ list(epsilon = epsilon,
+ maxit = maxit,
+ stepsize = stepsize,
+ ...)
+}
+
+
+
+AR1 <-
+ function(ldrift = "identitylink",
+ lsd = "loge",
+ lvar = "loge",
+ lrho = "rhobit",
+ idrift = NULL,
+ isd = NULL,
+ ivar = NULL,
+ irho = NULL,
+ imethod = 1,
+ ishrinkage = 0.95, # 0.90; unity means a constant
+ type.likelihood = c("exact", "conditional"),
+ type.EIM = c("exact", "approximate"),
+ var.arg = FALSE, # TRUE,
+ nodrift = FALSE, # TRUE,
+ print.EIM = FALSE,
+ zero = c(if (var.arg) "var" else "sd", "rho") # "ARcoeff1"
+ ) {
+
+ type.likelihood <- match.arg(type.likelihood,
+ c("exact", "conditional"))[1]
+
+ if (length(isd) && !is.Numeric(isd, positive = TRUE))
+ stop("Bad input for argument 'isd'")
+
+ if (length(ivar) && !is.Numeric(ivar, positive = TRUE))
+ stop("Bad input for argument 'ivar'")
+
+ if (length(irho) &&
+ (!is.Numeric(irho) || any(abs(irho) > 1.0)))
+ stop("Bad input for argument 'irho'")
+
+ type.EIM <- match.arg(type.EIM, c("exact", "approximate"))[1]
+ poratM <- (type.EIM == "exact")
+
+ if (!is.logical(nodrift) ||
+ length(nodrift) != 1)
+ stop("argument 'nodrift' must be a single logical")
+
+ if (!is.logical(var.arg) ||
+ length(var.arg) != 1)
+ stop("argument 'var.arg' must be a single logical")
+
+ if (!is.logical(print.EIM))
+ stop("Invalid 'print.EIM'.")
+
+ ismn <- idrift
+ lsmn <- as.list(substitute(ldrift))
+ esmn <- link2list(lsmn)
+ lsmn <- attr(esmn, "function.name")
+
+ lsdv <- as.list(substitute(lsd))
+ esdv <- link2list(lsdv)
+ lsdv <- attr(esdv, "function.name")
+
+ lvar <- as.list(substitute(lvar))
+ evar <- link2list(lvar)
+ lvar <- attr(evar, "function.name")
+
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
+
+ n.sc <- if (var.arg) "var" else "sd"
+ l.sc <- if (var.arg) lvar else lsdv
+ e.sc <- if (var.arg) evar else esdv
+
+ new("vglmff",
+ blurb = c(ifelse(nodrift, "Two", "Three"),
+ "-parameter autoregressive process of order-1\n\n",
+ "Links: ",
+ if (nodrift) "" else
+ paste(namesof("drift", lsmn, earg = esmn), ", ",
+ sep = ""),
+ namesof(n.sc , l.sc, earg = e.sc), ", ",
+ namesof("rho", lrho, earg = erho), "\n",
+ "Model: Y_t = drift + rho * Y_{t-1} + error_{t},",
+ "\n",
+ " where 'error_{2:n}' ~ N(0, sigma^2) ",
+ "independently",
+ if (nodrift) ", and drift = 0" else "",
+ "\n",
+ "Mean: drift / (1 - rho)", "\n",
+ "Correlation: rho = ARcoef1", "\n",
+ "Variance: sd^2 / (1 - rho^2)"),
+
+ constraints = eval(substitute(expression({
+
+ M1 <- 3 - .nodrift
+ dotzero <- .zero
+ # eval(negzero.expression.VGAM)
+ constraints <-
+ cm.zero.VGAM(constraints, x = x, zero = .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M1)
+
+ }), list( .zero = zero,
+ .nodrift = nodrift ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3 - .nodrift ,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponse = TRUE,
+ type.likelihood = .type.likelihood ,
+ ldrift = if ( .nodrift ) NULL else .lsmn ,
+ edrift = if ( .nodrift ) NULL else .esmn ,
+ lvar = .lvar ,
+ lsd = .lsdv ,
+ evar = .evar ,
+ esd = .esdv ,
+ lrho = .lrho ,
+ erho = .erho ,
+ zero = .zero )
+ }, list( .lsmn = lsmn, .lvar = lvar, .lsdv = lsdv, .lrho = lrho,
+ .esmn = esmn, .evar = evar, .esdv = esdv, .erho = erho,
+ .type.likelihood = type.likelihood,
+ .nodrift = nodrift, .zero = zero))),
+
+ initialize = eval(substitute(expression({
+ extra$M1 <- M1 <- 3 - .nodrift
+ check <- w.y.check(w = w, y = y,
+ Is.positive.y = FALSE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- check$w
+ y <- check$y
+ if ( .type.likelihood == "conditional") {
+ w[1, ] <- 1.0e-6
+ } else {
+ if (!(.nodrift ))
+ w[1, ] <- 1.0e-1
+ }
+
+ NOS <- ncoly <- ncol(y)
+ n <- nrow(y)
+ M <- M1*NOS
+
+ var.names <- param.names("var", NOS)
+ sdv.names <- param.names("sd", NOS)
+ smn.names <- if ( .nodrift ) NULL else
+ param.names("drift", NOS)
+ rho.names <- param.names("rho", NOS)
+
+ mynames1 <- smn.names
+ mynames2 <- if ( .var.arg ) var.names else sdv.names
+ mynames3 <- rho.names
+
+ predictors.names <-
+ c(if ( .nodrift ) NULL else
+ namesof(smn.names, .lsmn , earg = .esmn , tag = FALSE),
+ if ( .var.arg )
+ namesof(var.names, .lvar , earg = .evar , tag = FALSE) else
+ namesof(sdv.names, .lsdv , earg = .esdv , tag = FALSE),
+ namesof(rho.names, .lrho , earg = .erho , tag = FALSE))
+
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+
+ if ( .nodrift )
+ y <- scale(y, scale = FALSE)
+
+ if (!length(etastart)) {
+ init.smn <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ismn , ishrinkage = .ishrinkage ,
+ pos.only = FALSE)
+
+ init.rho <- matrix(if (length( .irho )) .irho else 0.1,
+ n, NOS, byrow = TRUE)
+ init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0,
+ n, NOS, byrow = TRUE)
+ init.var <- matrix(if (length( .ivar )) .ivar else 1.0,
+ n, NOS, byrow = TRUE)
+ for (jay in 1: NOS) {
+ mycor <- cor(y[-1, jay], y[-n, jay])
+ init.smn[ , jay] <- mean(y[, jay]) * (1 - mycor)
+ if (!length( .irho ))
+ init.rho[, jay] <- sign(mycor) * min(0.95, abs(mycor))
+ if (!length( .ivar ))
+ init.var[, jay] <- var(y[, jay]) * (1 - mycor^2)
+ if (!length( .isdv ))
+ init.sdv[, jay] <- sqrt(init.var[, jay])
+ } # for
+
+ etastart <-
+ cbind(if ( .nodrift ) NULL else
+ theta2eta(init.smn, .lsmn , earg = .esmn ),
+ if ( .var.arg )
+ theta2eta(init.var, .lvar , earg = .evar ) else
+ theta2eta(init.sdv, .lsdv , earg = .esdv ),
+ theta2eta(init.rho, .lrho , earg = .erho ))
+
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+ } # end of etastart
+
+ }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
+ .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
+ .ismn = ismn, .irho = irho, .isdv = isd , .ivar = ivar,
+ .type.likelihood = type.likelihood,
+ .ishrinkage = ishrinkage, .poratM = poratM,
+ .var.arg = var.arg,
+ .nodrift = nodrift,
+ .imethod = imethod ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ M1 <- 3 - .nodrift
+ NOS <- ncol(eta)/M1
+ ar.smn <- if ( .nodrift ) 0 else
+ eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lsmn , earg = .esmn )
+ ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lrho , earg = .erho )
+ ar.smn / (1 - ar.rho)
+
+ }, list ( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
+ .var.arg = var.arg, .type.likelihood = type.likelihood,
+ .nodrift = nodrift,
+ .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
+
+ last = eval(substitute(expression({
+ if (any(abs(ar.rho) > 1))
+ warning("Regularity conditions are violated at the final",
+ "IRLS iteration, since 'abs(rho) > 1")
+
+ M1 <- extra$M1
+
+ temp.names <- c(mynames1, mynames2, mynames3)
+ temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
+
+ misc$link <- rep_len( .lrho , M1 * ncoly)
+ misc$earg <- vector("list", M1 * ncoly)
+ names(misc$link) <-
+ names(misc$earg) <- temp.names
+ for (ii in 1:ncoly) {
+ if ( !( .nodrift ))
+ misc$link[ M1*ii-2 ] <- .lsmn
+ misc$link[ M1*ii-1 ] <- if ( .var.arg ) .lvar else .lsdv
+ misc$link[ M1*ii ] <- .lrho
+ if ( !( .nodrift ))
+ misc$earg[[M1*ii-2]] <- .esmn
+ misc$earg[[M1*ii-1]] <- if ( .var.arg ) .evar else .esdv
+ misc$earg[[M1*ii ]] <- .erho
+ }
+
+ misc$type.likelihood <- .type.likelihood
+ misc$var.arg <- .var.arg
+ misc$M1 <- M1
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ misc$multipleResponses <- TRUE
+ misc$nodrift <- .nodrift
+
+ }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
+ .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
+ .irho = irho, .isdv = isd , .ivar = ivar,
+ .nodrift = nodrift, .poratM = poratM,
+ .var.arg = var.arg, .type.likelihood = type.likelihood,
+ .imethod = imethod ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals= FALSE, eta,
+ extra = NULL, summation = TRUE) {
+ M1 <- 3 - .nodrift
+ NOS <- ncol(eta)/M1
+
+ if ( .var.arg ) {
+ ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lvar , earg = .evar )
+ ar.sdv <- sqrt(ar.var)
+ } else {
+ ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lsdv , earg = .esdv )
+ ar.var <- ar.sdv^2
+ }
+ ar.smn <- if ( .nodrift ) 0 else
+ eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lsmn , earg = .esmn )
+ ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lrho , earg = .erho )
+
+ if (residuals) {
+ stop("Loglikelihood not implemented yet to handle",
+ "residuals.")
+ } else {
+ loglik.terms <-
+ c(w) * dAR1(x = y,
+ drift = ar.smn,
+ var.error = ar.var,
+ type.likelihood = .type.likelihood ,
+ ARcoef1 = ar.rho, log = TRUE)
+ loglik.terms <- as.matrix(loglik.terms)
+
+ if (summation) {
+ sum(if ( .type.likelihood == "exact") loglik.terms else
+ loglik.terms[-1, ] )
+ } else {
+ loglik.terms
+ }
+ }
+
+ }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
+ .var.arg = var.arg, .type.likelihood = type.likelihood,
+ .nodrift = nodrift,
+ .esmn = esmn, .erho = erho ,
+ .esdv = esdv, .evar = evar ))),
+
+ vfamily = c("AR1"),
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ fva <- fitted(object)
+ M1 <- 3 - .nodrift
+ NOS <- ncol(eta)/M1
+
+ if ( .var.arg ) {
+ ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lvar , earg = .evar )
+ ar.sdv <- sqrt(ar.var)
+ } else {
+ ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lsdv , earg = .esdv )
+ ar.var <- ar.sdv^2
+ }
+ ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
+ eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lsmn , earg = .esmn )
+ ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lrho , earg = .erho )
+
+ ans <- array(0, c(nrow(eta), NOS, nsim))
+ for (jay in 1:NOS) {
+ ans[1, jay, ] <- rnorm(nsim, m = fva[1, jay], # zz
+ sd = sqrt(ar.var[1, jay]))
+ for (ii in 2:nrow(eta))
+ ans[ii, jay, ] <- ar.smn[ii, jay] +
+ ar.rho[ii, jay] * ans[ii-1, jay, ] +
+ rnorm(nsim, sd = sqrt(ar.var[ii, jay]))
+ }
+ ans <- matrix(c(ans), c(nrow(eta) * NOS, nsim))
+ ans
+
+ }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
+ .var.arg = var.arg, .type.likelihood = type.likelihood,
+ .nodrift = nodrift,
+ .esmn = esmn, .erho = erho ,
+ .esdv = esdv, .evar = evar ))),
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 3 - .nodrift
+ NOS <- ncol(eta)/M1
+ ncoly <- ncol(as.matrix(y))
+
+ if ( .var.arg ) {
+ ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lvar , earg = .evar )
+ ar.sdv <- sqrt(ar.var)
+ } else {
+ ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ .lsdv , earg = .esdv )
+ ar.var <- ar.sdv^2
+ }
+
+ ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
+ eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+ .lsmn , earg = .esmn )
+
+ ar.rho <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lrho , earg = .erho )
+
+ if (any(abs(ar.rho) < 1e-2))
+ warning("Estimated values of 'rho' are too close to zero.")
+
+ help2 <- (length(colnames(x)) >= 2)
+ myMeans <- matrix(colMeans(y), nrow = n, ncol = NOS, by = TRUE)
+ yLag <- matrix(y, ncol = NOS)
+ temp4 <- matrix(0.0, nrow = n, ncol = NOS)
+ temp4[-1, ] <- y[-1, , drop = FALSE] - ar.smn[-1, , drop = FALSE]
+ yLag[-1, ] <- y[-n, ]
+
+ temp1 <- matrix(0.0, nrow = n, ncol = NOS)
+ temp1[-1, ] <- y[-1, , drop = FALSE] - (ar.smn[-1, ,drop = FALSE] +
+ ar.rho[-1, , drop = FALSE] * y[-n, , drop = FALSE])
+ temp1[1, ] <- y[1, ] - ar.smn[1, ]
+ dl.dsmn <- temp1 / ar.var
+ dl.dsmn[1, ] <- ( (y[1, ] - myMeans[1, ]) *
+ (1 + ar.rho[1, ]) ) / ar.var[1, ]
+
+ if ( .var.arg ) {
+ dl.dvarSD <- temp1^2 / ( 2 * ar.var^2) - 1 / (2 * ar.var)
+ dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) * (y[1, ] -
+ myMeans[1, ])^2 ) /(2 * ar.var[1, ]^2) - 1 / (2 * ar.var[1, ])
+ } else {
+ dl.dvarSD <- temp1^2 / ar.sdv^3 - 1 / ar.sdv
+ dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) *
+ (y[1, ] - myMeans[1, ])^2 ) / ar.sdv[1, ]^3 - 1/ar.sdv[1, ]
+ }
+
+ dl.drho <- rbind(rep_len(0, 1),
+ ( (y[-n, , drop = FALSE] - myMeans[-n, ]) *
+ temp1[-1, , drop = FALSE ] )/ ar.var[-1, ] )
+ dl.drho[1, ] <- (ar.rho[1, ] * (y[1, ] - myMeans[1, ])^2 ) /
+ ar.var[1, ] - ar.rho[1, ] / (1 - ar.rho[1, ]^2)
+
+ dsmn.deta <- dtheta.deta(ar.smn, .lsmn , earg = .esmn )
+ drho.deta <- dtheta.deta(ar.rho, .lrho , earg = .erho )
+ if ( .var.arg ) {
+ dvarSD.deta <- dtheta.deta(ar.var, .lvar , earg = .evar )
+ } else {
+ dvarSD.deta <- dtheta.deta(ar.sdv, .lsdv , earg = .esdv )
+ }
+
+ myderiv <-
+ c(w) * cbind(if ( .nodrift ) NULL else dl.dsmn * dsmn.deta,
+ dl.dvarSD * dvarSD.deta,
+ dl.drho * drho.deta)
+ myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
+ myderiv
+
+ }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
+ .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
+ .nodrift = nodrift ,
+ .var.arg = var.arg,
+ .type.likelihood = type.likelihood ))),
+
+ weight = eval(substitute(expression({
+
+ ned2l.dsmn <- 1 / ar.var
+ ned2l.dsmn[1, ] <- ( (1 + ar.rho[1, ]) / (1 - ar.rho[1, ]) ) *
+ (1 / ar.var[1, ])
+ # Here, same results for the first and t > 1 observations.
+ ned2l.dvarSD <- if ( .var.arg ) 1 / (2 * ar.var^2) else 2 / ar.var
+ gamma0 <- (1 - help2) * ar.var/(1 - ar.rho^2) +
+ help2 * (yLag - myMeans)^2
+ ned2l.drho <- gamma0 / ar.var
+ ned2l.drho[1, ] <- 2 * ar.rho[1, ]^2 / (1 - ar.rho[1, ]^2)^2
+ ned2l.drdv <- matrix(0.0, nrow = n, ncol = NOS)
+ ned2l.drdv[1, ] <- 2 * temp4[1, ] /
+ ((1 - temp4[1, ]^2) * ar.sdv[1, ])
+ ncol.wz <- M + (M - 1) + ifelse( .nodrift , 0, M - 2)
+ ncol.pf <- 3 * (M + ( .nodrift ) ) - 3
+ wz <- matrix(0, nrow = n, ncol = ncol.wz)
+ helpPor <- .poratM
+
+ pf.mat <- if (helpPor)
+ AR1EIM(x = scale(y, scale = FALSE),
+ var.arg = .var.arg ,
+ p.drift = 0,
+ WNsd = ar.sdv,
+ ARcoeff1 = ar.rho ) else
+ array(0.0, dim= c(n, NOS, ncol.pf))
+
+ if (!( .nodrift ))
+ wz[, M1*(1:NOS) - 2] <- ( (helpPor) * pf.mat[, , 1] +
+ (1 - (helpPor)) * ned2l.dsmn) * dsmn.deta^2
+ wz[, M1*(1:NOS) - 1] <- ( (helpPor) * pf.mat[, , 2 ] +
+ (1 - (helpPor)) * ned2l.dvarSD) * dvarSD.deta^2
+ wz[, M1*(1:NOS) ] <- ( (helpPor) * pf.mat[, , 3] +
+ (1 - (helpPor)) * ned2l.drho) * drho.deta^2
+ wz[, M1*(1:NOS) + (M - 1) ] <- ((helpPor) * pf.mat[, , 4] +
+ (1 - (helpPor)) * ned2l.drdv) * drho.deta * dvarSD.deta
+
+ wz <- w.wz.merge(w = w, wz = wz, n = n,
+ M = ncol.wz, ndepy = NOS)
+
+ if ( .print.EIM ) {
+ wz2 <- matrix(0, nrow = n, ncol = ncol.wz)
+ if (!(.nodrift ))
+ wz2[, M1*(1:NOS) - 2] <- ned2l.dsmn
+ wz2[, M1*(1:NOS) - 1] <-
+ if ( .var.arg ) 1 / (2 * ar.var^2) else 2 / ar.var
+ wz2[, M1*(1:NOS) ] <- ned2l.drho
+
+ wz2 <- wz2[, interleave.VGAM( M1 * NOS, M1)]
+ if (NOS > 1) {
+
+ matAux1 <- matAux2 <- matrix(NA_real_, nrow = n, ncol = NOS)
+ approxMat <- array(wz2[, 1:(M1*NOS)], dim = c(n, M1, NOS))
+ for (kk in 1:NOS) {
+ matAux1[, kk] <- rowSums(approxMat[, , kk])
+ matAux2[, kk] <- rowSums(pf.mat[, kk , ])
+ }
+ matAux <- cbind(matAux1, if (.poratM ) matAux2 else NULL)
+ colnames(matAux) <- c(paste("ApproxEIM.R",1:NOS, sep = ""),
+ if (!(.poratM )) NULL else
+ paste("ExactEIM.R",1:NOS, sep = ""))
+
+ matAux <- matAux[, interleave.VGAM( (1 + .poratM) * NOS,
+ M1 = 1 + .poratM)]
+ } else {
+
+ matAux <- cbind(rowSums(wz2),
+ if (helpPor)
+ rowSums(pf.mat[, 1, ][, 1:3]) else NULL)
+ colnames(matAux) <- c("Approximate",
+ if (helpPor) "Exact" else NULL)
+
+ }
+ print(matAux[1:10, , drop = FALSE])
+ }
+
+ wz
+
+ }), list( .var.arg = var.arg, .type.likelihood = type.likelihood,
+ .nodrift = nodrift, .poratM = poratM,
+ .print.EIM = print.EIM )))
+ )
+ }
+
+
+
diff --git a/R/family.univariate.R b/R/family.univariate.R
index bdc49fa..0a78f3b 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -75,7 +75,7 @@
if (!length(etastart)) {
theta.init <- if (length( .itheta )) {
- rep( .itheta , length = n)
+ rep_len( .itheta , n)
} else {
mccullagh89.aux <- function(thetaval, y, x, w, extraargs)
mean((y - thetaval) *
@@ -84,13 +84,13 @@
try.this <- grid.search(theta.grid, objfun = mccullagh89.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- try.this <- rep(try.this, length.out = n)
+ try.this <- rep_len(try.this, n)
try.this
}
tmp <- y / (theta.init - y)
tmp[tmp < -0.4] <- -0.4
tmp[tmp > 10.0] <- 10.0
- nuvec.init <- rep(if (length( .inuvec )) .inuvec else tmp, length = n)
+ nuvec.init <- rep_len(if (length( .inuvec )) .inuvec else tmp, n)
nuvec.init[!is.finite(nuvec.init)] <- 0.4
etastart <-
cbind(theta2eta(theta.init, .ltheta , earg = .etheta ),
@@ -208,7 +208,7 @@ hzeta.control <- function(save.weights = TRUE, ...) {
if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
if (meany < 2.5) 1.4 else 1.1
}
- a.init <- rep(a.init, length = n)
+ a.init <- rep_len(a.init, n)
etastart <- theta2eta(a.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .ialpha = ialpha ))),
@@ -307,12 +307,12 @@ dhzeta <- function(x, alpha, log = FALSE) {
stop("'alpha' must be numeric and have positive values")
nn <- max(length(x), length(alpha))
- if (length(x) != nn) x <- rep(x, length.out = nn)
- if (length(alpha) != nn) alpha <- rep(alpha, length.out = nn)
+ if (length(x) != nn) x <- rep_len(x, nn)
+ if (length(alpha) != nn) alpha <- rep_len(alpha, nn)
ox <- !is.finite(x)
zero <- ox | round(x) != x | x < 1
- ans <- rep(0, length.out = nn)
+ ans <- rep_len(0, nn)
ans[!zero] <- (2*x[!zero]-1)^(-alpha[!zero]) -
(2*x[!zero]+1)^(-alpha[!zero])
if (log.arg) log(ans) else ans
@@ -324,8 +324,8 @@ phzeta <- function(q, alpha, log.p = FALSE) {
nn <- max(length(q), length(alpha))
- q <- rep(q, length.out = nn)
- alpha <- rep(alpha, length.out = nn)
+ q <- rep_len(q, nn)
+ alpha <- rep_len(alpha, nn)
oq <- !is.finite(q)
zero <- oq | q < 1
q <- floor(q)
@@ -348,8 +348,8 @@ qhzeta <- function(p, alpha) {
stop("argument 'p' must have values inside the interval (0,1)")
nn <- max(length(p), length(alpha))
- p <- rep(p, length.out = nn)
- alpha <- rep(alpha, length.out = nn)
+ p <- rep_len(p, nn)
+ alpha <- rep_len(alpha, nn)
ans <- (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
ans[alpha <= 0] <- NaN
floor(ans + 1)
@@ -364,7 +364,7 @@ rhzeta <- function(n, alpha) {
length.arg = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- alpha <- rep(alpha, length = use.n)
+ alpha <- rep_len(alpha, use.n)
ans <- (runif(use.n)^(-1/alpha) - 1) / 2
ans[alpha <= 0] <- NaN
floor(ans + 1)
@@ -451,7 +451,7 @@ rhzeta <- function(n, alpha) {
prob.init <- matrix(prob.init, n, M, byrow = TRUE)
}
- phi.init <- rep( .iphi , length.out = n)
+ phi.init <- rep_len( .iphi , n)
etastart <-
cbind(log(prob.init[, -M] / prob.init[, M]),
theta2eta(phi.init, .lphi , earg = .ephi ))
@@ -467,7 +467,7 @@ rhzeta <- function(n, alpha) {
}, list( .ephi = ephi, .lphi = lphi ))),
last = eval(substitute(expression({
- misc$link <- c(rep("loge", length = M-1), .lphi )
+ misc$link <- c(rep_len("loge", M-1), .lphi )
names(misc$link) <- c(
paste("prob[,", 1:(M-1), "]/prob[,", M, "])", sep = ""),
"phi")
@@ -500,7 +500,7 @@ rhzeta <- function(n, alpha) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ans <- rep(0.0, length.out = n)
+ ans <- rep_len(0.0, n)
omega <- extra$n2
for (jay in 1:M) {
maxyj <- max(ycount[, jay])
@@ -551,7 +551,7 @@ rhzeta <- function(n, alpha) {
phi <- eta2theta(eta[, M], .lphi , earg = .ephi )
dl.dprobs <- matrix(0.0, n, M-1)
- dl.dphi <- rep(0.0, length.out = n)
+ dl.dphi <- rep_len(0.0, n)
omega <- extra$n2
ycount <- as.matrix(y * c(w))
@@ -752,11 +752,11 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
linkinv = eval(substitute(function(eta, extra = NULL) {
shape <- eta2theta(eta, .link , earg = .earg )
M <- if (is.matrix(eta)) ncol(eta) else 1
- sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ sumshape <- as.vector(shape %*% rep_len(1, M))
(extra$y + shape) / (extra$n2 + sumshape)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- paste("shape", 1:M, sep = "")
misc$earg <- vector("list", M)
@@ -772,7 +772,7 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
summation = TRUE) {
shape <- eta2theta(eta, .link , earg = .earg )
M <- if (is.matrix(eta)) ncol(eta) else 1
- sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ sumshape <- as.vector(shape %*% rep_len(1, M))
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -790,7 +790,7 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
deriv = eval(substitute(expression({
shape <- eta2theta(eta, .link , earg = .earg )
- sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ sumshape <- as.vector(shape %*% rep_len(1, M))
dl.dsh <- digamma(sumshape) - digamma(extra$n2 + sumshape) +
digamma(y + shape) - digamma(shape)
@@ -852,7 +852,7 @@ rdiric <- function(n, shape, dimension = NULL,
dimension <- length(shape)
if (length(shape) != dimension)
- shape <- rep(shape, length.out = dimension)
+ shape <- rep_len(shape, dimension)
ans <- rgamma(use.n * dimension,
rep(shape, rep(use.n, dimension)))
@@ -935,7 +935,7 @@ rdiric <- function(n, shape, dimension = NULL,
prop.table(shape, 1)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , length.out = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -953,7 +953,7 @@ rdiric <- function(n, shape, dimension = NULL,
summation = TRUE) {
shape <- eta2theta(eta, .link , earg = .earg )
M <- if (is.matrix(eta)) ncol(eta) else 1
- sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ sumshape <- as.vector(shape %*% rep_len(1, M))
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -993,7 +993,7 @@ rdiric <- function(n, shape, dimension = NULL,
deriv = eval(substitute(expression({
shape <- eta2theta(eta, .link , earg = .earg )
- sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ sumshape <- as.vector(shape %*% rep_len(1, M))
dl.dsh <- digamma(sumshape) - digamma(shape) + log(y)
dsh.deta <- dtheta.deta(shape, .link , earg = .earg )
@@ -1089,7 +1089,7 @@ rdiric <- function(n, shape, dimension = NULL,
stop("Sorry, currently cannot handle x < 0")
ok <- is.finite(x) & x > 0 & x != 1 # Handles NAs
- ans <- rep(NA_real_, length(x))
+ ans <- rep_len(NA_real_, length(x))
nn <- sum(ok) # Effective length (excludes x < 0 and x = 1 values)
if (nn)
ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
@@ -1114,13 +1114,13 @@ dzeta <- function(x, p, log = FALSE) {
if (!is.Numeric(p, positive = TRUE)) # || min(p) <= 1
stop("'p' must be numeric and > 0")
LLL <- max(length(p), length(x))
- x <- rep(x, length.out = LLL);
- p <- rep(p, length.out = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
ox <- !is.finite(x)
zero <- ox | round(x) != x | x < 1
if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
- ans <- rep(if (log.arg) log(0) else 0, length.out = LLL)
+ ans <- rep_len(if (log.arg) log(0) else 0, LLL)
if (any(!zero)) {
if (log.arg) {
ans[!zero] <- (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
@@ -1135,26 +1135,28 @@ dzeta <- function(x, p, log = FALSE) {
- zetaff <- function(link = "loge", init.p = NULL, zero = NULL) {
+ zetaff <-
+ function(link = "loge",
+ ishape = NULL,
+ gshape = exp(-3:4)/4,
+ zero = NULL) {
- if (length(init.p) && !is.Numeric(init.p, positive = TRUE))
- stop("argument 'init.p' must be > 0")
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+ stop("argument 'ishape' must be > 0")
link <- as.list(substitute(link))
earg <- link2list(link)
link <- attr(earg, "function.name")
-
-
new("vglmff",
blurb = c("Zeta distribution ",
- "f(y) = 1/(y^(p+1) zeta(p+1)), p>0, y = 1, 2,..\n\n",
+ "f(y) = 1/(y^(shape+1) zeta(shape+1)), shape>0, y = 1, 2,..\n\n",
"Link: ",
- namesof("p", link, earg = earg), "\n\n",
- "Mean: zeta(p) / zeta(p+1), provided p>1\n",
- "Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
+ namesof("shape", link, earg = earg), "\n\n",
+ "Mean: zeta(shape) / zeta(shape+1), provided shape>1\n",
+ "Variance: zeta(shape-1) / zeta(shape+1) - mean^2, provided shape>2"),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
@@ -1179,8 +1181,7 @@ dzeta <- function(x, p, log = FALSE) {
ncoly <- ncol(y)
-
- mynames1 <- param.names("p", ncoly)
+ mynames1 <- param.names("shape", ncoly)
predictors.names <-
namesof(mynames1, .link , earg = .earg , tag = FALSE)
@@ -1196,20 +1197,19 @@ dzeta <- function(x, p, log = FALSE) {
}
- p.grid <- seq(0.1, 3.0, length.out = 19)
- pp.init <- matrix( if (length( .init.p )) .init.p else -1,
- n, M, byrow = TRUE)
- if (!length( .init.p ))
- for (spp. in 1:ncoly) {
- pp.init[, spp.] <- grid.search(p.grid, objfun = zetaff.Loglikfun,
- y = y[, spp.], x = x, w = w[, spp.])
- if ( .link == "loglog")
- pp.init[pp.init <= 1, spp.] <- 1.2
- }
+ gshape <- .gshape
+ shape.init <- matrix(if (length( .ishape )) .ishape else -1,
+ n, M, byrow = TRUE)
+ if (!length( .ishape ))
+ for (jay in 1:ncoly) {
+ shape.init[, jay] <- grid.search(gshape, objfun = zetaff.Loglikfun,
+ y = y[, jay], x = x, w = w[, jay])
+ }
- etastart <- theta2eta(pp.init, .link , earg = .earg )
+ etastart <- theta2eta(shape.init, .link , earg = .earg )
}
- }), list( .link = link, .earg = earg, .init.p = init.p ))),
+ }), list( .link = link, .earg = earg,
+ .ishape = ishape, .gshape = gshape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
ans <- pp <- eta2theta(eta, .link , earg = .earg )
ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
@@ -1217,18 +1217,15 @@ dzeta <- function(x, p, log = FALSE) {
ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- M1 <- extra$M1
-
- misc$link <- rep( .link , length = ncoly)
+ misc$link <- rep_len( .link , ncoly)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
names(misc$earg) <- mynames1
- for (ii in 1:ncoly) {
- misc$earg[[ii]] <- .earg
+ for (jay in 1:ncoly) {
+ misc$earg[[jay]] <- .earg
}
- misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
@@ -1248,20 +1245,20 @@ dzeta <- function(x, p, log = FALSE) {
}, list( .link = link, .earg = earg ))),
vfamily = c("zetaff"),
deriv = eval(substitute(expression({
- pp <- eta2theta(eta, .link , earg = .earg )
+ shape <- eta2theta(eta, .link , earg = .earg )
- fred1 <- zeta(pp+1)
- fred2 <- zeta(pp+1, deriv = 1)
- dl.dpp <- -log(y) - fred2 / fred1
+ fred1 <- zeta(shape+1)
+ fred2 <- zeta(shape+1, deriv = 1)
+ dl.dshape <- -log(y) - fred2 / fred1
- dpp.deta <- dtheta.deta(pp, .link , earg = .earg )
+ dshape.deta <- dtheta.deta(shape, .link , earg = .earg )
- c(w) * dl.dpp * dpp.deta
+ c(w) * dl.dshape * dshape.deta
}), list( .link = link, .earg = earg ))),
weight = expression({
- NOS <- ncol(y)
- nd2l.dpp2 <- zeta(pp + 1, deriv = 2) / fred1 - (fred2/fred1)^2
- wz <- nd2l.dpp2 * dpp.deta^2
+ NOS <- NCOL(y)
+ nd2l.dshape2 <- zeta(shape + 1, deriv = 2) / fred1 - (fred2/fred1)^2
+ wz <- nd2l.dshape2 * dshape.deta^2
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}))
}
@@ -1270,25 +1267,26 @@ dzeta <- function(x, p, log = FALSE) {
gharmonic <- function(n, s = 1, lognexponent = 0) {
- if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'n'")
- if (!is.Numeric(lognexponent, length.arg = 1))
- stop("bad input for argument 'lognexponent'")
- if (length(n) == 1 && length(s) == 1) {
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(lognexponent, length.arg = 1))
+ stop("bad input for argument 'lognexponent'")
+ if (length(n) == 1 && length(s) == 1) {
if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
- sum((1:n)^(-s))
+ sum((1:n)^(-s))
+ } else {
+ LEN <- max(length(n), length(s))
+ n <- rep_len(n, LEN)
+ ans <- s <- rep_len(s, LEN)
+ if (lognexponent != 0) {
+ for (ii in 1:LEN)
+ ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
} else {
- LEN <- max(length(n), length(s))
- n <- rep(n, length.out = LEN)
- ans <- s <- rep(s, length.out = LEN)
- if (lognexponent != 0) {
- for (ii in 1:LEN)
- ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
- } else
- for (ii in 1:LEN)
- ans[ii] <- sum((1:n[ii])^(-s[ii]))
- ans
+ for (ii in 1:LEN)
+ ans[ii] <- sum((1:n[ii])^(-s[ii]))
}
+ ans
+ }
}
@@ -1316,9 +1314,9 @@ dzipf <- function(x, N, s, log = FALSE) {
if (!is.Numeric(s, positive = TRUE))
stop("bad input for argument 's'")
nn <- max(length(x), length(N), length(s))
- x <- rep(x, length.out = nn);
- N <- rep(N, length.out = nn);
- s <- rep(s, length.out = nn);
+ x <- rep_len(x, nn)
+ N <- rep_len(N, nn)
+ s <- rep_len(s, nn)
ox <- !is.finite(x)
zero <- ox | round(x) != x | x < 1 | x > N
ans <- (if (log.arg) log(0) else 0) * x
@@ -1335,17 +1333,17 @@ dzipf <- function(x, N, s, log = FALSE) {
pzipf <- function(q, N, s, log.p = FALSE) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'N'")
- if (!is.Numeric(s, positive = TRUE))
- stop("bad input for argument 's'")
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'N'")
+ if (!is.Numeric(s, positive = TRUE))
+ stop("bad input for argument 's'")
- nn <- max(length(q), length(N), length(s))
- q <- rep(q, length.out = nn);
- N <- rep(N, length.out = nn);
- s <- rep(s, length.out = nn);
+ nn <- max(length(q), length(N), length(s))
+ if (length(q) != nn) q <- rep_len(q, nn)
+ if (length(N) != nn) N <- rep_len(N, nn)
+ if (length(s) != nn) s <- rep_len(s, nn)
oq <- !is.finite(q)
zeroOR1 <- oq | q < 1 | q >= N
floorq <- floor(q)
@@ -1410,7 +1408,7 @@ pzipf <- function(q, N, s, log.p = FALSE) {
getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
llfun=llfun,
y = y, N=extra$N, w = w)
- ss.init <- rep(ss.init, length = length(y))
+ ss.init <- rep_len(ss.init, length(y))
if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
etastart <- theta2eta(ss.init, .link , earg = .earg )
}
@@ -1572,11 +1570,11 @@ cauchy.control <- function(save.weights = TRUE, ...) {
loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by = 0.05)))
try.this <- grid.search(loc.grid, objfun = cauchy2.Loglikfun,
y = y, x = x, w = w)
- try.this <- rep(c(try.this), length.out = n)
+ try.this <- rep_len(c(try.this), n)
try.this
}
}
- loc.init <- rep(c(loc.init), length.out = n)
+ loc.init <- rep_len(c(loc.init), n)
sca.init <- if (length( .iscale )) .iscale else {
@@ -1589,7 +1587,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
sca.init
}
- sca.init <- rep(c(sca.init), length.out = n)
+ sca.init <- rep_len(c(sca.init), n)
if ( .llocat == "loge") loc.init <- abs(loc.init)+0.01
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
@@ -1600,7 +1598,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
.iscale = iscale, .escale = escale, .lscale = lscale,
.iprobs = iprobs, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
}, list( .llocat = llocat,
.elocat = elocat ))),
last = eval(substitute(expression({
@@ -1760,11 +1758,11 @@ cauchy.control <- function(save.weights = TRUE, ...) {
objfun = cauchy1.Loglikfun,
y = y, x = x, w = w,
extraargs = .scale.arg )
- try.this <- rep(try.this, length.out = n)
+ try.this <- rep_len(try.this, n)
try.this
}
}
- loc.init <- rep(loc.init, length.out = n)
+ loc.init <- rep_len(loc.init, n)
if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
etastart <-
theta2eta(loc.init, .llocat , earg = .elocat )
@@ -1879,7 +1877,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
locat.init <- if ( .imethod == 1) y else median(rep(y, w))
- locat.init <- rep(locat.init, length.out = n)
+ locat.init <- rep_len(locat.init, n)
if ( .llocat == "loge")
locat.init <- abs(locat.init) + 0.001
etastart <-
@@ -2055,7 +2053,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
}, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- parameters.names
misc$earg <- vector("list", M)
@@ -2144,12 +2142,12 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
stop("bad input for argument 'a'")
N <- max(length(x), length(Qsize), length(a))
- x <- rep(x, length.out = N)
- Qsize <- rep(Qsize, length.out = N)
- a <- rep(a, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(a) != N) a <- rep_len(a, N)
+ if (length(Qsize) != N) Qsize <- rep_len(Qsize, N)
xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
- ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans <- rep_len(if (log.arg) log(0) else 0, N) # loglikelihood
ans[xok] <- log(Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
(x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
(x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
@@ -2175,8 +2173,8 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
stop("bad input for argument 'a'")
N <- use.n
- qsize <- rep(Qsize, length.out = N)
- a <- rep(a, length.out = N)
+ qsize <- rep_len(Qsize, N)
+ a <- rep_len(a, N)
totqsize <- qsize
fini <- (qsize < 1)
while (any(!fini)) {
@@ -2244,9 +2242,9 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
if (!length(etastart)) {
a.init <- switch(as.character( .imethod ),
"1" = 1 - .Qsize / (y + 1/8),
- "2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
- "3" = rep(1 - .Qsize / median(y), length.out = n),
- "4" = rep(0.5, length.out = n))
+ "2" = rep_len(1 - .Qsize / weighted.mean(y, w), n),
+ "3" = rep_len(1 - .Qsize / median(y), n),
+ "4" = rep_len(0.5, n))
etastart <-
theta2eta(a.init, .link , earg = .earg )
}
@@ -2326,11 +2324,11 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
if (!is.Numeric(a, positive = TRUE))
stop("bad input for argument 'a'")
N <- max(length(x), length(a))
- x <- rep(x, length.out = N);
- a <- rep(a, length.out = N);
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(a) != N) a <- rep_len(a, N)
xok <- (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
- ans <- rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans <- rep_len(if (log.arg) log(0) else 0, N) # loglikelihood
ans[xok] <- ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
if (!log.arg) {
@@ -2375,12 +2373,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
wymean <- weighted.mean(y, w)
a.init <- switch(as.character( .imethod ),
"1" = (y - 1 + 1/8) / (2 * (y + 1/8) + 1/8),
- "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8),
- length.out = n),
- "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8),
- length.out = n),
- "4" = rep(0.25,
- length.out = n))
+ "2" = rep_len((wymean-1+1/8) / (2*(wymean+1/8)+1/8), n),
+ "3" = rep_len((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), n),
+ "4" = rep_len(0.25, n))
etastart <-
theta2eta(a.init, .link , earg = .earg )
}
@@ -2431,375 +2426,6 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- betaff <-
- function(A = 0, B = 1,
- lmu = "logit",
- lphi = "loge",
- imu = NULL, iphi = NULL, imethod = 1, zero = NULL) {
-
-
- stdbeta <- (A == 0 && B == 1)
-
-
- lmu <- as.list(substitute(lmu))
- emu <- link2list(lmu)
- lmu <- attr(emu, "function.name")
-
-
-
- lphi <- as.list(substitute(lphi))
- ephi <- link2list(lphi)
- lphi <- attr(ephi, "function.name")
-
-
- if (!is.Numeric(A, length.arg = 1) ||
- !is.Numeric(B, length.arg = 1) || A >= B)
- stop("A must be < B, and both must be of length one")
-
-
-
-
- if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
- any(imu <= A) || any(imu >= B)))
- stop("bad input for argument 'imu'")
- if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
- stop("bad input for argument 'iphi'")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
-
- new("vglmff",
- blurb = c("Beta distribution parameterized by mu and a ",
- "precision parameter\n",
- if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
- "/ beta(mu*phi,(1-mu)*phi),\n",
- " 0<y<1, 0<mu<1, phi>0\n\n") else
- paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
- "-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
- B, "-", A, ")^(phi-1)),\n",
- A," < y < ",B, ", ", A," < mu < ",B,
- ", mu = ", A, " + ", (B-A), " * mu1",
- ", phi > 0\n\n", sep = ""),
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("phi", lphi, earg = ephi)),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (min(y) <= .A || max(y) >= .B)
- stop("data not within (A, B)")
-
-
- w.y.check(w = w, y = y)
-
-
- predictors.names <- c(namesof("mu", .lmu , .emu , short = TRUE),
- namesof("phi", .lphi , .ephi, short = TRUE))
- if (!length(etastart)) {
- mu.init <- if (is.Numeric( .imu )) .imu else {
- if ( .imethod == 1) weighted.mean(y, w) else
- median(rep(y, w))
- }
- mu1.init <- (mu.init - .A ) / ( .B - .A ) # In (0,1)
- phi.init <- if (is.Numeric( .iphi )) .iphi else
- max(0.01, -1 + ( .B - .A )^2 * mu1.init*(1-mu1.init)/var(y))
- etastart <- matrix(0, n, 2)
- etastart[, 1] <- theta2eta(mu.init , .lmu , earg = .emu )
- etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
- }
- }), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
- .A = A, .B = B, .emu = emu, .ephi = ephi,
- .imethod = imethod ))),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu <- eta2theta(eta[, 1], .lmu , .emu )
- mu
- }, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
- last = eval(substitute(expression({
- misc$link <- c(mu = .lmu , phi = .lphi )
- misc$earg <- list(mu = .emu , phi = .ephi )
- misc$limits <- c( .A , .B )
- misc$stdbeta <- .stdbeta
- }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
- .emu = emu, .ephi = ephi,
- .stdbeta = stdbeta ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta,
- extra = NULL,
- summation = TRUE) {
- mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
- phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
- m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- shape1 <- phi * m1u
- shape2 <- (1 - m1u) * phi
- zedd <- (y - .A) / ( .B - .A)
- ll.elts <-
- c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
- log = TRUE) -
- log( abs( .B - .A )))
- if (summation) {
- sum(ll.elts)
- } else {
- ll.elts
- }
- }
- }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
- .emu = emu, .ephi = ephi,
- .stdbeta = stdbeta ))),
- vfamily = "betaff",
-
-
-
- simslot = eval(substitute(
- function(object, nsim) {
-
- pwts <- if (length(pwts <- object at prior.weights) > 0)
- pwts else weights(object, type = "prior")
- if (any(pwts != 1))
- warning("ignoring prior weights")
-
- eta <- predict(object)
- mu <- eta2theta(eta[, 1], .lmu , earg = .emu )
- phi <- eta2theta(eta[, 2], .lphi , earg = .ephi )
- m1u <- if ( .stdbeta ) mu else (mu - .A ) / ( .B - .A )
- shape1 <- phi * m1u
- shape2 <- (1 - m1u) * phi
- .A + ( .B - .A ) *
- rbeta(nsim * length(shape1), shape1 = shape1, shape2 = shape2)
- }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
- .emu = emu, .ephi = ephi,
- .stdbeta = stdbeta ))),
-
-
-
-
-
- deriv = eval(substitute(expression({
- mu <- eta2theta(eta[, 1], .lmu , .emu )
- phi <- eta2theta(eta[, 2], .lphi , .ephi )
- m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- dmu.deta <- dtheta.deta(mu, .lmu , .emu )
- dmu1.dmu <- 1 / ( .B - .A)
- dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
- temp1 <- m1u*phi
- temp2 <- (1-m1u)*phi
- if ( .stdbeta ) {
- dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) + log(y) - log1p(-y))
- dl.dphi <- digamma(phi) - mu*digamma(temp1) - (1-mu)*digamma(temp2) +
- mu*log(y) + (1-mu)*log1p(-y)
- } else {
- dl.dmu1 <- phi*(digamma(temp2) - digamma(temp1) +
- log(y-.A) - log( .B-y))
- dl.dphi <- digamma(phi) - m1u*digamma(temp1) -
- (1-m1u)*digamma(temp2) +
- m1u*log(y-.A) + (1-m1u)*log( .B-y) - log( .B -.A)
- }
- c(w) * cbind(dl.dmu1 * dmu1.dmu * dmu.deta,
- dl.dphi * dphi.deta)
- }), list( .lmu = lmu, .lphi = lphi,
- .emu = emu, .ephi = ephi,
- .A = A, .B = B,
- .stdbeta = stdbeta ))),
- weight = eval(substitute(expression({
- d2l.dmu12 <- (trigamma(temp1) + trigamma(temp2)) * phi^2
- d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
- trigamma(temp2) * (1-m1u)^2
- d2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
- wz <- matrix(NA_real_, n, dimm(M))
- wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
- wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
- wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
- c(w) * wz
- }), list( .A = A, .B = B ))))
-}
-
-
-
-
-
- betaR <-
- function(lshape1 = "loge", lshape2 = "loge",
- i1 = NULL, i2 = NULL, trim = 0.05,
- A = 0, B = 1, parallel = FALSE, zero = NULL) {
-
- lshape1 <- as.list(substitute(lshape1))
- eshape1 <- link2list(lshape1)
- lshape1 <- attr(eshape1, "function.name")
-
- lshape2 <- as.list(substitute(lshape2))
- eshape2 <- link2list(lshape2)
- lshape2 <- attr(eshape2, "function.name")
-
-
- if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
- stop("bad input for argument 'i1'")
- if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
- stop("bad input for argument 'i2'")
-
- if (!is.Numeric(A, length.arg = 1) ||
- !is.Numeric(B, length.arg = 1) ||
- A >= B)
- stop("A must be < B, and both must be of length one")
-
- stdbeta <- (A == 0 && B == 1) # stdbeta == T iff standard beta distn
-
-
-
- new("vglmff",
- blurb = c("Two-parameter Beta distribution ",
- "(shape parameters parameterization)\n",
- if (stdbeta)
- paste("y^(shape1-1) * (1-y)^(shape2-1) / B(shape1,shape2),",
- "0 <= y <= 1, shape1>0, shape2>0\n\n") else
- paste("(y-",A,")^(shape1-1) * (",B,
- "-y)^(shape2-1) / [B(shape1,shape2) * (",
- B, "-", A, ")^(shape1+shape2-1)], ",
- A," <= y <= ",B," shape1>0, shape2>0\n\n", sep = ""),
- "Links: ",
- namesof("shape1", lshape1, earg = eshape1), ", ",
- namesof("shape2", lshape2, earg = eshape2)),
- constraints = eval(substitute(expression({
- constraints <- cm.VGAM(matrix(1, M, 1), x = x,
- bool = .parallel ,
- constraints, apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
- }), list( .parallel = parallel, .zero = zero ))),
- infos = eval(substitute(function(...) {
- list(M1 = 2,
- Q1 = 1,
- A = .A,
- B = .B,
- multipleResponses = FALSE,
- zero = .zero )
- }, list( .A = A, .B = B,
- .zero = zero ))),
- initialize = eval(substitute(expression({
- if (min(y) <= .A || max(y) >= .B)
- stop("data not within (A, B)")
-
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
-
-
- w.y.check(w = w, y = y)
-
-
- predictors.names <-
- c(namesof("shape1", .lshape1 , earg = .eshape1 , short = TRUE),
- namesof("shape2", .lshape2 , earg = .eshape2 , short = TRUE))
-
- if (!length(etastart)) {
- mu1d <- mean(y, trim = .trim )
- uu <- (mu1d - .A) / ( .B - .A)
- DD <- ( .B - .A)^2
- pinit <- max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
- qinit <- max(0.01, pinit * (1 - uu) / uu)
- etastart <- matrix(0, n, 2)
- etastart[, 1] <- theta2eta( pinit, .lshape1 , earg = .eshape1 )
- etastart[, 2] <- theta2eta( qinit, .lshape2 , earg = .eshape2 )
- }
- if (is.Numeric( .i1 ))
- etastart[, 1] <- theta2eta( .i1 , .lshape1 , earg = .eshape1 )
- if (is.Numeric( .i2 ))
- etastart[, 2] <- theta2eta( .i2 , .lshape2 , earg = .eshape2 )
- }), list( .lshape1 = lshape1, .lshape2 = lshape2,
- .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
- .A + ( .B - .A ) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- last = eval(substitute(expression({
- misc$link <- c(shape1 = .lshape1 , shape2 = .lshape2 )
- misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 )
- misc$limits <- c( .A , .B )
- }), list( .lshape1 = lshape1, .lshape2 = lshape2,
- .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL,
- summation = TRUE) {
- shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- zedd <- (y - .A ) / ( .B - .A )
- ll.elts <-
- c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
- shape2 = shapes[, 2],
- log = TRUE) - log( abs( .B - .A )))
- if (summation) {
- sum(ll.elts)
- } else {
- ll.elts
- }
- }
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- vfamily = "betaR",
-
-
-
-
- simslot = eval(substitute(
- function(object, nsim) {
-
- pwts <- if (length(pwts <- object at prior.weights) > 0)
- pwts else weights(object, type = "prior")
- if (any(pwts != 1))
- warning("ignoring prior weights")
-
- eta <- predict(object)
- shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
- .A + ( .B - .A ) *
- rbeta(nsim * length(shapes[, 1]),
- shape1 = shapes[, 1], shape2 = shapes[, 2])
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
-
-
-
- deriv = eval(substitute(expression({
- shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
-
- dshapes.deta <-
- cbind(dtheta.deta(shapes[, 1], .lshape1 , earg = .eshape1),
- dtheta.deta(shapes[, 2], .lshape2 , earg = .eshape2))
-
- dl.dshapes <- cbind(log(y - .A ), log( .B - y)) -
- digamma(shapes) +
- digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A )
-
- c(w) * dl.dshapes * dshapes.deta
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- weight = expression({
- trig.sum <- trigamma(shapes[, 1] + shapes[, 2])
- ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum
- ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum
- ned2l.dshape1shape2 <- -trig.sum
- wz <- matrix(NA_real_, n, dimm(M)) # dimm(M) == 3
- wz[, iam(1, 1, M)] <- ned2l.dshape12 * dshapes.deta[, 1]^2
- wz[, iam(2, 2, M)] <- ned2l.dshape22 * dshapes.deta[, 2]^2
- wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 * dshapes.deta[, 1] *
- dshapes.deta[, 2]
- c(w) * wz
- }))
-}
-
-
-
simple.exponential <- function() {
@@ -2914,7 +2540,7 @@ simple.exponential <- function() {
extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
misc$earg <- vector("list", M)
names(misc$link) <- names(misc$earg) <- mynames1
for (ii in 1:M)
@@ -3054,7 +2680,7 @@ simple.exponential <- function() {
extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
names(misc$earg) <- mynames1
@@ -3184,7 +2810,7 @@ simple.exponential <- function() {
eta2theta(eta, .link , earg = .earg )),
list( .link = link, .earg = earg )),
last = eval(substitute(expression({
- misc$link <- rep( .link , length = M)
+ misc$link <- rep_len( .link , M)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -3354,8 +2980,7 @@ simple.exponential <- function() {
namesof(mynames2, .lratee , earg = .eratee , tag = FALSE))
}
parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
- predictors.names <- predictors.names[
- interleave.VGAM(M, M1 = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
@@ -3406,10 +3031,10 @@ simple.exponential <- function() {
misc$multipleResponses <- TRUE
M1 <- extra$M1
- avector <- if ( .lss ) c(rep( .lratee , length = ncoly),
- rep( .lshape , length = ncoly)) else
- c(rep( .lshape , length = ncoly),
- rep( .lratee , length = ncoly))
+ avector <- if ( .lss ) c(rep_len( .lratee , ncoly),
+ rep_len( .lshape , ncoly)) else
+ c(rep_len( .lshape , ncoly),
+ rep_len( .lratee , ncoly))
misc$link <- avector[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -3670,8 +3295,8 @@ simple.exponential <- function() {
if (exists("CQO.FastAlgorithm", envir = VGAMenv))
rm("CQO.FastAlgorithm", envir = VGAMenv)
- tmp34 <- c(rep( .lmu , length = NOS),
- rep( .lshape , length = NOS))
+ tmp34 <- c(rep_len( .lmu , NOS),
+ rep_len( .lshape , NOS))
names(tmp34) <- c(param.names("mu", NOS),
param.names("shape", NOS))
tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
@@ -3909,7 +3534,7 @@ simple.exponential <- function() {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -3998,9 +3623,9 @@ dbetageom <- function(x, shape1, shape2, log = FALSE) {
if (!is.Numeric(shape2, positive = TRUE))
stop("bad input for argument 'shape2'")
N <- max(length(x), length(shape1), length(shape2))
- x <- rep(x, length.out = N)
- shape1 <- rep(shape1, length.out = N)
- shape2 <- rep(shape2, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(shape1) != N) shape1 <- rep_len(shape1, N)
+ if (length(shape2) != N) shape2 <- rep_len(shape2, N)
loglik <- lbeta(1+shape1, shape2 + abs(x)) - lbeta(shape1, shape2)
xok <- (x == round(x) & x >= 0)
@@ -4014,34 +3639,35 @@ dbetageom <- function(x, shape1, shape2, log = FALSE) {
pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(shape1, positive = TRUE))
- stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, positive = TRUE))
- stop("bad input for argument 'shape2'")
- N <- max(length(q), length(shape1), length(shape2))
- q <- rep(q, length.out = N);
- shape1 <- rep(shape1, length.out = N);
- shape2 <- rep(shape2, length.out = N)
- ans <- q * 0 # Retains names(q)
- if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
- max(abs(shape2-shape2[1])) < 1.0e-08) {
- qstar <- floor(q)
- temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
- shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
- unq <- unique(qstar)
- for (ii in unq) {
- index <- (qstar == ii)
- ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
- }
- } else
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(shape1, positive = TRUE))
+ stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, positive = TRUE))
+ stop("bad input for argument 'shape2'")
+ N <- max(length(q), length(shape1), length(shape2))
+ if (length(q) != N) q <- rep_len(q, N)
+ if (length(shape1) != N) shape1 <- rep_len(shape1, N)
+ if (length(shape2) != N) shape2 <- rep_len(shape2, N)
+ ans <- q * 0 # Retains names(q)
+ if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
+ max(abs(shape2-shape2[1])) < 1.0e-08) {
+ qstar <- floor(q)
+ temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
+ shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
+ unq <- unique(qstar)
+ for (ii in unq) {
+ index <- (qstar == ii)
+ ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
+ }
+ } else {
for (ii in 1:N) {
- qstar <- floor(q[ii])
- ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
+ qstar <- floor(q[ii])
+ ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
shape1 = shape1[ii], shape2 = shape2[ii])) else 0
}
- if (log.p) log(ans) else ans
+ }
+ if (log.p) log(ans) else ans
}
@@ -4058,7 +3684,7 @@ rbetageom <- function(n, shape1, shape2) {
Init.mu <-
- function(y, x = cbind("(Intercept)" = rep(1, nrow(as.matrix(y)))),
+ function(y, x = cbind("(Intercept)" = rep_len(1, nrow(as.matrix(y)))),
w = x, imethod = 1, imu = NULL,
ishrinkage = 0.95,
pos.only = FALSE,
@@ -4220,6 +3846,13 @@ EIM.NB.speciald <- function(mu, size,
+NBD.Loglikfun2 <- function(munbval, sizeval,
+ y, x, w, extraargs) {
+ sum(c(w) * dnbinom(x = y, mu = munbval,
+ size = sizeval, log = TRUE))
+}
+
+
negbinomial.control <- function(save.weights = TRUE, ...) {
list(save.weights = save.weights)
@@ -4232,7 +3865,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
zero = "size",
parallel = FALSE,
deviance.arg = FALSE,
- mds.min = 1e-4,
+ mds.min = 1e-3,
nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
eps.trig = 1e-7,
max.support = 4000,
@@ -4240,10 +3873,10 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
lmu = "loge", lsize = "loge",
imethod = 1,
imu = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
+ iprobs.y = NULL, # 0.35,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
isize = NULL,
- gsize.mux = exp((-12:6)/2)) {
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
@@ -4251,6 +3884,10 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
@@ -4300,8 +3937,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
new("vglmff",
-
-
blurb = c("Negative binomial distribution\n\n",
"Links: ",
namesof("mu", lmunb, earg = emunb), ", ",
@@ -4310,10 +3945,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
"Variance: mu * (1 + mu / size) for NB-2"),
constraints = eval(substitute(expression({
-
-
-
-
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
@@ -4343,7 +3974,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
initialize = eval(substitute(expression({
M1 <- 2
- temp5 <-
+ temp12 <-
w.y.check(w = w, y = y,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
@@ -4351,8 +3982,8 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1, maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp12$w
+ y <- temp12$y
assign("CQO.FastAlgorithm",
@@ -4374,37 +4005,53 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
.lsize , earg = .esize , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = FALSE,
- probs.y = .probs.y )
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(y[, jay], probs = gprobs.y) + 1/16
+ } else {
+ weighted.mean(y[, jay], w = w[, jay])
+ }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
- } else {
- negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- sum(c(w) * dnbinom(x = y, mu = extraargs, size = kmat, log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- size.init[, jay] <- grid.search(size.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, jay], x = x, w = w[, jay],
- extraargs = munb.init[, jay])
- }
- }
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(y[, jay], w = w[, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
- newemu <- .emunb
- if ( .lmunb == "nbcanlink") {
- newemu$size <- size.init
- testing1 <- log(munb.init / (munb.init + size.init))
- testing2 <- theta2eta(munb.init, link = .lmunb , earg = newemu )
- }
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = NBD.Loglikfun2,
+ y = y[, jay], w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+ newemu <- .emunb
+ if ( .lmunb == "nbcanlink") {
+ newemu$size <- size.init
+ testing1 <- log(munb.init / (munb.init + size.init))
+ testing2 <- theta2eta(munb.init, link = .lmunb , earg = newemu )
+ }
etastart <-
@@ -4412,13 +4059,14 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
theta2eta(size.init, link = .lsize , earg = .esize ))
etastart <-
etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
- }
+ }
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize,
- .imunb = imunb, .gsize.mux = gsize.mux,
+ .imunb = imunb,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
.deviance.arg = deviance.arg,
- .isize = isize, .probs.y = probs.y,
- .ishrinkage = ishrinkage, .nsimEIM = nsimEIM,
+ .isize = isize, .iprobs.y = iprobs.y,
+ .nsimEIM = nsimEIM,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -4450,8 +4098,8 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
save.weights <- control$save.weights <- !all(ind2)
- temp0303 <- c(rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))
+ temp0303 <- c(rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS))
names(temp0303) <- c(param.names("mu", NOS),
param.names("size", NOS))
temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
@@ -4469,14 +4117,12 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
- misc$ishrinkage <- .ishrinkage
misc$multipleResponses <- TRUE
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize,
.cutoff.prob = cutoff.prob,
.max.chunk.MB = max.chunk.MB,
.nsimEIM = nsimEIM,
- .ishrinkage = ishrinkage,
.imethod = imethod ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
@@ -4508,11 +4154,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
extra = NULL,
summation = TRUE) {
eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
- if ( FALSE && .lsize == "loge") {
- bigval <- 68
- eta.k[eta.k > bigval] <- bigval
- eta.k[eta.k < -bigval] <- -bigval
- }
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
@@ -4554,23 +4195,21 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
.emunb = emunb, .esize = esize ))),
- validparams = eval(substitute(function(eta, extra = NULL) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.lmunb , earg = .emunb )
size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.lsize , earg = .esize )
smallval <- .mds.min # .munb.div.size
- overdispersion <- all(munb / size > smallval)
- ans <- all(is.finite(munb)) && all(munb > 0) &&
- all(is.finite(size)) && all(size > 0) &&
- overdispersion
+ okay1 <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0)
+ overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
if (!overdispersion)
- warning("parameter 'size' has very large values; ",
- "replacing them by an arbitrary large value within ",
- "the parameter space. Try fitting a quasi-Poisson ",
- "model instead.")
- ans
+ warning("parameter 'size' has very large values; ",
+ "try fitting a quasi-Poisson ",
+ "model instead.")
+ okay1 && overdispersion
}, list( .lmunb = lmunb, .emunb = emunb,
.lsize = lsize, .esize = esize,
.mds.min = mds.min))),
@@ -4612,22 +4251,16 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
M1 <- 2
NOS <- ncol(eta) / M1
eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
- if (FALSE && .lsize == "loge") {
- bigval <- 68 # 3.404276e+29
- bigval <- 68 # 3.404276e+29
- eta.k[eta.k > bigval] <- bigval
- eta.k[eta.k < -bigval] <- -bigval
- }
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
- smallval <- 1e-4 # Something like this is needed
- if (any(infinite.size <- mu / kmat < smallval)) {
+ smallval <- .mds.min # Something like this is needed
+ if (any(big.size <- mu / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
- "replacing them by a large value within ",
- "the parameter space. Try fitting a quasi-Poisson ",
+ "try fitting a quasi-Poisson ",
"model instead.")
- kmat[infinite.size] <- mu[infinite.size] / smallval
+ kmat[big.size] <- mu[big.size] / smallval
}
@@ -4640,8 +4273,8 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
dl.dmunb <- y / mu - (1 + y/kmat) / (1 + mu/kmat)
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
(y - mu) / (mu + kmat) + log1p(-mu / (kmat + mu))
- if (any(infinite.size)) {
- dl.dsize[infinite.size] <- 1e-8 # A small number
+ if (any(big.size)) {
+ dl.dsize[big.size] <- 1e-8 # A small number
}
@@ -4665,12 +4298,11 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
-
-
myderiv
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize,
- .deviance.arg = deviance.arg ))),
+ .deviance.arg = deviance.arg,
+ .mds.min = mds.min ))),
@@ -4799,7 +4431,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
wz[, M1*(1:NOS)] <- wz[, M1*(1:NOS)] * dsize.deta^2
-
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .cutoff.prob = cutoff.prob,
.max.support = max.support,
@@ -4870,7 +4501,7 @@ polya.control <- function(save.weights = TRUE, ...) {
function(
zero = "size",
type.fitted = c("mean", "prob"),
- mds.min = 1e-4,
+ mds.min = 1e-3,
nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
eps.trig = 1e-7,
max.support = 4000,
@@ -4878,13 +4509,19 @@ polya.control <- function(save.weights = TRUE, ...) {
lprob = "logit", lsize = "loge",
imethod = 1,
iprob = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
+ iprobs.y = NULL,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
isize = NULL,
- gsize.mux = exp((-12:6)/2),
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
imunb = NULL) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
deviance.arg <- FALSE # 20131212; for now
type.fitted <- match.arg(type.fitted,
@@ -4928,11 +4565,9 @@ polya.control <- function(save.weights = TRUE, ...) {
"Mean: size * (1 - prob) / prob\n",
"Variance: mean / prob"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
-
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
@@ -4956,15 +4591,15 @@ polya.control <- function(save.weights = TRUE, ...) {
"Try negbinomial()")
- temp5 <- w.y.check(w = w, y = y,
+ temp12 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
Is.nonnegative = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1, maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp12$w
+ y <- temp12$y
M <- M1 * ncol(y)
@@ -4981,32 +4616,52 @@ polya.control <- function(save.weights = TRUE, ...) {
save.weights <- control$save.weights <- FALSE
}
+
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = FALSE,
- probs.y = .probs.y )
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(y[, jay], probs = gprobs.y) + 1/16
+ } else {
+ weighted.mean(y[, jay], w = w[, jay])
+ }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(y[, jay], w = w[, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = NBD.Loglikfun2,
+ y = y[, jay], w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
- } else {
- negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- mu <- extraargs
- sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- size.init[, jay] <- grid.search(size.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, jay], # x = x,
- w = w[, jay],
- extraargs = munb.init[, jay])
- }
- }
prob.init <- if (length( .iprob ))
matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
@@ -5023,9 +4678,9 @@ polya.control <- function(save.weights = TRUE, ...) {
.eprob = eprob, .esize = esize,
.iprob = iprob, .isize = isize,
.pinit = iprob,
- .gsize.mux = gsize.mux,
- .probs.y = probs.y,
- .ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+ .iprobs.y = iprobs.y,
+ .nsimEIM = nsimEIM, .zero = zero,
.imethod = imethod , .imunb = imunb,
.type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -5061,8 +4716,8 @@ polya.control <- function(save.weights = TRUE, ...) {
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
- temp0303 <- c(rep( .lprob , length = NOS),
- rep( .lsize , length = NOS))
+ temp0303 <- c(rep_len( .lprob , NOS),
+ rep_len( .lsize , NOS))
names(temp0303) <- c(param.names("prob", NOS),
param.names("size", NOS))
temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
@@ -5078,12 +4733,11 @@ polya.control <- function(save.weights = TRUE, ...) {
misc$isize <- .isize
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$ishrinkage <- .ishrinkage
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.isize = isize,
.nsimEIM = nsimEIM,
- .ishrinkage = ishrinkage, .imethod = imethod ))),
+ .imethod = imethod ))),
loglikelihood = eval(substitute(
@@ -5130,7 +4784,7 @@ polya.control <- function(save.weights = TRUE, ...) {
- validparams = eval(substitute(function(eta, extra = NULL) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
size <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
munb <- size * (1 / pmat - 1)
@@ -5141,10 +4795,9 @@ polya.control <- function(save.weights = TRUE, ...) {
all(is.finite(pmat)) && all(pmat > 0 & pmat < 1)
overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
if (!overdispersion)
- warning("parameter 'size' has very large values; ",
- "replacing them by an arbitrary large value within ",
- "the parameter space. Try fitting a quasi-Poisson ",
- "model instead.")
+ warning("parameter 'size' has very large values; ",
+ "try fitting a quasi-Poisson ",
+ "model instead.")
okay1 && overdispersion
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize,
@@ -5339,21 +4992,27 @@ polyaR.control <- function(save.weights = TRUE, ...) {
function(
zero = "size",
type.fitted = c("mean", "prob"),
- mds.min = 1e-4,
+ mds.min = 1e-3,
nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
eps.trig = 1e-7,
max.support = 4000,
max.chunk.MB = 30, # max.memory = Inf is allowed
lsize = "loge", lprob = "logit",
imethod = 1,
- isize = NULL,
iprob = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
- gsize.mux = exp((-12:6)/2),
+ iprobs.y = NULL,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
+ isize = NULL,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
imunb = NULL) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
deviance.arg <- FALSE # 20131212; for now
@@ -5398,11 +5057,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Mean: size * (1 - prob) / prob\n",
"Variance: mean / prob"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
-
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
@@ -5426,21 +5083,21 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Try negbinomial()")
- temp5 <- w.y.check(w = w, y = y,
+ temp12 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
Is.nonnegative = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
out.wy = TRUE,
colsyperw = 1, maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp12$w
+ y <- temp12$y
M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
- extra$type.fitted <- .type.fitted
- extra$dimnamesy <- dimnames(y)
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
predictors.names <-
c(namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE),
@@ -5453,30 +5110,51 @@ polyaR.control <- function(save.weights = TRUE, ...) {
- if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = FALSE,
- probs.y = .probs.y )
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
- } else {
- negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- mu <- extraargs
- sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- size.init[, jay] <- grid.search(size.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, jay], # x = x,
- w = w[, jay],
- extraargs = munb.init[, jay])
+ if (!length(etastart)) {
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(y[, jay], probs = gprobs.y) + 1/16
+ } else {
+ weighted.mean(y[, jay], w = w[, jay])
}
- }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(y[, jay], w = w[, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = NBD.Loglikfun2,
+ y = y[, jay], w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+
+
prob.init <- if (length( .iprob ))
matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
@@ -5493,9 +5171,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
.eprob = eprob, .esize = esize,
.iprob = iprob, .isize = isize,
.pinit = iprob,
- .gsize.mux = gsize.mux,
- .probs.y = probs.y,
- .ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+ .iprobs.y = iprobs.y,
+ .nsimEIM = nsimEIM, .zero = zero,
.imethod = imethod , .imunb = imunb,
.type.fitted = type.fitted ))),
@@ -5532,8 +5210,8 @@ polyaR.control <- function(save.weights = TRUE, ...) {
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
- temp0303 <- c(rep( .lprob , length = NOS),
- rep( .lsize , length = NOS))
+ temp0303 <- c(rep_len( .lprob , NOS),
+ rep_len( .lsize , NOS))
names(temp0303) <- c(param.names("size", NOS),
param.names("prob", NOS))
temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
@@ -5549,12 +5227,11 @@ polyaR.control <- function(save.weights = TRUE, ...) {
misc$isize <- .isize
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$ishrinkage <- .ishrinkage
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.isize = isize,
.nsimEIM = nsimEIM,
- .ishrinkage = ishrinkage, .imethod = imethod ))),
+ .imethod = imethod ))),
loglikelihood = eval(substitute(
@@ -5600,7 +5277,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
.eprob = eprob, .esize = esize ))),
- validparams = eval(substitute(function(eta, extra = NULL) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
size <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
munb <- size * (1 / pmat - 1)
@@ -5612,10 +5289,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
all(is.finite(pmat)) && all(pmat > 0 & pmat < 1) &&
overdispersion
if (!overdispersion)
- warning("parameter 'size' has very large values; ",
- "replacing them by an arbitrary large value within ",
- "the parameter space. Try fitting a quasi-Poisson ",
- "model instead.")
+ warning("parameter 'size' has very large values; ",
+ "try fitting a quasi-Poisson ",
+ "model instead.")
ans
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize,
@@ -5920,8 +5596,8 @@ polyaR.control <- function(save.weights = TRUE, ...) {
}
- etastart <- rep(theta2eta(init.df, .ldof , earg = .edof ),
- length.out = length(y))
+ etastart <- rep_len(theta2eta(init.df, .ldof , earg = .edof ),
+ length(y))
}
}), list( .ldof = ldof, .edof = edof, .idof = idof,
.tol1 = tol1, .imethod = imethod ))),
@@ -6137,18 +5813,14 @@ polyaR.control <- function(save.weights = TRUE, ...) {
init.sca <- if (length( .isca )) .isca else
sdvec / 2.3
- sdvec <- rep(sdvec,
- length.out <- max(length(sdvec),
- length(init.sca)))
- init.sca <- rep(init.sca,
- length.out <- max(length(sdvec),
- length(init.sca)))
+ sdvec <- rep_len(sdvec, max(length(sdvec), length(init.sca)))
+ init.sca <- rep_len(init.sca, max(length(sdvec), length(init.sca)))
ind9 <- (sdvec / init.sca <= (1 + 0.12))
sdvec[ind9] <- sqrt(1.12) * init.sca[ind9]
init.dof <- if (length( .idof )) .idof else
(2 * (sdvec / init.sca)^2) / ((sdvec / init.sca)^2 - 1)
if (!is.Numeric(init.dof) || init.dof <= 1)
- init.dof <- rep(3, length.out = ncoly)
+ init.dof <- rep_len(3, ncoly)
mat1 <- matrix(theta2eta(init.loc, .lloc , earg = .eloc ), n, NOS,
byrow = TRUE)
@@ -6175,9 +5847,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
.ldof = ldof, .edof = edof ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .lloc , length = NOS),
- rep( .lsca , length = NOS),
- rep( .ldof , length = NOS))
+ misc$link <- c(rep_len( .lloc , NOS),
+ rep_len( .lsca , NOS),
+ rep_len( .ldof , NOS))
misc$link <- misc$link[interleave.VGAM(M1 * NOS, M1 = M1)]
temp.names <- c(mynames1, mynames2, mynames3)
temp.names <- temp.names[interleave.VGAM(M1 * NOS, M1 = M1)]
@@ -6463,8 +6135,8 @@ polyaR.control <- function(save.weights = TRUE, ...) {
.doff = doff ))),
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .lloc , length = NOS),
- rep( .lsca , length = NOS))
+ misc$link <- c(rep_len( .lloc , NOS),
+ rep_len( .lsca , NOS))
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(M1 * NOS, M1 = M1)]
names(misc$link) <- temp.names
@@ -6649,7 +6321,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -6746,9 +6418,9 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
stop("bad input for argument 'n'") else n
oneval <- (length(mu) == 1 && length(dispersion) == 1)
- answer <- rep(0.0, length.out = use.n)
- mu <- rep(mu, length.out = use.n);
- dispersion <- rep(dispersion, length.out = use.n)
+ answer <- rep_len(0.0, use.n)
+ mu <- rep_len(mu, use.n)
+ dispersion <- rep_len(dispersion, use.n)
Kay1 <- 3 * (dispersion * mu * (1-mu))^2
if (oneval) {
@@ -6760,7 +6432,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
myroots <- myroots[myroots >= 0.0]
myroots <- myroots[myroots <= 1.0]
pdfmax <- dsimplex(myroots, mymu, dispersion[1])
- pdfmax <- rep(max(pdfmax), length.out = use.n) # For multiple peaks
+ pdfmax <- rep_len(max(pdfmax), use.n) # For multiple peaks
} else {
pdfmax <- numeric(use.n)
for (ii in 1:use.n) {
@@ -6879,13 +6551,12 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
init.mu <- (1 - .ishrinkage ) * y + .ishrinkage * use.this
- mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
- sigma.init <- if (length( .isigma )) rep( .isigma, leng = n) else {
+ mu.init <- rep_len(if (length( .imu )) .imu else init.mu, n)
+ sigma.init <- if (length( .isigma )) rep_len( .isigma, n) else {
use.this <- deeFun(y, mu = init.mu)
- rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
+ rep_len(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
if ( .imethod == 1) median(use.this) else
- mean(use.this, trim = 0.1)),
- length = n)
+ mean(use.this, trim = 0.1)), n)
}
etastart <-
cbind(theta2eta(mu.init, .lmu , earg = .emu ),
@@ -7023,10 +6694,9 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
if (!length(etastart)) {
- mu.init <- rep(if (length( .imu )) .imu else
- median(y), length = n)
- lambda.init <- rep(if (length( .ilambda )) .ilambda else
- sqrt(var(y)), length = n)
+ mu.init <- rep_len(if (length( .imu )) .imu else median(y), n)
+ lambda.init <- rep_len(if (length( .ilambda )) .ilambda else
+ sqrt(var(y)), n)
etastart <-
cbind(theta2eta(mu.init, .lmu , earg = .emu ),
theta2eta(lambda.init, .llambda , earg = .elambda ))
@@ -7145,8 +6815,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
predictors.names <-
namesof("theta", .link.theta , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- theta.init <- rep(if (length( .init.theta )) .init.theta else
- median(y), length = n)
+ theta.init <- rep_len(if (length( .init.theta )) .init.theta else
+ median(y), n)
etastart <-
theta2eta(theta.init, .link.theta , earg = .earg )
}
@@ -7228,8 +6898,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
namesof("theta", .link.theta , earg = .earg , tag = FALSE)
if (!length(etastart)) {
- theta.init <- rep(if (length( .init.theta )) .init.theta else
- median(y), length = n)
+ theta.init <- rep_len(if (length( .init.theta )) .init.theta else
+ median(y), n)
etastart <-
theta2eta(theta.init, .link.theta , earg = .earg )
@@ -7327,10 +6997,9 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
if (!length(etastart)) {
- mu.init <- rep(if (length( .imu )) .imu else
- (y), length = n)
- lambda.init <- rep(if (length( .ilambda )) .ilambda else
- 1/var(y), length = n)
+ mu.init <- rep_len(if (length( .imu )) .imu else (y), n)
+ lambda.init <- rep_len(if (length( .ilambda )) .ilambda else
+ 1/var(y), n)
etastart <-
cbind(theta2eta(mu.init, .lmu , earg = .emu ),
theta2eta(lambda.init, .llambda , earg = .elambda ))
@@ -7388,7 +7057,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
weight = eval(substitute(expression({
denominator <- y*(1-y) + (y-mu)^2
- d2l.dthetas2 <- array(NA, c(n, 2, 2))
+ d2l.dthetas2 <- array(NA_real_, c(n, 2, 2))
d2l.dthetas2[, 1, 1] <- c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
d2l.dthetas2[, 1, 2] <-
d2l.dthetas2[, 2, 1] <- c(w) * (y-mu) / denominator
@@ -7472,12 +7141,11 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
covarn <- sd(c(y))^2 / weighted.mean(y, w)
temp1 <- 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
temp2 <- 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
- init.rho <- rep(if (length( .irho)) .irho else {
- ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2)
- }, length = n)
- init.lambda <- rep(if (length( .ilambda)) .ilambda else {
- (2*init.rho-1) * weighted.mean(y, w) / (1-init.rho)
- }, length = n)
+ init.rho <- rep_len(if (length( .irho)) .irho else {
+ ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2) }, n)
+ init.lambda <- rep_len(if (length( .ilambda)) .ilambda else {
+ (2*init.rho-1) *
+ weighted.mean(y, w) / (1-init.rho)}, n)
etastart <-
cbind(theta2eta(init.rho, .lrho, earg = .erho),
theta2eta(init.lambda, .llambda , earg = .elambda ))
@@ -7572,9 +7240,9 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
rm(log)
LLL <- max(length(x), length(lambda), length(theta))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(theta) != LLL) theta <- rep(theta, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(theta) != LLL) theta <- rep_len(theta, LLL)
llans <- -x*lambda - theta + (x-1) * log(theta + x*lambda) +
log(theta) - lgamma(x+1)
@@ -7733,7 +7401,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
- misc$link <- rep( .llambda , length = M1 * ncoly)
+ misc$link <- rep_len( .llambda , M1 * ncoly)
misc$earg <- vector("list", M1 * ncoly)
names(misc$link) <-
names(misc$earg) <- temp.names
@@ -7908,7 +7576,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
if (!length(etastart)) {
k.init <- if (length( .init.k))
- rep( .init.k, length.out = length(y)) else {
+ rep_len( .init.k, length(y)) else {
medy = median(y)
if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
}
@@ -8052,17 +7720,16 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
if (!length(etastart)) {
k.init <- if (length( .ishape ))
- rep( .ishape, length.out = length(y)) else {
- rep(exp(median(y)), length.out = length(y))
+ rep_len( .ishape, length(y)) else {
+ rep_len(exp(median(y)), length(y))
}
scale.init <- if (length( .iscale ))
- rep( .iscale , length.out = length(y)) else {
- rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
+ rep_len( .iscale , length(y)) else {
+ rep_len(sqrt(var(y) / trigamma(k.init)), length(y))
}
loc.init <- if (length( .ilocat ))
- rep( .ilocat, length.out = length(y)) else {
- rep(median(y) - scale.init * digamma(k.init),
- length.out = length(y))
+ rep_len( .ilocat, length(y)) else {
+ rep_len(median(y) - scale.init * digamma(k.init), length(y))
}
etastart <-
cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
@@ -8250,25 +7917,25 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
if (!length(etastart)) {
- sdy <- sqrt(var(y))
- k.init <- if (length( .ishape ))
- rep( .ishape, length.out = length(y)) else {
- skewness <- mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
- rep(-skewness, length.out = length(y))
- }
- scale.init <- if (length( .iscale ))
- rep( .iscale , length.out = length(y)) else {
- rep(sdy, length.out = length(y))
- }
- loc.init <- if (length( .iloc ))
- rep( .iloc, length.out = length(y)) else {
- rep(median(y), length.out = length(y))
- }
- etastart <-
- cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
- theta2eta(scale.init, .lscale , earg = .escale ),
- theta2eta(k.init, .lshape , earg = .eshape ))
+ sdy <- sqrt(var(y))
+ k.init <- if (length( .ishape ))
+ rep_len( .ishape, length(y)) else {
+ skewness <- mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
+ rep_len(-skewness, length(y))
+ }
+ scale.init <- if (length( .iscale ))
+ rep_len( .iscale , length(y)) else {
+ rep_len(sdy, length(y))
}
+ loc.init <- if (length( .iloc ))
+ rep_len( .iloc, length(y)) else {
+ rep_len(median(y), length(y))
+ }
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(k.init, .lshape , earg = .eshape ))
+ }
}), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
.elocat = elocat, .escale = escale, .eshape = eshape,
.iloc = ilocat, .iscale = iscale, .ishape = ishape ))),
@@ -8366,12 +8033,12 @@ dgengamma.stacy <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
stop("bad input for argument 'k'")
N <- max(length(x), length(scale), length(d), length(k))
- x <- rep(x, length.out = N)
- scale <- rep(scale, length.out = N)
- d <- rep(d, length.out = N)
- k <- rep(k, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(d) != N) d <- rep_len(d, N)
+ if (length(k) != N) k <- rep_len(k, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
- Loglik <- rep(log(0), length.out = N)
+ Loglik <- rep_len(log(0), N)
xok <- x > 0
if (any(xok)) {
zedd <- (x[xok]/scale[xok])^(d[xok])
@@ -8511,37 +8178,29 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
gscale <- .gscale
gshape1.d <- .gshape1.d
gshape2.k <- .gshape2.k
- if (length( .iscale ))
- gscale <- rep( .iscale , length = NOS)
- if (length( .id ))
- gshape1.d <- rep( .id , length = NOS)
- if (length( .ik ))
- gshape2.p <- rep( .ik , length = NOS)
- allmat1 <- expand.grid(shape1.d = gshape1.d,
- shape2.k = gshape2.k)
- allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
-
- ll.gstacy <- function(scaleval, x = x, y = y, w = w, extraargs) {
+ if (length( .iscale )) gscale <- rep_len( .iscale , NOS)
+ if (length( .id )) gshape1.d <- rep_len( .id , NOS)
+ if (length( .ik )) gshape2.p <- rep_len( .ik , NOS)
+
+
+ ll.gstacy3 <- function(scaleval, shape1.d, shape2.k,
+ x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgengamma.stacy(x = y,
scale = scaleval,
- d = extraargs$Shape1.d,
- k = extraargs$Shape2.k,
+ d = shape1.d,
+ k = shape2.k,
log = TRUE))
ans
}
-
- for (iloc in 1:nrow(allmat1)) {
- allmat2[iloc, ] <-
- grid.search(gscale, objfun = ll.gstacy,
- y = yvec, x = x, w = wvec,
- ret.objfun = TRUE, # 2nd value is the loglik
- extraargs = list(Shape1.d = allmat1[iloc, 1],
- Shape2.k = allmat1[iloc, 2]))
- }
- ind5 <- which.max(allmat2[, 2]) # 2nd value is the loglik
- sc.init[, spp.] <- allmat2[ind5, 1]
- dd.init[, spp.] <- allmat1[ind5, 1]
- kk.init[, spp.] <- allmat1[ind5, 2]
+ try.this <-
+ grid.search3(gscale, gshape1.d, gshape2.k,
+ objfun = ll.gstacy3,
+ y = yvec, w = wvec,
+ ret.objfun = TRUE) # Last value is the loglik
+
+ sc.init[, spp.] <- try.this["Value1" ]
+ dd.init[, spp.] <- try.this["Value2" ]
+ kk.init[, spp.] <- try.this["Value3" ]
} # End of for (spp. ...)
@@ -8668,13 +8327,11 @@ dlog <- function(x, prob, log = FALSE) {
if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N <- max(length(x), length(prob))
- if (length(x) != N)
- x <- rep(x, length.out = N)
- if (length(prob) != N)
- prob <- rep(prob, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(prob) != N) prob <- rep_len(prob, N)
ox <- !is.finite(x)
zero <- ox | round(x) != x | x < 1
- ans <- rep(0.0, length.out = length(x))
+ ans <- rep_len(0.0, length(x))
if (log.arg) {
ans[ zero] <- log(0.0)
ans[!zero] <- x[!zero] * log(prob[!zero]) - log(x[!zero]) -
@@ -8691,12 +8348,13 @@ dlog <- function(x, prob, log = FALSE) {
plog <- function(q, prob, log.p = FALSE) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
- stop("bad input for argument 'prob'")
- N <- max(length(q), length(prob))
- q <- rep(q, length.out = N);
- prob <- rep(prob, length.out = N);
+ if (!is.Numeric(q)) stop("bad input for argument 'q'")
+ if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
+ stop("bad input for argument 'prob'")
+ N <- max(length(q), length(prob))
+ if (length(q) != N) q <- rep_len(q, N)
+ if (length(prob) != N) prob <- rep_len(prob, N)
+
bigno <- 10
owen1965 <- (q * (1 - prob) > bigno)
@@ -8749,7 +8407,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans <- rep(0.0, length.out = use.n)
+ ans <- rep_len(0.0, use.n)
ptr1 <- 1; ptr2 <- 0
a <- -1 / log1p(-prob)
@@ -8865,7 +8523,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .link , length = ncoly))
+ misc$link <- c(rep_len( .link , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -9026,7 +8684,7 @@ rlevy <- function(n, location = 0, scale = 1)
}
gamma.init <- if (length( .iscale )) .iscale else
median(y - delta.init) # = 1/median(1/(y-delta.init))
- gamma.init <- rep(gamma.init, length = length(y))
+ gamma.init <- rep_len(gamma.init, length(y))
etastart <-
cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
if ( .delta.known ) NULL else delta.init)
@@ -9211,19 +8869,16 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
if (!length(etastart)) {
- lambda.init <- rep(if (length( .ilambda )) .ilambda else 1,
- length = n)
- sh1.init <- if (length( .ishape1 ))
- rep( .ishape1, length = n) else NULL
- sh2.init <- if (length( .ishape2 ))
- rep( .ishape2, length = n) else NULL
+ lambda.init <- rep_len(if (length( .ilambda )) .ilambda else 1, n)
+ sh1.init <- if (length( .ishape1 )) rep_len( .ishape1 , n) else NULL
+ sh2.init <- if (length( .ishape2 )) rep_len( .ishape2 , n) else NULL
txY.init <- lambda.init * y / (1+lambda.init*y - y)
mean1 <- mean(txY.init)
mean2 <- mean(1/txY.init)
if (!is.Numeric(sh1.init))
- sh1.init <- rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
+ sh1.init <- rep_len((mean2 - 1) / (mean2 - 1/mean1), n)
if (!is.Numeric(sh2.init))
- sh2.init <- rep(sh1.init * (1-mean1) / mean1, length = n)
+ sh2.init <- rep_len(sh1.init * (1-mean1) / mean1, n)
etastart <-
cbind(theta2eta(sh1.init, .lshape1 , earg = .eshape1),
theta2eta(sh2.init, .lshape2 , earg = .eshape2),
@@ -9340,121 +8995,15 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
-
- betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
-
- link <- as.list(substitute(link))
- earg <- link2list(link)
- link <- attr(earg, "function.name")
-
-
- new("vglmff",
- blurb = c("Beta-prime distribution\n",
- "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
- " y>0, shape1>0, shape2>0\n\n",
- "Links: ",
- namesof("shape1", link, earg = earg), ", ",
- namesof("shape2", link, earg = earg), "\n",
- "Mean: shape1/(shape2-1) provided shape2>1"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
-
- w.y.check(w = w, y = y,
- Is.positive.y = TRUE,
- ncol.w.max = 1,
- ncol.y.max = 1)
-
-
-
- predictors.names <-
- c(namesof("shape1", .link , earg = .earg , short = TRUE),
- namesof("shape2", .link , earg = .earg , short = TRUE))
- if (is.numeric( .i1) && is.numeric( .i2)) {
- vec <- c( .i1, .i2)
- vec <- c(theta2eta(vec[1], .link , earg = .earg ),
- theta2eta(vec[2], .link , earg = .earg ))
- etastart <- matrix(vec, n, 2, byrow = TRUE)
- }
- if (!length(etastart)) {
- init1 <- if (length( .i1))
- rep( .i1, length.out = n) else rep(1, length.out = n)
- init2 <- if (length( .i2))
- rep( .i2, length.out = n) else 1 + init1 / (y + 0.1)
- etastart <-
- matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
- n, 2, byrow = TRUE)
- }
- }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes <- eta2theta(eta, .link , earg = .earg )
- ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2] - 1), NA)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c(shape1 = .link , shape2 = .link)
- misc$earg <- list(shape1 = .earg , shape2 = .earg )
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta,
- extra = NULL,
- summation = TRUE) {
- shapes <- eta2theta(eta, .link , earg = .earg )
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- ll.elts <-
- c(w) * ((shapes[, 1]-1) * log(y) -
- lbeta(shapes[, 1], shapes[, 2]) -
- (shapes[, 2]+shapes[, 1]) * log1p(y))
- if (summation) {
- sum(ll.elts)
- } else {
- ll.elts
- }
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = "betaprime",
- deriv = eval(substitute(expression({
- shapes <- eta2theta(eta, .link , earg = .earg )
- dshapes.deta <- dtheta.deta(shapes, .link , earg = .earg )
- dl.dshapes <- cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
- digamma(shapes[, 1]+shapes[, 2]),
- - log1p(y) - digamma(shapes[, 2]) +
- digamma(shapes[, 1]+shapes[, 2]))
- c(w) * dl.dshapes * dshapes.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- temp2 <- trigamma(shapes[, 1] + shapes[, 2])
- d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
- d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
- d2l.dshape1shape2 <- temp2
-
- wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
- wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
- wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
- wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
- dshapes.deta[, 1] * dshapes.deta[, 2]
-
- -c(w) * wz
- }))
-}
-
-
-
-
-
-
dmaxwell <- function(x, rate, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
L <- max(length(x), length(rate))
- x <- rep(x, length.out = L)
- rate <- rep(rate, length.out = L)
- logdensity <- rep(log(0), length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(rate) != L) rate <- rep_len(rate, L)
+ logdensity <- rep_len(log(0), L)
xok <- (x >= 0)
logdensity[xok] <- 0.5 * log(2/pi) + 1.5 * log(rate[xok]) +
2 * log(x[xok]) - 0.5 * rate[xok] * x[xok]^2
@@ -9587,7 +9136,7 @@ rmaxwell <- function(n, rate) {
misc$earg[[ilocal]] <- .earg
}
- misc$link <- rep( .link , length = ncoly)
+ misc$link <- rep_len( .link , ncoly)
names(misc$link) <- mynames1
misc$M1 <- M1
@@ -9656,24 +9205,23 @@ rmaxwell <- function(n, rate) {
dnaka <- function(x, scale = 1, shape, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
-
- L <- max(length(x), length(shape), length(scale))
- x <- rep(x, length.out = L)
- shape <- rep(shape, length.out = L)
- scale <- rep(scale, length.out = L)
-
- logdensity <- rep(log(0), length.out = L)
- xok <- (x > 0)
- logdensity[xok] <- dgamma(x = x[xok]^2, shape = shape[xok],
- scale = scale[xok] / shape[xok],
- log = TRUE) +
- log(2) + log(x[xok])
- logdensity[is.infinite(x)] <- log(0) # 20141208 KaiH
-
- if (log.arg) logdensity else exp(logdensity)
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ L <- max(length(x), length(shape), length(scale))
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
+
+ logdensity <- rep_len(log(0), L)
+ xok <- (x > 0)
+ logdensity[xok] <- dgamma(x = x[xok]^2, shape = shape[xok],
+ scale = scale[xok] / shape[xok], log = TRUE) +
+ log(2) + log(x[xok])
+ logdensity[is.infinite(x)] <- log(0) # 20141208 KaiH
+
+ if (log.arg) logdensity else exp(logdensity)
}
@@ -9697,10 +9245,10 @@ qnaka <- function(p, scale = 1, shape, ...) {
stop("bad input for argument 'scale'")
L <- max(length(p), length(shape), length(scale))
- p <- rep(p, length.out = L)
- shape <- rep(shape, length.out = L)
- scale <- rep(scale, length.out = L)
- ans <- rep(0.0, length.out = L)
+ if (length(p) != L) p <- rep_len(p, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
+ ans <- rep_len(0.0, L)
myfun <- function(x, shape, scale = 1, p)
pnaka(q = x, shape = shape, scale = scale) - p
@@ -9734,7 +9282,7 @@ rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
Smallno > 0.01 ||
Smallno < 2 * .Machine$double.eps)
stop("bad input for argument 'Smallno'")
- ans <- rep(0.0, length.out = use.n)
+ ans <- rep_len(0.0, use.n)
ptr1 <- 1
ptr2 <- 0
@@ -9807,11 +9355,10 @@ rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
if (!length(etastart)) {
init2 <- if (is.Numeric( .iscale , positive = TRUE))
- rep( .iscale , length.out = n) else
- rep(1, length.out = n)
+ rep_len( .iscale , n) else rep_len(1, n)
init1 <- if (is.Numeric( .ishape, positive = TRUE))
- rep( .ishape, length.out = n) else
- rep(init2 / (y+1/8)^2, length.out = n)
+ rep_len( .ishape , n) else
+ rep_len(init2 / (y+1/8)^2, n)
etastart <-
cbind(theta2eta(init2, .lscale , earg = .escale ),
theta2eta(init1, .lshape , earg = .eshape ))
@@ -9890,10 +9437,10 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
rm(log)
L <- max(length(x), length(scale))
- x <- rep(x, length.out = L)
- scale <- rep(scale, length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
- logdensity <- rep(log(0), length.out = L)
+ logdensity <- rep_len(log(0), L)
xok <- (x > 0)
logdensity[xok] <- log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
2 * log(scale[xok])
@@ -10060,7 +9607,7 @@ rrayleigh <- function(n, scale = 1) {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <- c(rep( .lscale , length = ncoly))
+ misc$link <- c(rep_len( .lscale , ncoly))
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
@@ -10131,7 +9678,7 @@ rrayleigh <- function(n, scale = 1) {
if (intercept.only && .oim.mean ) {
ave.oim <- weighted.mean(d2l.dScale2,
- rep(c(w), length = length(d2l.dScale2)))
+ rep_len(c(w), length(d2l.dScale2)))
if (ave.oim > 0) {
wz <- c(w) * dScale.deta^2 * ave.oim
}
@@ -10155,14 +9702,14 @@ dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
N <- max(length(x), length(location), length(scale),
length(inequality), length(shape))
- if (length(x) != N) x <- rep(x, length.out = N)
- if (length(location) != N) location <- rep(location, length.out = N)
- if (length(inequality) != N) inequality <- rep(inequality, length.out = N)
- if (length(shape) != N) shape <- rep(shape, length.out = N)
- if (length(scale) != N) scale <- rep(scale, length.out = N)
+ if (length(x) != N) x <- rep_len(x, N)
+ if (length(location) != N) location <- rep_len(location, N)
+ if (length(inequality) != N) inequality <- rep_len(inequality, N)
+ if (length(shape) != N) shape <- rep_len(shape, N)
+ if (length(scale) != N) scale <- rep_len(scale, N)
- logdensity <- rep(log(0), length.out = N)
+ logdensity <- rep_len(log(0), N)
xok <- (x > location)
zedd <- (x - location) / scale
logdensity[xok] <- log(shape[xok]) -
@@ -10422,13 +9969,10 @@ rparetoI <- function(n, scale = 1, shape = 1)
shape.init <- max(0.01, (2*A2-A1)/(A1-A2))
}
- etastart <- cbind(
- theta2eta(rep(scale.init, length.out = n),
- .lscale , earg = .escale ),
- theta2eta(rep(inequ.init, length.out = n),
- .linequ, earg = .einequ),
- theta2eta(rep(shape.init, length.out = n),
- .lshape , earg = .eshape ))
+ etastart <- cbind(
+ theta2eta(rep_len(scale.init, n), .lscale , earg = .escale ),
+ theta2eta(rep_len(inequ.init, n), .linequ , earg = .einequ ),
+ theta2eta(rep_len(shape.init, n), .lshape , earg = .eshape ))
}
}), list( .location = location, .lscale = lscale,
.linequ = linequ, .lshape = lshape, .imethod = imethod,
@@ -10591,12 +10135,9 @@ rparetoI <- function(n, scale = 1, shape = 1)
if (!length(scale.init))
scale.init <- exp(fittemp$coef["Intercept"])
}
- etastart=cbind(
- theta2eta(rep(scale.init, length.out = n),
- .lscale , earg = .escale ),
- theta2eta(rep(inequ.init, length.out = n),
- .linequ,
- earg = .einequ))
+ etastart<- cbind(
+ theta2eta(rep_len(scale.init, n), .lscale , earg = .escale ),
+ theta2eta(rep_len(inequ.init, n), .linequ , earg = .einequ ))
}
}), list( .location = location, .lscale = lscale,
.linequ = linequ,
@@ -10737,10 +10278,9 @@ rparetoI <- function(n, scale = 1, shape = 1)
if (!length(scale.init))
scale.init <- exp(fittemp$coef["Intercept"])
}
- etastart <- cbind(theta2eta(rep(scale.init, length.out = n),
- .lscale , earg = .escale ),
- theta2eta(rep(shape.init, length.out = n),
- .lshape , earg = .eshape ))
+ etastart <-
+ cbind(theta2eta(rep_len(scale.init, n), .lscale , earg = .escale ),
+ theta2eta(rep_len(shape.init, n), .lshape , earg = .eshape ))
}
}), list( .location = location, .lscale = lscale,
.escale = escale, .eshape = eshape,
@@ -10824,11 +10364,13 @@ dpareto <- function(x, scale = 1, shape, log = FALSE) {
rm(log)
L <- max(length(x), length(scale), length(shape))
- x <- rep(x, length.out = L);
- scale <- rep(scale, length.out = L);
- shape <- rep(shape, length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(scale) != L) scale <- rep_len(scale, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
- logdensity <- rep(log(0), length.out = L)
+
+
+ logdensity <- rep_len(log(0), L)
xok <- (x >= scale) # 20141212 KaiH
logdensity[xok] <- log(shape[xok]) + shape[xok] * log(scale[xok]) -
(shape[xok]+1) * log(x[xok])
@@ -11036,13 +10578,13 @@ dtruncpareto <- function(x, lower, upper, shape, log = FALSE) {
stop("argument 'shape' must be positive")
L <- max(length(x), length(lower), length(upper), length(shape))
- if (length(x) != L) x <- rep(x, length.out = L)
- if (length(shape) != L) shape <- rep(shape, length.out = L)
- if (length(lower) != L) lower <- rep(lower, length.out = L)
- if (length(upper) != L) upper <- rep(upper, length.out = L)
+ if (length(x) != L) x <- rep_len(x, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(lower) != L) lower <- rep_len(lower, L)
+ if (length(upper) != L) upper <- rep_len(upper, L)
- logdensity <- rep(log(0), length.out = L)
+ logdensity <- rep_len(log(0), L)
xok <- (0 < lower) & (lower < x) & (x < upper) & (shape > 0)
logdensity[xok] <- log(shape[xok]) + shape[xok] * log(lower[xok]) -
@@ -11071,10 +10613,10 @@ ptruncpareto <- function(q, lower, upper, shape,
rm(log.p) # 20141231 KaiH
L <- max(length(q), length(lower), length(upper), length(shape))
- if (length(q) != L) q <- rep(q, length.out = L)
- if (length(shape) != L) shape <- rep(shape, length.out = L)
- if (length(lower) != L) lower <- rep(lower, length.out = L)
- if (length(upper) != L) upper <- rep(upper, length.out = L)
+ if (length(q) != L) q <- rep_len(q, L)
+ if (length(shape) != L) shape <- rep_len(shape, L)
+ if (length(lower) != L) lower <- rep_len(lower, L)
+ if (length(upper) != L) upper <- rep_len(upper, L)
ans <- q * 0
xok <- (0 < lower) & (lower < q) & (q < upper) & (shape > 0)
@@ -11192,7 +10734,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
shape.grid <- 2^((-4):4)
try.this <- grid.search(shape.grid, objfun = truncpareto.Loglikfun,
y = y, x = x, w = w)
- try.this <- rep(try.this, length.out = n)
+ try.this <- rep_len(try.this, n)
try.this
}
etastart <- theta2eta(shape.init, .lshape , earg = .earg )
@@ -11295,7 +10837,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
if (!length(etastart)) {
initlambda <- if (length( .init.lambda )) .init.lambda else
1 / (0.01 + (y-1)^2)
- initlambda <- rep(initlambda, length.out = n)
+ initlambda <- rep_len(initlambda, n)
etastart <-
cbind(theta2eta(initlambda,
link = .link.lambda , earg = .earg ))
@@ -11402,12 +10944,10 @@ rtruncpareto <- function(n, lower, upper, shape) {
if (!length(etastart)) {
shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
- rep( .ishape, length.out = n)
- ratee.init <- if (length( .iratee ))
- rep( .iratee , length.out = n) else
- (digamma(shape.init+1) - digamma(1)) / (y+1/8)
- ratee.init <- rep(weighted.mean(ratee.init, w = w),
- length.out = n)
+ rep_len( .ishape, n)
+ ratee.init <- if (length( .iratee )) rep_len( .iratee , n) else
+ (digamma(shape.init+1) - digamma(1)) / (y+1/8)
+ ratee.init <- rep_len(weighted.mean(ratee.init, w = w), n)
etastart <-
cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
theta2eta(shape.init, .lshape , earg = .eshape ))
@@ -11466,7 +11006,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
.eshape = eshape, .eratee = eratee))),
weight = eval(substitute(expression({
d11 <- 1 / shape^2 # True for all shape
- d22 <- d12 <- rep(NA_real_, length.out = n)
+ d22 <- d12 <- rep_len(NA_real_, n)
index2 <- abs(shape - 2) > .tolerance # index2 = shape != 1
largeno <- 10000
if (any(index2)) {
@@ -11554,12 +11094,10 @@ rtruncpareto <- function(n, lower, upper, shape) {
if (!length(etastart)) {
shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
- rep( .ishape, length.out = n)
- rateinit <- if (length( .irate ))
- rep( .irate , length.out = n) else
+ rep_len( .ishape , n)
+ rateinit <- if (length( .irate )) rep_len( .irate , n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
- etastart <-
- cbind(theta2eta(rateinit, .lrate , earg = .erate ))
+ etastart <- cbind(theta2eta(rateinit, .lrate , earg = .erate ))
}
}), list( .lrate = lrate, .irate = irate, .ishape = ishape,
.erate = erate))),
@@ -11765,9 +11303,8 @@ rtruncpareto <- function(n, lower, upper, shape) {
last = eval(substitute(expression({
M1 <- extra$M1
- misc$link <-
- c(rep( .llocat , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ misc$link <- c(rep_len( .llocat , ncoly),
+ rep_len( .lscale , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[
interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -11872,7 +11409,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
negbinomial.size <- function(size = Inf,
lmu = "loge",
imu = NULL,
- probs.y = 0.35,
+ iprobs.y = 0.35,
imethod = 1,
ishrinkage = 0.95, zero = NULL) {
@@ -11883,7 +11420,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
if (any(size <= 0))
stop("bad input for argument 'size'")
- if (any(is.na(size)))
+ if (anyNA(size))
stop("bad input for argument 'size'")
@@ -11972,7 +11509,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .probs.y ) + 1/16)
+ c(quantile(y[, iii], probs = .iprobs.y ) + 1/16)
} else {
median(y[, iii]) + 1/16
}
@@ -12010,7 +11547,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
}), list( .lmu = lmu,
.emu = emu,
.mu.init = imu,
- .size = size, .probs.y = probs.y,
+ .size = size, .iprobs.y = iprobs.y,
.ishrinkage = ishrinkage,
.zero = zero, .imethod = imethod ))),
@@ -12038,7 +11575,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
.size = size ))),
last = eval(substitute(expression({
- misc$link <- rep( .lmu , length = NOS)
+ misc$link <- rep_len( .lmu , NOS)
names(misc$link) <- mynames1
misc$earg <- vector("list", M)
diff --git a/R/family.vglm.R b/R/family.vglm.R
index 1d24ec9..4fe021c 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -25,7 +25,7 @@ print.vfamily <- function(x, ...) {
cat("Classes:", paste(f, collapse=", "), "\n")
cat("\n")
- for (ii in 1:length(nn))
+ for (ii in seq_along(nn))
cat(nn[ii])
cat("\n")
invisible(return(x))
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 4d48a51..6095299 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -27,17 +27,17 @@ dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
rm(log)
LLL <- max(length(x), length(pobs0), length(prob), length(size))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
- if (length(prob) != LLL) prob <- rep(prob, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
- ans <- rep(0.0, len = LLL)
+ ans <- rep_len(0.0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
if (!is.Numeric(prob, positive = TRUE) ||
- max(prob, na.rm = TRUE) >= 1)
- stop("argument 'prob' must be in (0,1)")
+ max(prob, na.rm = TRUE) > 1)
+ stop("argument 'prob' must be in (0,1]")
if (!is.Numeric(size, positive = TRUE))
stop("argument 'size' must be in (0,Inf)")
index0 <- x == 0
@@ -65,11 +65,11 @@ pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
}
LLL <- max(length(q), length(pobs0), length(prob), length(size))
- if (length(q) != LLL) q <- rep(q, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
- if (length(prob) != LLL) prob <- rep(prob, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
- ans <- rep(0.0, len = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ ans <- rep_len(0.0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
@@ -95,11 +95,11 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
}
LLL <- max(length(p), length(pobs0), length(prob), length(size))
- if (length(p) != LLL) p <- rep(p, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
- if (length(prob) != LLL) prob <- rep(prob, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL)
- ans <- rep(0.0, len = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ ans <- rep_len(0.0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -129,7 +129,7 @@ rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
ans <- rposnegbin(n = use.n, prob = prob, size = size)
if (length(pobs0) != use.n)
- pobs0 <- rep(pobs0, len = use.n)
+ pobs0 <- rep_len(pobs0, use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -147,10 +147,10 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
rm(log)
LLL <- max(length(x), length(lambda), length(pobs0))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
- ans <- rep(0.0, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
@@ -173,10 +173,10 @@ dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
pzapois <- function(q, lambda, pobs0 = 0) {
LLL <- max(length(q), length(lambda), length(pobs0))
- if (length(q) != LLL) q <- rep(q, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
- ans <- rep(0.0, len = LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
@@ -195,9 +195,9 @@ pzapois <- function(q, lambda, pobs0 = 0) {
qzapois <- function(p, lambda, pobs0 = 0) {
LLL <- max(length(p), length(lambda), length(pobs0))
- if (length(p) != LLL) p <- rep(p, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL)
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be between 0 and 1 inclusive")
@@ -218,7 +218,7 @@ rzapois <- function(n, lambda, pobs0 = 0) {
ans <- rpospois(use.n, lambda)
if (length(pobs0) != use.n)
- pobs0 <- rep(pobs0, length = use.n)
+ pobs0 <- rep_len(pobs0, use.n)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must in [0,1]")
@@ -238,9 +238,9 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
rm(log)
LLL <- max(length(x), length(lambda), length(pstr0))
- if (length(x) != LLL) x <- rep(x, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL)
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
ans <- x + lambda + pstr0
@@ -272,9 +272,9 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
pzipois <- function(q, lambda, pstr0 = 0) {
LLL <- max(length(pstr0), length(lambda), length(q))
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(q) != LLL) q <- rep(q, len = LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(q) != LLL) q <- rep_len(q, LLL)
ans <- ppois(q, lambda)
ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
@@ -293,36 +293,24 @@ pzipois <- function(q, lambda, pstr0 = 0) {
qzipois <- function(p, lambda, pstr0 = 0) {
LLL <- max(length(p), length(lambda), length(pstr0))
- if (length(p) != LLL) p <- rep(p, len = LLL)
- if (length(lambda) != LLL) lambda <- rep(lambda, len = LLL)
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL)
- ans <- p
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+ ans <- rep_len(NA_real_, LLL)
+ deflat.limit <- -1 / expm1(lambda)
ans[p <= pstr0] <- 0
- pindex <- (p > pstr0)
+ pindex <- (pstr0 < p) & (deflat.limit <= pstr0)
ans[pindex] <-
qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
lambda = lambda[pindex])
-
- deflat.limit <- -1 / expm1(lambda)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * exp(-lambda[ind0])
- ans[p[ind0] <= pobs0] <- 0
- pindex <- (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * exp(-lambda[pindex])
- ans[pindex] <- qpospois((p[pindex] - Pobs0) / (1 - Pobs0),
- lambda = lambda[pindex])
- }
-
-
ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
+ ans[1 < pstr0] <- NaN
ans[p < 0] <- NaN
- ans[p > 1] <- NaN
+ ans[1 < p] <- NaN
ans
}
@@ -330,32 +318,7 @@ qzipois <- function(p, lambda, pstr0 = 0) {
rzipois <- function(n, lambda, pstr0 = 0) {
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
-
- if (length(pstr0) != use.n) pstr0 <- rep(pstr0, len = use.n)
- if (length(lambda) != use.n) lambda <- rep(lambda, len = use.n)
-
- ans <- rpois(use.n, lambda)
- ans <- ifelse(runif(use.n) < pstr0, 0, ans)
-
-
-
- prob0 <- exp(-lambda)
- deflat.limit <- -1 / expm1(lambda)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] <- rpospois(sum(ind0), lambda[ind0])
- ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
- }
-
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
-
- ans
+ qzipois(runif(n), lambda, pstr0 = pstr0)
}
@@ -638,8 +601,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
misc$expected <- TRUE
misc$multipleResponses <- TRUE
- temp.names <- c(rep( .lpobs.0 , len = NOS),
- rep( .llambda , len = NOS))
+ temp.names <- c(rep_len( .lpobs.0 , NOS),
+ rep_len( .llambda , NOS))
temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
names(misc$link) <-
@@ -680,6 +643,16 @@ rzipois <- function(n, lambda, pstr0 = 0) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ TFvec <- c(TRUE, FALSE)
+ phimat <- eta2theta(eta[, TFvec, drop = FALSE], .lpobs.0 , earg = .epobs.0 )
+ lambda <- eta2theta(eta[, !TFvec, drop = FALSE], .llambda , earg = .elambda )
+ okay1 <- all(is.finite(lambda)) && all(0 < lambda) &&
+ all(is.finite(phimat)) && all(0 < phimat & phimat < 1)
+ okay1
+ }, list( .lpobs.0 = lpobs.0, .llambda = llambda,
+ .epobs.0 = epobs.0, .elambda = elambda ))),
+
simslot = eval(substitute(
function(object, nsim) {
@@ -919,8 +892,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
misc$expected <- TRUE
misc$multipleResponses <- TRUE
- temp.names <- c(rep( .llambda , len = NOS),
- rep( .lonempobs0 , len = NOS))
+ temp.names <- c(rep_len( .llambda , NOS),
+ rep_len( .lonempobs0 , NOS))
temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
names(misc$link) <-
@@ -965,6 +938,17 @@ rzipois <- function(n, lambda, pstr0 = 0) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ TFvec <- c(TRUE, FALSE)
+ lambda <- eta2theta(eta[, TFvec, drop=FALSE], .llambda , e= .elambda )
+ onempobs0 <- eta2theta(eta[, !TFvec, drop=FALSE], .lonempobs0 , e= .eonempobs0 )
+
+ okay1 <- all(is.finite(lambda)) && all(0 < lambda) &&
+ all(is.finite(onempobs0)) && all(0 < onempobs0 & onempobs0 < 1)
+ okay1
+ }, list( .lonempobs0 = lonempobs0, .llambda = llambda,
+ .eonempobs0 = eonempobs0, .elambda = elambda ))),
+
simslot = eval(substitute(
function(object, nsim) {
@@ -1067,6 +1051,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
function(
zero = "size",
type.fitted = c("mean", "munb", "pobs0"),
+ mds.min = 1e-3,
nsimEIM = 500,
cutoff.prob = 0.999, # higher is better for large 'size'
eps.trig = 1e-7,
@@ -1076,14 +1061,21 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
imethod = 1,
ipobs0 = NULL,
imunb = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
+ iprobs.y = NULL,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
isize = NULL,
- gsize.mux = exp((-12:6)/2)) {
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
+
+
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
if (!is.Numeric(eps.trig, length.arg = 1,
@@ -1132,7 +1124,6 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
"Mean: (1 - pobs0) * munb / (1 - (size / (size + ",
"munb))^size)"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 3)
@@ -1143,6 +1134,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
list(M1 = 3,
Q1 = 1,
expected = TRUE,
+ mds.min = .mds.min ,
imethod = .imethod ,
multipleResponses = TRUE,
parameters.names = c("pobs0", "munb", "size"),
@@ -1152,13 +1144,14 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
zero = .zero )
}, list( .zero = zero, .imethod = imethod,
.nsimEIM = nsimEIM, .eps.trig = eps.trig,
- .type.fitted = type.fitted
+ .type.fitted = type.fitted,
+ .mds.min = mds.min
))),
initialize = eval(substitute(expression({
M1 <- 3
- temp5 <-
+ temp16 <-
w.y.check(w = w, y = y,
Is.integer.y = TRUE,
Is.nonnegative.y = TRUE,
@@ -1167,8 +1160,8 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp16$w
+ y <- temp16$y
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
@@ -1190,12 +1183,49 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = TRUE,
- probs.y = .probs.y )
+
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 0 # Important to exclude the 0s
+ posyvec <- y[TFvec, jay]
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
+ }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(posyvec, w = w[TFvec, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = posNBD.Loglikfun2,
+ y = posyvec, # x = x[TFvec, , drop = FALSE],
+ w = w[TFvec, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
@@ -1204,30 +1234,13 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
for (jay in 1:NOS) {
if (any(pobs0.init[, jay] < 0)) {
index.y0 <- (y[, jay] < 0.5)
- pobs0.init[, jay] <- max(min(mean(index.y0), 1 - .ipobs0.small ),
+ pobs0.init[, jay] <- max(min(weighted.mean(index.y0, w[, jay]),
+ 1 - .ipobs0.small ),
.ipobs0.small )
}
}
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- munb <- extraargs
- sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- TFvec <- (y[, jay] > 0)
- size.init[, jay] <-
- grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
- y = y[TFvec, jay], # x = x[TFvec, ],
- w = w[TFvec, jay],
- extraargs = munb.init[TFvec, jay])
- }
- }
etastart <- cbind(theta2eta(pobs0.init, .lpobs0 , earg = .epobs0 ),
theta2eta(munb.init, .lmunb , earg = .emunb ),
@@ -1240,9 +1253,10 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
.epobs0 = epobs0, .emunb = emunb, .esize = esize,
.ipobs0 = ipobs0, .isize = isize,
.ipobs0.small = ipobs0.small,
- .imunb = imunb, .gsize.mux = gsize.mux,
- .imethod = imethod, .ishrinkage = ishrinkage,
- .type.fitted = type.fitted, .probs.y = probs.y ))),
+ .imunb = imunb,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+ .imethod = imethod,
+ .type.fitted = type.fitted, .iprobs.y = iprobs.y ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -1267,7 +1281,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
prob0 <- tempk^kmat # p(0) from negative binomial
oneminusf0 <- 1 - prob0
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
oneminusf0[big.size] <- -expm1(-munb[big.size])
@@ -1290,14 +1304,15 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
}
ans
}, list( .lpobs0 = lpobs0, .lsize = lsize, .lmunb = lmunb,
- .epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min ))),
last = eval(substitute(expression({
misc$link <-
- c(rep( .lpobs0 , length = NOS),
- rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
+ c(rep_len( .lpobs0 , NOS),
+ rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <- c(mynames1,
mynames2,
mynames3)[interleave.VGAM(M1*NOS, M1 = M1)]
@@ -1364,6 +1379,29 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
.epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ phi0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+
+ okay1 <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ all(is.finite(phi0)) && all(0 < phi0 & phi0 < 1)
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-altered Poisson ",
+ "model instead.")
+ okay1 && overdispersion
+ }, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min))),
@@ -1387,16 +1425,18 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
"try fitting a zero-altered Poisson ",
"model instead")
- kmat[big.size] <- munb[big.size] / smallval
+ kmat[big.size] <- munb[big.size] / smallval
}
+
tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
tempm <- munb / (kmat + munb)
prob0 <- tempk^kmat
@@ -1438,7 +1478,6 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
if (any(big.size)) {
- dl.dsize[big.size] <- 1e-8 # A small number
}
@@ -1464,7 +1503,8 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lpobs0 = lpobs0 , .lmunb = lmunb , .lsize = lsize ,
- .epobs0 = epobs0 , .emunb = emunb , .esize = esize ))),
+ .epobs0 = epobs0 , .emunb = emunb , .esize = esize,
+ .mds.min = mds.min ))),
@@ -1645,20 +1685,28 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
type.fitted = c("mean", "munb", "pobs0", "onempobs0"),
isize = NULL, ionempobs0 = NULL,
zero = c("size", "onempobs0"),
+ mds.min = 1e-3,
- probs.y = 0.35,
+ iprobs.y = NULL, # 0.35,
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
cutoff.prob = 0.999, # higher is better for large 'size'
eps.trig = 1e-7,
max.support = 4000, # 20160127; I have changed this
max.chunk.MB = 30, # max.memory = Inf is allowed
- gsize.mux = exp((-12:6)/2),
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
imethod = 1,
imunb = NULL,
- nsimEIM = 500,
- ishrinkage = 0.95) {
+ nsimEIM = 500) {
+
+
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
if (!is.Numeric(eps.trig, length.arg = 1,
@@ -1709,7 +1757,6 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
"Mean: onempobs0 * munb / (1 - (size / (size + ",
"munb))^size)"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 3)
@@ -1720,6 +1767,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
list(M1 = 3,
Q1 = 1,
expected = TRUE,
+ mds.min = .mds.min ,
multipleResponses = TRUE,
nsimEIM = .nsimEIM ,
parameters.names = c("munb", "size", "onempobs0"),
@@ -1728,13 +1776,14 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM, .eps.trig = eps.trig,
- .type.fitted = type.fitted
+ .type.fitted = type.fitted,
+ .mds.min = mds.min
))),
initialize = eval(substitute(expression({
M1 <- 3
- temp5 <-
+ temp16 <-
w.y.check(w = w, y = y,
Is.integer.y = TRUE,
Is.nonnegative.y = TRUE,
@@ -1743,8 +1792,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp16$w
+ y <- temp16$y
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
@@ -1767,12 +1816,53 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
+
if (!length(etastart)) {
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = TRUE,
- probs.y = .probs.y )
+
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 0 # Important to exclude the 0s
+ posyvec <- y[TFvec, jay]
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
+ }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(posyvec, w = w[TFvec, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = posNBD.Loglikfun2,
+ y = posyvec, # x = x[TFvec, , drop = FALSE],
+ w = w[TFvec, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+
pobs0.init <- matrix(if (length( .ionempobs0 )) 1 - .ionempobs0 else -1,
@@ -1786,25 +1876,6 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- munb <- extraargs
- sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
- log = TRUE))
- }
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- TFvec <- (y[, jay] > 0)
- size.init[, jay] <-
- grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
- y = y[TFvec, jay], # x = x[index.posy, ],
- w = w[TFvec, jay],
- extraargs = munb.init[TFvec, jay])
- }
- }
etastart <-
cbind(theta2eta(munb.init , .lmunb , earg = .emunb ),
@@ -1817,10 +1888,10 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}), list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
.eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
.ionempobs0 = ionempobs0, .imunb = imunb, .isize = isize,
- .gsize.mux = gsize.mux,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
.ipobs0.small = ipobs0.small,
- .imethod = imethod, .ishrinkage = ishrinkage,
- .probs.y = probs.y, .type.fitted = type.fitted ))),
+ .imethod = imethod,
+ .iprobs.y = iprobs.y, .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -1843,7 +1914,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
prob0 <- tempk^kmat # p(0) from negative binomial
oneminusf0 <- 1 - prob0
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
oneminusf0[big.size] <- -expm1(-munb[big.size])
@@ -1868,12 +1939,13 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
ans
}, list( .lonempobs0 = lonempobs0, .lsize = lsize, .lmunb = lmunb,
- .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
+ .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min ))),
last = eval(substitute(expression({
misc$link <-
- c(rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS),
- rep( .lonempobs0 , length = NOS))[
+ c(rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS),
+ rep_len( .lonempobs0 , NOS))[
interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <- c(mynames1,
mynames2,
@@ -1945,6 +2017,30 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
.eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize ))),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lsize , earg = .esize )
+ onempobs0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 )
+
+ okay1 <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ all(is.finite(onempobs0)) && all(0 < onempobs0 & onempobs0 < 1)
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-altered Poisson ",
+ "model instead.")
+ okay1 && overdispersion
+ }, list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
+ .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min))),
+
deriv = eval(substitute(expression({
@@ -1971,12 +2067,13 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
"try fitting a zero-altered Poisson ",
"model instead")
- kmat[big.size] <- munb[big.size] / smallval
+ kmat[big.size] <- munb[big.size] / smallval
}
@@ -2009,7 +2106,6 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
if (any(big.size)) {
- dl.dsize[big.size] <- 1e-8 # A small number
}
@@ -2039,11 +2135,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lonempobs0 = lonempobs0 , .lmunb = lmunb , .lsize = lsize ,
- .eonempobs0 = eonempobs0 , .emunb = emunb , .esize = esize ))),
-
-
-
-
+ .eonempobs0 = eonempobs0 , .emunb = emunb , .esize = esize,
+ .mds.min = mds.min ))),
@@ -2351,16 +2444,18 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
Phi.init <- if (length(phi.grid)) {
grid.search(phi.grid, objfun = zipois.Loglikfun,
- y = y[, jay], x = x, w = w[, jay],
+ y = y[, jay], w = w[, jay], # x = x,
extraargs = list(lambda = matL[, jay]))
} else {
pmax(ipstr0.small,
weighted.mean(y[, jay] == 0, w[, jay]) -
dpois(0, matL[, jay]))
}
- if (mean(Phi.init == ipstr0.small) > 0.95)
+ if (mean(Phi.init == ipstr0.small) > 0.95 &&
+ .lpstr0 != "identitylink")
warning("from the initial values only, the data appears to ",
- "have little or no 0-inflation")
+ "have little or no 0-inflation, and possibly ",
+ "0-deflation.")
matP[, jay] <- Phi.init
} # for (jay)
@@ -2416,8 +2511,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
misc$link <-
- c(rep( .lpstr00 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ c(rep_len( .lpstr00 , ncoly),
+ rep_len( .llambda , ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -2433,6 +2528,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
misc$expected <- TRUE
misc$multipleResponses <- TRUE
+ if (FALSE) {
+
misc$pobs0 <- phimat + (1 - phimat) * exp(-lambda) # P(Y=0)
if (length(dimnames(y)[[2]]) > 0)
dimnames(misc$pobs0) <- dimnames(y)
@@ -2440,6 +2537,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
misc$pstr0 <- phimat
if (length(dimnames(y)[[2]]) > 0)
dimnames(misc$pstr0) <- dimnames(y)
+ }
}), list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda,
.imethod = imethod ))),
@@ -2466,6 +2564,24 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+
+ okay1 <- all(is.finite(lambda)) && all(lambda > 0) &&
+ all(is.finite(phimat)) && all(phimat < 1)
+ deflat.limit <- -1 / expm1(lambda)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < phimat)))
+ warning("parameter 'pstr0' is too negative even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
+
+
simslot = eval(substitute(
function(object, nsim) {
@@ -2549,155 +2665,151 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
+ zipoissonff <-
+ function(llambda = "loge", lonempstr0 = "logit",
+ type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
+ ilambda = NULL, ionempstr0 = NULL,
+ gonempstr0 = NULL, # (1:9) / 10,
+ imethod = 1,
+ ishrinkage = 0.95, probs.y = 0.35,
+ zero = "onempstr0") {
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
- zibinomial <-
- function(lpstr0 = "logit", lprob = "logit",
- type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL,
- zero = NULL, # 20130917; was originally zero = 1,
- multiple.responses = FALSE, imethod = 1) {
- if (as.logical(multiple.responses))
- stop("argument 'multiple.responses' must be FALSE")
- lpstr0 <- as.list(substitute(lpstr0))
- epstr0 <- link2list(lpstr0)
- lpstr0 <- attr(epstr0, "function.name")
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ lonempstr0 <- as.list(substitute(lonempstr0))
+ eonempstr0 <- link2list(lonempstr0)
+ lonempstr0 <- attr(eonempstr0, "function.name")
+ ipstr0.small <- 1/64 # A number easily represented exactly
- if (is.Numeric(ipstr0))
- if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
- stop("'ipstr0' values must be inside the interval (0,1)")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ if (length(ilambda))
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("'ilambda' values must be positive")
+ if (length(ionempstr0))
+ if (!is.Numeric(ionempstr0, positive = TRUE) ||
+ any(ionempstr0 >= 1))
+ stop("'ionempstr0' values must be inside the interval (0,1)")
new("vglmff",
- blurb = c("Zero-inflated binomial\n\n",
+ blurb = c("Zero-inflated Poisson\n\n",
"Links: ",
- namesof("pstr0", lpstr0, earg = epstr0), ", ",
- namesof("prob" , lprob , earg = eprob ), "\n",
- "Mean: (1 - pstr0) * prob"),
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
+ "Mean: onempstr0 * lambda"),
constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
}), list( .zero = zero ))),
-
infos = eval(substitute(function(...) {
list(M1 = 2,
- type.fitted = .type.fitted ,
+ Q1 = 1,
expected = TRUE,
- multipleResponses = FALSE,
- parameters.names = c("pstr0", "prob"),
+ multipleResponses = TRUE,
+ parameters.names = c("lambda", "onempstr0"),
+ type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted
))),
initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w <- w
-
-
+ M1 <- 2
- if (NCOL(y) == 1) {
- if (is.factor(y))
- y <- y != levels(y)[1]
- nn <- rep(1, n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + w * y) / (1.0 + w)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
- no.successes <- y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y <- round(y)
- nvec <- y[, 1] + y[, 2]
- y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
- w <- w * nvec
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + nvec * y) / (1 + nvec)
- } else {
- stop("for the binomialff family, response 'y' must be a ",
- "vector of 0 and 1's\n",
- "or a factor ",
- "(first level = fail, other levels = success),\n",
- "or a 2-column matrix where col 1 is the no. of ",
- "successes and col 2 is the no. of failures")
- }
-
-
- if ( .imethod == 1)
- mustart <- (mustart + y) / 2
-
-
- extra$type.fitted <- .type.fitted
- extra$dimnamesy <- dimnames(y)
+ ncoly <- ncol(y)
+ extra$ncoly <- ncoly
+ extra$M1 <- M1
+ M <- M1 * ncoly
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
+ mynames1 <- param.names("lambda", ncoly)
+ mynames2 <- param.names("onempstr0", ncoly)
+ predictors.names <-
+ c(namesof(mynames1, .llambda , earg = .elambda , tag = FALSE),
+ namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
+ interleave.VGAM(M, M1 = M1)]
- predictors.names <-
- c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
- namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
+ if (!length(etastart)) {
+ matL <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ilambda , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+ matP <- matrix(if (length( .ionempstr0 )) .ionempstr0 else 0,
+ n, ncoly, byrow = TRUE)
+ phi0.grid <- .gonempstr0
+ ipstr0.small <- .ipstr0.small # A number easily represented exactly
- extra$w <- w # Needed for @linkinv
- phi.init <- if (length( .ipstr0 )) .ipstr0 else {
- prob0.est <- sum(w[y == 0]) / sum(w)
- if ( .imethod == 1) {
- (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+ if (!length( .ionempstr0 ))
+ for (jay in 1:ncoly) {
+ zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
+ sum(c(w) * dzipois(x = y, pstr0 = phival,
+ lambda = extraargs$lambda,
+ log = TRUE))
+ }
+ Phi0.init <- if (length(phi0.grid)) {
+ grid.search(phi0.grid,
+ objfun = zipois.Loglikfun,
+ y = y[, jay], x = x, w = w[, jay],
+ extraargs = list(lambda = matL[, jay]))
} else {
- prob0.est
+ pmax(ipstr0.small,
+ weighted.mean(y[, jay] == 0, w[, jay]) -
+ dpois(0, matL[, jay]))
}
- }
-
- phi.init[phi.init <= -0.10] <- 0.10 # Lots of sample variation
- phi.init[phi.init <= 0.05] <- 0.15 # Last resort
- phi.init[phi.init >= 0.80] <- 0.80 # Last resort
-
- if ( length(mustart) && !length(etastart))
- mustart <- cbind(rep(phi.init, len = n),
- mustart) # 1st coln not a real mu
- }), list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob,
- .ipstr0 = ipstr0,
- .type.fitted = type.fitted,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ if (mean(Phi0.init == ipstr0.small) > 0.95 &&
+ .lonempstr0 != "identitylink")
+ warning("from the initial values only, the data appears to ",
+ "have little or no 0-inflation, and possibly ",
+ "0-deflation.")
+ matP[, jay] <- Phi0.init
+ } # for (jay)
- orig.w <- if (length(tmp3 <- extra$orig.w)) tmp3 else
- rep(1, len = nrow(eta))
- priorw <- extra$w
- nvec <- priorw / orig.w
+ etastart <-
+ cbind(theta2eta( matL, .llambda , earg = .elambda ),
+ theta2eta(1 - matP, .lonempstr0 , earg = .eonempstr0 ))[,
+ interleave.VGAM(M, M1 = M1)]
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda,
+ .ionempstr0 = ionempstr0, .ilambda = ilambda,
+ .gonempstr0 = gonempstr0,
+ .type.fitted = type.fitted, .probs.y = probs.y,
+ .ipstr0.small = ipstr0.small,
+ .imethod = imethod, .ishrinkage = ishrinkage ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
"Returning the 'mean'.")
@@ -2705,128 +2817,193 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
+
+ M1 <- 2
+ ncoly <- ncol(eta) / M1
+ lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
+ earg = .eonempstr0 )
+
ans <- switch(type.fitted,
- "mean" = (1 - pstr0) * mubin,
- "prob" = mubin,
- "pobs0" = pstr0 + (1-pstr0)*(1-mubin)^nvec, # P(Y=0)
- "pstr0" = pstr0,
- "onempstr0" = 1 - pstr0)
+ "mean" = onempstr0 * lambda,
+ "lambda" = lambda,
+ "pobs0" = 1 + onempstr0 * expm1(-lambda), # P(Y=0)
+ "pstr0" = 1 - onempstr0,
+ "onempstr0" = onempstr0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
colnames(ans) <- NULL
}
ans
- }, list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob,
+ }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- misc$link <- c("pstr0" = .lpstr0 , "prob" = .lprob )
+ M1 <- extra$M1
+ misc$link <-
+ c(rep_len( .llambda , ncoly),
+ rep_len( .lonempstr0 , ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+ names(misc$link) <- temp.names
- misc$earg <- list("pstr0" = .epstr0 , "prob" = .eprob )
+ misc$earg <- vector("list", M1 * ncoly)
+ names(misc$earg) <- temp.names
+ for (ii in 1:ncoly) {
+ misc$earg[[M1*ii-1]] <- .elambda
+ misc$earg[[M1*ii ]] <- .eonempstr0
+ }
+
+ misc$M1 <- M1
misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ if (FALSE) {
- }), list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob,
+ misc$pobs0 <- (1 - onempstr0) + onempstr0 * exp(-lambda) # P(Y=0)
+ misc$pobs0 <- as.matrix(misc$pobs0)
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pobs0) <- dimnames(y)
+
+ misc$pstr0 <- (1 - onempstr0)
+ misc$pstr0 <- as.matrix(misc$pstr0)
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pstr0) <- dimnames(y)
+ }
+ }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda,
.imethod = imethod ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- cbind(theta2eta(mu[, 1], .lpstr0 , earg = .epstr0 ),
- theta2eta(mu[, 2], .lprob , earg = .eprob ))
- }, list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+
+
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <-
- dzibinom(x = round(w * y), size = w, prob = mubin,
- log = TRUE, pstr0 = pstr0)
+ ll.elts <- c(w) *
+ dzipois(x = y, pstr0 = 1 - onempstr0, lambda = lambda,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob ))),
- vfamily = c("zibinomial"),
- deriv = eval(substitute(expression({
- phi <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda ))),
+ vfamily = c("zipoissonff"),
- prob0 <- (1 - mubin)^w # Actually q^w
- pobs0 <- phi + (1 - phi) * prob0
- index <- (y == 0)
- dl.dphi <- (1 - prob0) / pobs0
- dl.dphi[!index] <- -1 / (1 - phi[!index])
- dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / pobs0
- dl.dmubin[!index] <- w[!index] *
- ( y[!index] / mubin[!index] -
- (1 - y[!index]) / (1 - mubin[!index]))
- dphi.deta <- dtheta.deta(phi, .lpstr0 , earg = .epstr0 )
- dmubin.deta <- dtheta.deta(mubin, .lprob , earg = .eprob )
+ simslot = eval(substitute(
+ function(object, nsim) {
- ans <- cbind(dl.dphi * dphi.deta,
- dl.dmubin * dmubin.deta)
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ rzipois(nsim * length(lambda), lambda = lambda, pstr0 = 1 - onempstr0)
+ }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda ))),
- if ( .lprob == "logit") {
- ans[!index, 2] <- w[!index] * (y[!index] - mubin[!index])
- }
- ans
- }), list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob ))),
- weight = eval(substitute(expression({
- wz <- matrix(NA_real_, nrow = n, ncol = dimm(M))
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ okay1 <- all(is.finite(lambda )) && all(0 < lambda ) &&
+ all(is.finite(onempstr0)) && all(0 < onempstr0)
+ deflat.limit <- -1 / expm1(lambda)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(onempstr0 < 1 - deflat.limit)))
+ warning("parameter 'onempstr0' is too positive even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda ))),
- ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * pobs0)
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ ncoly <- ncol(eta) / M1 # extra$ncoly
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
- ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / pobs0
+ dlambda.deta <- dtheta.deta(lambda , .llambda ,
+ earg = .elambda )
+ donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
+ earg = .eonempstr0 )
+ denom <- 1 + onempstr0 * expm1(-lambda)
+ ind0 <- (y == 0)
+ dl.dlambda <- -onempstr0 * exp(-lambda) / denom
+ dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
+ dl.donempstr0 <- expm1(-lambda) / denom
+ dl.donempstr0[!ind0] <- 1 / onempstr0[!ind0]
+ ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
+ dl.donempstr0 * donempstr0.deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
+ if ( .llambda == "loge" && is.empty.list( .elambda ) &&
+ any(lambda[!ind0] < .Machine$double.eps)) {
+ for (spp. in 1:ncoly) {
+ ans[!ind0[, spp.], M1 * spp.] <-
+ w[!ind0[, spp.]] *
+ (y[!ind0[, spp.], spp.] - lambda[!ind0[, spp.], spp.])
+ }
+ }
- ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
- (1 - mubin - w * mubin *
- (1 - mubin)^w * phi / pobs0)
+ ans
+ }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
+ .eonempstr0 = eonempstr0, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ ned2l.dlambda2 <- ( onempstr0) / lambda -
+ onempstr0 * (1 - onempstr0) * exp(-lambda) / denom
+ ned2l.donempstr0.2 <- -expm1(-lambda) / ((onempstr0) * denom)
+ ned2l.dphilambda <- +exp(-lambda) / denom
+ wz <- array(c(c(w) * ned2l.dlambda2 * dlambda.deta^2,
+ c(w) * ned2l.donempstr0.2 * donempstr0.deta^2,
+ c(w) * ned2l.dphilambda * donempstr0.deta * dlambda.deta),
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
- wz[,iam(1, 1, M)] <- ned2l.dphi2 * dphi.deta^2
- wz[,iam(2, 2, M)] <- ned2l.dmubin2 * dmubin.deta^2
- wz[,iam(1, 2, M)] <- ned2l.dphimubin * dphi.deta * dmubin.deta
- if (TRUE) {
- ind6 <- (wz[, iam(2, 2, M)] < .Machine$double.eps)
- if (any(ind6))
- wz[ind6, iam(2, 2, M)] <- .Machine$double.eps
- }
wz
- }), list( .lpstr0 = lpstr0, .lprob = lprob,
- .epstr0 = epstr0, .eprob = eprob ))))
+ }), list( .llambda = llambda ))))
}
@@ -2834,36 +3011,34 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
- zibinomialff <-
- function(lprob = "logit", lonempstr0 = "logit",
- type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
- ionempstr0 = NULL,
- zero = "onempstr0",
- multiple.responses = FALSE, imethod = 1) {
-
-
+ zibinomial <-
+ function(lpstr0 = "logit", lprob = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ ipstr0 = NULL,
+ zero = NULL, # 20130917; was originally zero = 1,
+ multiple.responses = FALSE, imethod = 1) {
if (as.logical(multiple.responses))
stop("argument 'multiple.responses' must be FALSE")
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr0 <- link2list(lpstr0)
+ lpstr0 <- attr(epstr0, "function.name")
+
lprob <- as.list(substitute(lprob))
eprob <- link2list(lprob)
lprob <- attr(eprob, "function.name")
- lonempstr0 <- as.list(substitute(lonempstr0))
- eonempstr0 <- link2list(lonempstr0)
- lonempstr0 <- attr(eonempstr0, "function.name")
-
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
- if (is.Numeric(ionempstr0))
- if (!is.Numeric(ionempstr0, positive = TRUE) || any(ionempstr0 >= 1))
- stop("'ionempstr0' values must be inside the interval (0,1)")
+ if (is.Numeric(ipstr0))
+ if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
+ stop("'ipstr0' values must be inside the interval (0,1)")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
@@ -2874,11 +3049,11 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
new("vglmff",
blurb = c("Zero-inflated binomial\n\n",
"Links: ",
- namesof("prob" , lprob , earg = eprob ), ", ",
- namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
- "Mean: onempstr0 * prob"),
+ namesof("pstr0", lpstr0, earg = epstr0), ", ",
+ namesof("prob" , lprob , earg = eprob ), "\n",
+ "Mean: (1 - pstr0) * prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
}), list( .zero = zero ))),
@@ -2886,16 +3061,15 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
infos = eval(substitute(function(...) {
list(M1 = 2,
- Q1 = NA,
+ type.fitted = .type.fitted ,
expected = TRUE,
multipleResponses = FALSE,
- parameters.names = c("prob", "onempstr0"),
- type.fitted = .type.fitted ,
+ parameters.names = c("pstr0", "prob"),
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted
))),
-
+
initialize = eval(substitute(expression({
if (!all(w == 1))
extra$orig.w <- w
@@ -2905,7 +3079,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
if (NCOL(y) == 1) {
if (is.factor(y))
y <- y != levels(y)[1]
- nn <- rep(1, n)
+ nn <- rep_len(1, n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
if (!length(mustart) && !length(etastart))
@@ -2950,40 +3124,39 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
predictors.names <-
- c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
- namesof("onempstr0", .lonempstr0 , earg = .eonempstr0 , tag = FALSE))
+ c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
+ namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
extra$w <- w # Needed for @linkinv
- onemphi.init <- if (length( .ionempstr0 )) .ionempstr0 else {
+ phi.init <- if (length( .ipstr0 )) .ipstr0 else {
prob0.est <- sum(w[y == 0]) / sum(w)
if ( .imethod == 1) {
- 1 - (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+ (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
} else {
- 1 - prob0.est
+ prob0.est
}
}
- onemphi.init[onemphi.init <= -0.10] <- 0.10 # Lots of sample variation
- onemphi.init[onemphi.init <= 0.05] <- 0.15 # Last resort
- onemphi.init[onemphi.init >= 0.80] <- 0.80 # Last resort
+ phi.init[phi.init <= -0.10] <- 0.10 # Lots of sample variation
+ phi.init[phi.init <= 0.05] <- 0.15 # Last resort
+ phi.init[phi.init >= 0.80] <- 0.80 # Last resort
if ( length(mustart) && !length(etastart))
- mustart <- cbind(mustart,
- rep(onemphi.init, len = n)) # 1st coln not a real mu
-
- }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob,
- .ionempstr0 = ionempstr0,
+ mustart <- cbind(rep_len(phi.init, n),
+ mustart) # 1st coln not a real mu
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob,
+ .ipstr0 = ipstr0,
.type.fitted = type.fitted,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+ pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
orig.w <- if (length(tmp3 <- extra$orig.w)) tmp3 else
- rep(1, len = nrow(eta))
+ rep_len(1, nrow(eta))
priorw <- extra$w
nvec <- priorw / orig.w
@@ -2998,11 +3171,11 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
- "mean" = (onempstr0) * mubin,
+ "mean" = (1 - pstr0) * mubin,
"prob" = mubin,
- "pobs0" = 1 - onempstr0 + (onempstr0)*(1-mubin)^nvec, # P(Y=0)
- "pstr0" = 1 - onempstr0,
- "onempstr0" = onempstr0)
+ "pobs0" = pstr0 + (1-pstr0)*(1-mubin)^nvec, # P(Y=0)
+ "pstr0" = pstr0,
+ "onempstr0" = 1 - pstr0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
@@ -3014,95 +3187,117 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
colnames(ans) <- NULL
}
ans
- }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob,
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- misc$link <- c("prob" = .lprob , "onempstr0" = .lonempstr0 )
+ misc$link <- c("pstr0" = .lpstr0 , "prob" = .lprob )
- misc$earg <- list("prob" = .eprob , "onempstr0" = .eonempstr0 )
+ misc$earg <- list("pstr0" = .epstr0 , "prob" = .eprob )
misc$imethod <- .imethod
- misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w # [1] # P(Y=0)
- misc$pstr0 <- phi
- }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob,
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob,
.imethod = imethod ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- cbind(theta2eta(mu[, 1], .lprob , earg = .eprob ),
- theta2eta(mu[, 2], .lonempstr0 , earg = .eonempstr0 ))
- }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ cbind(theta2eta(mu[, 1], .lpstr0 , earg = .epstr0 ),
+ theta2eta(mu[, 2], .lprob , earg = .eprob ))
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+ pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <-
dzibinom(x = round(w * y), size = w, prob = mubin,
- log = TRUE, pstr0 = 1 - onempstr0)
+ log = TRUE, pstr0 = pstr0)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob ))),
- vfamily = c("zibinomialff"),
- deriv = eval(substitute(expression({
- mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
- omphi <- onempstr0
- phi <- 1 - onempstr0
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+ vfamily = c("zibinomial"),
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ probb <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ size <- extra$w
+
+ okay1 <- all(is.finite(probb)) && all(0 < probb) &&
+ all(is.finite(pstr0)) && all(pstr0 < 1)
+ prob0 <- (1 - probb)^size
+ Prob0.check <- dbinom(0, size = size, prob = probb)
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr0)))
+ warning("parameter 'pstr0' is too negative even allowing for ",
+ "0-deflation.")
+
+ okay1 && okay2.deflat
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+
+
+
+
+ deriv = eval(substitute(expression({
+ phi <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+
prob0 <- (1 - mubin)^w # Actually q^w
- pobs0 <- phi + (omphi) * prob0
+ pobs0 <- phi + (1 - phi) * prob0
index <- (y == 0)
- dl.domphi <- -(1 - prob0) / pobs0 # Note "-"
- dl.domphi[!index] <- +1 / (omphi[!index]) # Note "+"
+ dl.dphi <- (1 - prob0) / pobs0
+ dl.dphi[!index] <- -1 / (1 - phi[!index])
- dl.dmubin <- -w * (omphi) * (1 - mubin)^(w - 1) / pobs0
+ dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / pobs0
dl.dmubin[!index] <- w[!index] *
( y[!index] / mubin[!index] -
(1 - y[!index]) / (1 - mubin[!index]))
- dmubin.deta <- dtheta.deta(mubin, .lprob , earg = .eprob )
- domphi.deta <- dtheta.deta(omphi, .lonempstr0 , earg = .eonempstr0 )
+ dphi.deta <- dtheta.deta(phi, .lpstr0 , earg = .epstr0 )
+ dmubin.deta <- dtheta.deta(mubin, .lprob , earg = .eprob )
- ans <- cbind(dl.dmubin * dmubin.deta,
- dl.domphi * domphi.deta)
+ ans <- cbind(dl.dphi * dphi.deta,
+ dl.dmubin * dmubin.deta)
if ( .lprob == "logit") {
- ans[!index, 1] <- w[!index] * (y[!index] - mubin[!index])
+ ans[!index, 2] <- w[!index] * (y[!index] - mubin[!index])
}
ans
- }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
weight = eval(substitute(expression({
wz <- matrix(NA_real_, nrow = n, ncol = dimm(M))
- ned2l.domphi2 <- (1 - prob0) / ((omphi) * pobs0)
+ ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * pobs0)
- ned2l.domphimubin <- +w * ((1 - mubin)^(w - 1)) / pobs0 # Note "+"
+ ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / pobs0
- ned2l.dmubin2 <- (w * (omphi) / (mubin * (1 - mubin)^2)) *
+
+ ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
(1 - mubin - w * mubin *
(1 - mubin)^w * phi / pobs0)
@@ -3110,17 +3305,17 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
- wz[,iam(1, 1, M)] <- ned2l.dmubin2 * dmubin.deta^2
- wz[,iam(2, 2, M)] <- ned2l.domphi2 * domphi.deta^2
- wz[,iam(1, 2, M)] <- ned2l.domphimubin * domphi.deta * dmubin.deta
+ wz[,iam(1, 1, M)] <- ned2l.dphi2 * dphi.deta^2
+ wz[,iam(2, 2, M)] <- ned2l.dmubin2 * dmubin.deta^2
+ wz[,iam(1, 2, M)] <- ned2l.dphimubin * dphi.deta * dmubin.deta
if (TRUE) {
- ind6 <- (wz[, iam(1, 1, M)] < .Machine$double.eps)
+ ind6 <- (wz[, iam(2, 2, M)] < .Machine$double.eps)
if (any(ind6))
- wz[ind6, iam(1, 1, M)] <- .Machine$double.eps
+ wz[ind6, iam(2, 2, M)] <- .Machine$double.eps
}
wz
- }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
- .eonempstr0 = eonempstr0, .eprob = eprob ))))
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))))
}
@@ -3128,235 +3323,402 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
+ zibinomialff <-
+ function(lprob = "logit", lonempstr0 = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ ionempstr0 = NULL,
+ zero = "onempstr0",
+ multiple.responses = FALSE, imethod = 1) {
-dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
- LLL <- max(length(x), length(size), length(prob), length(pstr0))
- if (length(x) != LLL) x <- rep(x, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans <- dbinom(x = x, size = size, prob = prob, log = TRUE)
+ if (as.logical(multiple.responses))
+ stop("argument 'multiple.responses' must be FALSE")
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
- ans <- if (log.arg) {
- ifelse(x == 0, log(pstr0 + (1-pstr0) * exp(ans)), log1p(-pstr0) + ans)
- } else {
- ifelse(x == 0, pstr0 + (1-pstr0) * exp(ans) ,
- (1-pstr0) * exp(ans))
- }
+ lonempstr0 <- as.list(substitute(lonempstr0))
+ eonempstr0 <- link2list(lonempstr0)
+ lonempstr0 <- attr(eonempstr0, "function.name")
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
- prob0 <- (1 - prob)^size
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
+ if (is.Numeric(ionempstr0))
+ if (!is.Numeric(ionempstr0, positive = TRUE) || any(ionempstr0 >= 1))
+ stop("'ionempstr0' values must be inside the interval (0,1)")
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- ans
-}
-pzibinom <- function(q, size, prob, pstr0 = 0,
- lower.tail = TRUE, log.p = FALSE) {
+ new("vglmff",
+ blurb = c("Zero-inflated binomial\n\n",
+ "Links: ",
+ namesof("prob" , lprob , earg = eprob ), ", ",
+ namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
+ "Mean: onempstr0 * prob"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
+ }), list( .zero = zero ))),
- LLL <- max(length(pstr0), length(size), length(prob), length(q))
- if (length(q) != LLL) q <- rep(q, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- ans <- pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
- ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob", "onempstr0"),
+ type.fitted = .type.fitted ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .type.fitted = type.fitted
+ ))),
+
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w <- w
- prob0 <- (1 - prob)^size
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
- ans
-}
+ if (NCOL(y) == 1) {
+ if (is.factor(y))
+ y <- y != levels(y)[1]
+ nn <- rep_len(1, n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + w * y) / (1.0 + w)
-qzibinom <- function(p, size, prob, pstr0 = 0,
- lower.tail = TRUE, log.p = FALSE) {
- LLL <- max(length(p), length(size), length(prob), length(pstr0))
- p <- rep(p, length = LLL)
- size <- rep(size, length = LLL)
- prob <- rep(prob, length = LLL)
- pstr0 <- rep(pstr0, length = LLL)
+ no.successes <- y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor ",
+ "(first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
- ans <- p
- ans[p <= pstr0] <- 0
- ans[p > pstr0] <-
- qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
- size[p > pstr0],
- prob[p > pstr0],
- lower.tail = lower.tail, log.p = log.p)
+ if ( .imethod == 1)
+ mustart <- (mustart + y) / 2
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
- prob0 <- (1 - prob)^size
- deflat.limit <- -prob0 / (1 - prob0)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[p[ind0] <= pobs0] <- 0
- pindex <- (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
- size = size[pindex],
- prob = prob[pindex])
- }
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
- ans
-}
+ predictors.names <-
+ c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
+ namesof("onempstr0", .lonempstr0 , earg = .eonempstr0 , tag = FALSE))
-rzibinom <- function(n, size, prob, pstr0 = 0) {
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ extra$w <- w # Needed for @linkinv
+ onemphi.init <- if (length( .ionempstr0 )) .ionempstr0 else {
+ prob0.est <- sum(w[y == 0]) / sum(w)
+ if ( .imethod == 1) {
+ 1 - (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+ } else {
+ 1 - prob0.est
+ }
+ }
- pstr0 <- rep(pstr0, len = use.n)
- size <- rep(size, len = use.n)
- prob <- rep(prob, len = use.n)
+ onemphi.init[onemphi.init <= -0.10] <- 0.10 # Lots of sample variation
+ onemphi.init[onemphi.init <= 0.05] <- 0.15 # Last resort
+ onemphi.init[onemphi.init >= 0.80] <- 0.80 # Last resort
- ans <- rbinom(use.n, size, prob)
- ans[runif(use.n) < pstr0] <- 0
+ if ( length(mustart) && !length(etastart))
+ mustart <- cbind(mustart,
+ rep_len(onemphi.init, n)) # 1st coln not a real mu
+ }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob,
+ .ionempstr0 = ionempstr0,
+ .type.fitted = type.fitted,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
- prob0 <- (1 - prob)^size
- deflat.limit <- -prob0 / (1 - prob0)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] <- rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
- ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
- }
+ orig.w <- if (length(tmp3 <- extra$orig.w)) tmp3 else
+ rep_len(1, nrow(eta))
+ priorw <- extra$w
+ nvec <- priorw / orig.w
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
- ans
-}
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+
+ ans <- switch(type.fitted,
+ "mean" = (onempstr0) * mubin,
+ "prob" = mubin,
+ "pobs0" = 1 - onempstr0 + (onempstr0)*(1-mubin)^nvec, # P(Y=0)
+ "pstr0" = 1 - onempstr0,
+ "onempstr0" = onempstr0)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
+ }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob,
+ .type.fitted = type.fitted ))),
+ last = eval(substitute(expression({
+ misc$link <- c("prob" = .lprob , "onempstr0" = .lonempstr0 )
+ misc$earg <- list("prob" = .eprob , "onempstr0" = .eonempstr0 )
+ misc$imethod <- .imethod
+ misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w # [1] # P(Y=0)
+ misc$pstr0 <- phi
+ }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob,
+ .imethod = imethod ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ cbind(theta2eta(mu[, 1], .lprob , earg = .eprob ),
+ theta2eta(mu[, 2], .lonempstr0 , earg = .eonempstr0 ))
+ }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ dzibinom(x = round(w * y), size = w, prob = mubin,
+ log = TRUE, pstr0 = 1 - onempstr0)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ vfamily = c("zibinomialff"),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ probb <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+ size <- extra$w
+
+ okay1 <- all(is.finite(probb)) && all(0 < probb) &&
+ all(is.finite(onempstr0)) && all(0 < onempstr0)
+ prob0 <- (1 - probb)^size
+ Prob0.check <- dbinom(0, size = size, prob = probb)
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(onempstr0 < 1 - deflat.limit)))
+ warning("parameter 'onempstr0' is too positive even allowing for ",
+ "0-deflation.")
+
+ okay1 && okay2.deflat
+ }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ deriv = eval(substitute(expression({
+ mubin <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
+ omphi <- onempstr0
+ phi <- 1 - onempstr0
-dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
- log = FALSE) {
- if (length(munb)) {
- if (length(prob))
- stop("arguments 'prob' and 'munb' both specified")
- prob <- size / (size + munb)
- }
+ prob0 <- (1 - mubin)^w # Actually q^w
+ pobs0 <- phi + (omphi) * prob0
+ index <- (y == 0)
+ dl.domphi <- -(1 - prob0) / pobs0 # Note "-"
+ dl.domphi[!index] <- +1 / (omphi[!index]) # Note "+"
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
+ dl.dmubin <- -w * (omphi) * (1 - mubin)^(w - 1) / pobs0
+ dl.dmubin[!index] <- w[!index] *
+ ( y[!index] / mubin[!index] -
+ (1 - y[!index]) / (1 - mubin[!index]))
+ dmubin.deta <- dtheta.deta(mubin, .lprob , earg = .eprob )
+ domphi.deta <- dtheta.deta(omphi, .lonempstr0 , earg = .eonempstr0 )
- LLL <- max(length(pstr0), length(size), length(prob), length(x))
- if (length(x) != LLL) x <- rep(x, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+ ans <- cbind(dl.dmubin * dmubin.deta,
+ dl.domphi * domphi.deta)
+ if ( .lprob == "logit") {
+ ans[!index, 1] <- w[!index] * (y[!index] - mubin[!index])
+ }
- ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
+ ans
+ }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(NA_real_, nrow = n, ncol = dimm(M))
- ans <- if (log.arg)
- ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
- ifelse(x == 0, pstr0+(1-pstr0)* ans, (1-pstr0) * ans)
+ ned2l.domphi2 <- (1 - prob0) / ((omphi) * pobs0)
- prob0 <- prob^size
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
+ ned2l.domphimubin <- +w * ((1 - mubin)^(w - 1)) / pobs0 # Note "+"
- ans
-}
-pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
- if (length(munb)) {
- if (length(prob))
- stop("arguments 'prob' and 'munb' both specified")
- prob <- size / (size + munb)
- }
- LLL <- max(length(pstr0), length(size), length(prob), length(q))
- if (length(q) != LLL) q <- rep(q, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+ ned2l.dmubin2 <- (w * (omphi) / (mubin * (1 - mubin)^2)) *
+ (1 - mubin - w * mubin *
+ (1 - mubin)^w * phi / pobs0)
- ans <- pnbinom(q = q, size = size, prob = prob)
- ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
- prob0 <- prob^size
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
- ans
+ wz[,iam(1, 1, M)] <- ned2l.dmubin2 * dmubin.deta^2
+ wz[,iam(2, 2, M)] <- ned2l.domphi2 * domphi.deta^2
+ wz[,iam(1, 2, M)] <- ned2l.domphimubin * domphi.deta * dmubin.deta
+ if (TRUE) {
+ ind6 <- (wz[, iam(1, 1, M)] < .Machine$double.eps)
+ if (any(ind6))
+ wz[ind6, iam(1, 1, M)] <- .Machine$double.eps
+ }
+ wz
+ }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
+ .eonempstr0 = eonempstr0, .eprob = eprob ))))
}
-qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
- if (length(munb)) {
- if (length(prob))
- stop("arguments 'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- LLL <- max(length(p), length(prob), length(pstr0), length(size))
- if (length(p) != LLL) p <- rep(p, len = LLL)
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL)
- if (length(size) != LLL) size <- rep(size, len = LLL);
- ans <- p
- ind4 <- (p > pstr0)
- ans[!ind4] <- 0
- ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
- size = size[ind4], prob = prob[ind4])
- prob0 <- prob^size
+
+
+dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(size), length(prob), length(pstr0))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+
+ ans <- dbinom(x = x, size = size, prob = prob, log = TRUE)
+
+
+ ans <- if (log.arg) {
+ ifelse(x == 0, log(pstr0 + (1-pstr0) * exp(ans)), log1p(-pstr0) + ans)
+ } else {
+ ifelse(x == 0, pstr0 + (1-pstr0) * exp(ans) ,
+ (1-pstr0) * exp(ans))
+ }
+
+
+ prob0 <- (1 - prob)^size
+ deflat.limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+
+ ans
+}
+
+
+
+pzibinom <- function(q, size, prob, pstr0 = 0
+ ) {
+
+ LLL <- max(length(pstr0), length(size), length(prob), length(q))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+
+ ans <- pbinom(q, size, prob) # lower.tail = lower.tail, log.p = log.p
+ ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+
+
+ prob0 <- (1 - prob)^size
+ deflat.limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+ ans
+}
+
+
+
+qzibinom <- function(p, size, prob, pstr0 = 0
+ ) {
+ LLL <- max(length(p), length(size), length(prob), length(pstr0))
+ p <- rep_len(p, LLL)
+ size <- rep_len(size, LLL)
+ prob <- rep_len(prob, LLL)
+ pstr0 <- rep_len(pstr0, LLL)
+
+
+ ans <- p
+ ans[p <= pstr0] <- 0
+ ans[p > pstr0] <-
+ qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
+ size[p > pstr0],
+ prob[p > pstr0])
+
+
+
+ prob0 <- (1 - prob)^size
deflat.limit <- -prob0 / (1 - prob0)
ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
if (any(ind0)) {
@@ -3364,55 +3726,96 @@ qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
ans[p[ind0] <= pobs0] <- 0
pindex <- (1:LLL)[ind0 & (p > pobs0)]
Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] <- qposnegbin((p[pindex] - Pobs0) / (1 - Pobs0),
- size = size[pindex],
- prob = prob[pindex])
+ ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
+ size = size[pindex],
+ prob = prob[pindex])
}
-
ans[pstr0 < deflat.limit] <- NaN
ans[pstr0 > 1] <- NaN
-
ans
}
+rzibinom <- function(n, size, prob, pstr0 = 0) {
-rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+ qzibinom(runif(n), size, prob, pstr0 = pstr0)
+}
+
+
+
+
+
+
+
+
+
+
+
+
+dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
+ log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
prob <- size / (size + munb)
}
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- pstr0 <- rep(pstr0, len = use.n)
- size <- rep(size, len = use.n)
- prob <- rep(prob, len = use.n)
+ LLL <- max(length(pstr0), length(size), length(prob), length(x))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+
+ ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
- ans <- rnbinom(n = use.n, size = size, prob = prob)
- ans <- ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
+ ans <- if (log.arg)
+ ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
+ ifelse(x == 0, pstr0+(1-pstr0)* ans, (1-pstr0) * ans)
- prob0 <- rep(prob^size, len = use.n)
+ prob0 <- prob^size
deflat.limit <- -prob0 / (1 - prob0)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0, na.rm = TRUE)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] <- rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
- prob = prob[ind0])
- ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+
+ ans
+}
+
+
+
+pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
}
+ LLL <- max(length(pstr0), length(size), length(prob), length(q))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+
+
+
+ ans <- pnbinom(q = q, size = size, prob = prob)
+ ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+
+
+
+ prob0 <- prob^size
+ deflat.limit <- -prob0 / (1 - prob0)
ans[pstr0 < deflat.limit] <- NaN
ans[pstr0 > 1] <- NaN
@@ -3421,6 +3824,51 @@ rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size/(size + munb)
+ }
+ LLL <- max(length(p), length(prob), length(pstr0), length(size),
+ length(munb))
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(munb) != LLL) munb <- rep_len(munb, LLL)
+
+ ans <- rep_len(NA_real_, LLL)
+ prob0 <- prob^size
+ deflat.limit <- -prob0 / (1 - prob0)
+
+ ans[p <= pstr0] <- 0
+ ind4 <- (pstr0 < p) & (deflat.limit <= pstr0)
+ ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
+ size = size[ind4], prob = prob[ind4])
+
+
+
+
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[1 < pstr0] <- NaN
+
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
+
+
+ ans
+}
+
+
+
+rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+
+ qzinegbin(runif(n), size = size, prob = prob, munb = munb, pstr0 = pstr0)
+}
+
+
+
@@ -3435,6 +3883,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
function(
zero = "size",
type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"),
+ mds.min = 1e-3,
nsimEIM = 500,
cutoff.prob = 0.999, # higher is better for large 'size'
eps.trig = 1e-7,
@@ -3444,12 +3893,18 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
imethod = 1,
ipstr0 = NULL,
imunb = NULL,
- probs.y = 0.35,
- ishrinkage = 0.95,
+ iprobs.y = NULL,
isize = NULL,
- gsize.mux = exp((-12:6)/2)) {
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
+
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
lpstr0 <- as.list(substitute(lpstr0))
@@ -3496,7 +3951,6 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
namesof("size", lsize, earg = esize, tag = FALSE), "\n",
"Mean: (1 - pstr0) * munb"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 3)
@@ -3507,6 +3961,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
list(M1 = 3,
Q1 = 1,
expected = TRUE,
+ mds.min = .mds.min ,
multipleResponses = FALSE,
parameters.names = c("pstr0", "munb", "size"),
eps.trig = .eps.trig ,
@@ -3515,14 +3970,14 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM, .eps.trig = eps.trig,
- .type.fitted = type.fitted
- ))),
+ .type.fitted = type.fitted,
+ .mds.min = mds.min))),
initialize = eval(substitute(expression({
M1 <- 3
- temp5 <-
+ temp16 <-
w.y.check(w = w, y = y,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
@@ -3531,8 +3986,8 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp16$w
+ y <- temp16$y
@@ -3552,35 +4007,54 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
interleave.VGAM(M1*NOS, M1 = M1)]
- if (!length(etastart)) {
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default is NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = TRUE,
- probs.y = .probs.y )
+ if (!length(etastart)) {
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- munb <- extraargs
- sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
- }
-
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- TFvec <- (y[, jay] > 0)
- size.init[, jay] <-
- grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
- y = y[TFvec, jay], # x = x[TFvec, ],
- w = w[TFvec, jay],
- extraargs = munb.init[TFvec, jay])
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 0 # Important to exclude the 0s
+ posyvec <- y[TFvec, jay]
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
}
- }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(posyvec, w = w[TFvec, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = posNBD.Loglikfun2,
+ y = posyvec, # x = x[TFvec, , drop = FALSE],
+ w = w[TFvec, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+
@@ -3594,9 +4068,11 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
weighted.mean(y[, jay] == 0, w[, jay]) -
dnbinom(0, mu = munb.init[, jay],
size = size.init[, jay]))
- if (mean(Phi.init == ipstr0.small) > 0.95)
+ if (mean(Phi.init == ipstr0.small) > 0.95 &&
+ .lpstr0 != "identitylink")
warning("from the initial values only, the data appears to ",
- "have little or no 0-inflation")
+ "have little or no 0-inflation, and possibly ",
+ "0-deflation.")
pstr0.init[, jay] <- Phi.init
} # for (jay)
}
@@ -3616,9 +4092,9 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
.ipstr0 = ipstr0, .imunb = imunb, .isize = isize,
- .gsize.mux = gsize.mux,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
.type.fitted = type.fitted,
- .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .iprobs.y = iprobs.y,
.ipstr0.small = ipstr0.small,
.imethod = imethod ))),
@@ -3645,7 +4121,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
prob0 <- tempk^kmat # p(0) from negative binomial
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
}
@@ -3671,13 +4147,13 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
ans
}, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
.epstr0 = epstr0, .esize = esize, .emunb = emunb,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted, .mds.min = mds.min ))),
last = eval(substitute(expression({
misc$link <-
- c(rep( .lpstr0 , length = NOS),
- rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
+ c(rep_len( .lpstr0 , NOS),
+ rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <-
c(mynames1,
mynames2,
@@ -3700,15 +4176,13 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$expected <- TRUE
- misc$ishrinkage <- .ishrinkage
misc$multipleResponses <- TRUE
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
.ipstr0 = ipstr0, .isize = isize,
.nsimEIM = nsimEIM, .imethod = imethod,
.cutoff.prob = cutoff.prob,
- .max.chunk.MB = max.chunk.MB,
- .ishrinkage = ishrinkage
+ .max.chunk.MB = max.chunk.MB
))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
@@ -3756,7 +4230,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
- validparams = eval(substitute(function(eta, extra = NULL) {
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
M1 <- 3
NOS <- ncol(eta) / M1
@@ -3767,21 +4241,30 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
size <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lsize , earg = .esize )
- smallval <- 1e-3
- ans <- all(is.finite(munb)) && all(munb > 0) &&
- all(is.finite(size)) && all(size > 0) &&
- all(is.finite(pstr0)) && all(pstr0 > 0) &&
- all(pstr0 < 1) &&
- (overdispersion <- all(munb / size > smallval))
+
+ okay1 <- all(is.finite(munb)) && all(0 < munb) &&
+ all(is.finite(size)) && all(0 < size) &&
+ all(is.finite(pstr0)) && all(pstr0 < 1)
+ prob <- size / (size + munb)
+ prob0 <- prob^size
+ Prob0.check <- dnbinom(0, size = size, prob = prob)
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr0)))
+ warning("parameter 'pstr0' is too negative even allowing for ",
+ "0-deflation.")
+
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- if (okay1 && okay2.deflat)
+ all(munb / size > smallval) else FALSE
if (!overdispersion)
- warning("parameter 'size' has very large values; ",
- "replacing them by an arbitrary large value within ",
- "the parameter space. Try fitting ",
- "a zero-inflated Poisson ",
- "model instead.")
- ans
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-inflated Poisson ",
+ "model instead.")
+ okay1 && okay2.deflat && overdispersion
}, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
- .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min))),
@@ -3806,8 +4289,9 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
- smallval <- 1e-2 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
"try fitting a zero-inflated Poisson ",
"model instead")
@@ -3879,7 +4363,8 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
ans <- c(w) * dl.dthetas * dthetas.detas
ans
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
- .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min ))),
@@ -4087,18 +4572,26 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"),
imunb = NULL, isize = NULL, ionempstr0 = NULL,
zero = c("size", "onempstr0"),
- imethod = 1, ishrinkage = 0.95,
+ imethod = 1,
- probs.y = 0.35,
+ iprobs.y = NULL, # 0.35,
cutoff.prob = 0.999, # higher is better for large 'size'
eps.trig = 1e-7,
max.support = 4000, # 20160127; I have changed this
max.chunk.MB = 30, # max.memory = Inf is allowed
+ gprobs.y = (0:9)/10, # 20160709; grid for finding munb.init
gsize.mux = exp((-12:6)/2),
+ mds.min = 1e-3,
nsimEIM = 500) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
lmunb <- as.list(substitute(lmunb))
emunb <- link2list(lmunb)
lmunb <- attr(emunb, "function.name")
@@ -4145,7 +4638,6 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
"\n",
"Mean: (1 - pstr0) * munb"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 3)
@@ -4156,6 +4648,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
list(M1 = 3,
Q1 = 1,
expected = TRUE,
+ mds.min = .mds.min ,
multipleResponses = TRUE,
parameters.names = c("munb", "size", "onempstr0"),
eps.trig = .eps.trig ,
@@ -4164,14 +4657,15 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM, .eps.trig = eps.trig,
- .type.fitted = type.fitted
+ .type.fitted = type.fitted,
+ .mds.min = mds.min
))),
initialize = eval(substitute(expression({
M1 <- 3
- temp5 <-
+ temp16 <-
w.y.check(w = w, y = y,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
@@ -4180,8 +4674,8 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
+ w <- temp16$w
+ y <- temp16$y
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
@@ -4199,34 +4693,53 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
namesof(mynames3, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
interleave.VGAM(M1*NOS, M1 = M1)]
- if (!length(etastart)) {
+ gprobs.y <- .gprobs.y
+ imunb <- .imunb # Default in NULL
+ if (length(imunb))
+ imunb <- matrix(imunb, n, NOS, byrow = TRUE)
- munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .imunb , ishrinkage = .ishrinkage ,
- pos.only = TRUE,
- probs.y = .probs.y )
+ if (!length(etastart)) {
- if ( is.Numeric( .isize )) {
- size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- munb <- extraargs
- sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
- }
-
- size.init <- matrix(0, nrow = n, ncol = NOS)
- for (jay in 1:NOS) {
- size.grid <- .gsize.mux * mean(munb.init[, jay])
- TFvec <- (y[, jay] > 0)
- size.init[, jay] <-
- grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
- y = y[TFvec, jay], # x = x[TFvec, ],
- w = w[TFvec, jay],
- extraargs = munb.init[TFvec, jay])
+ munb.init <-
+ size.init <- matrix(NA_real_, n, NOS)
+ gprobs.y <- .gprobs.y
+ if (length( .iprobs.y ))
+ gprobs.y <- .iprobs.y
+ gsize.mux <- .gsize.mux # gsize.mux is on a relative scale
+
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 0 # Important to exclude the 0s
+ posyvec <- y[TFvec, jay]
+ munb.init.jay <- if ( .imethod == 1 ) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
}
- }
+ if (length(imunb))
+ munb.init.jay <- imunb[, jay]
+
+
+ gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+ weighted.mean(posyvec, w = w[TFvec, jay]))
+ if (length( .isize ))
+ gsize <- .isize # isize is on an absolute scale
+
+
+ try.this <-
+ grid.search2(munb.init.jay, gsize,
+ objfun = posNBD.Loglikfun2,
+ y = posyvec, # x = x[TFvec, , drop = FALSE],
+ w = w[TFvec, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ munb.init[, jay] <- try.this["Value1"]
+ size.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
+
+
+
+
@@ -4262,10 +4775,10 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
}), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
.ionempstr0 = ionempstr0, .imunb = imunb, .isize = isize,
- .gsize.mux = gsize.mux,
+ .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
.type.fitted = type.fitted,
.ipstr0.small = ipstr0.small,
- .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .iprobs.y = iprobs.y,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -4291,7 +4804,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
prob0 <- tempk^kmat # p(0) from negative binomial
- smallval <- 1e-3 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
}
@@ -4321,13 +4834,13 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
ans
}, list( .lonempstr0 = lonempstr0, .lsize = lsize, .lmunb = lmunb,
.eonempstr0 = eonempstr0, .esize = esize, .emunb = emunb,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted, .mds.min = mds.min ))),
last = eval(substitute(expression({
misc$link <-
- c(rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS),
- rep( .lonempstr0 , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
+ c(rep_len( .lmunb , NOS),
+ rep_len( .lsize , NOS),
+ rep_len( .lonempstr0 , NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <-
c(mynames1,
mynames2,
@@ -4402,6 +4915,40 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta) / M1
+ munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lsize , earg = .esize )
+ onempstr0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lonempstr0 , earg = .eonempstr0 )
+
+ okay1 <- all(is.finite(munb)) && all(0 < munb) &&
+ all(is.finite(size)) && all(0 < size) &&
+ all(is.finite(onempstr0)) && all(0 < onempstr0)
+ prob <- size / (size + munb)
+ prob0 <- prob^size
+ Prob0.check <- dnbinom(0, size = size, prob = prob)
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(onempstr0 < 1 - deflat.limit)))
+ warning("parameter 'pstr0' is too positive even allowing for ",
+ "0-deflation.")
+
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- if (okay1 && okay2.deflat)
+ all(munb / size > smallval) else FALSE
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-inflated Poisson ",
+ "model instead.")
+ okay1 && okay2.deflat && overdispersion
+ }, list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
+ .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min))),
+
deriv = eval(substitute(expression({
@@ -4427,8 +4974,9 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
- smallval <- 1e-2 # Something like this is needed
+ smallval <- .mds.min # Something like this is needed
if (any(big.size <- munb / kmat < smallval)) {
+ if (FALSE)
warning("parameter 'size' has very large values; ",
"try fitting a zero-inflated Poisson ",
"model instead")
@@ -4492,7 +5040,8 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
c(w) * dl.dthetas * dthetas.detas
}), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
- .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
+ .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
+ .mds.min = mds.min ))),
@@ -4687,48 +5236,162 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
- zipoissonff <-
- function(llambda = "loge", lonempstr0 = "logit",
- type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
- ilambda = NULL, ionempstr0 = NULL,
- gonempstr0 = NULL, # (1:9) / 10,
- imethod = 1,
- ishrinkage = 0.95, probs.y = 0.35,
- zero = "onempstr0") {
+dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL <- max(length(x), length(prob), length(pstr0))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
- type.fitted <- match.arg(type.fitted,
- c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
+ ans <- dgeom(x = x, prob = prob, log = TRUE)
- llambda <- as.list(substitute(llambda))
- elambda <- link2list(llambda)
- llambda <- attr(elambda, "function.name")
+ ans <- if (log.arg) {
+ ifelse(x == 0, log(pstr0 + (1 - pstr0) * exp(ans)),
+ log1p(-pstr0) + ans)
+ } else {
+ ifelse(x == 0, pstr0 + (1 - pstr0) * exp(ans) ,
+ (1 - pstr0) * exp(ans))
+ }
- lonempstr0 <- as.list(substitute(lonempstr0))
- eonempstr0 <- link2list(lonempstr0)
- lonempstr0 <- attr(eonempstr0, "function.name")
- ipstr0.small <- 1/64 # A number easily represented exactly
+ prob0 <- prob
+ deflat.limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+ ans
+}
- if (length(ilambda))
- if (!is.Numeric(ilambda, positive = TRUE))
- stop("'ilambda' values must be positive")
- if (length(ionempstr0))
- if (!is.Numeric(ionempstr0, positive = TRUE) ||
- any(ionempstr0 >= 1))
- stop("'ionempstr0' values must be inside the interval (0,1)")
+
+
+pzigeom <- function(q, prob, pstr0 = 0) {
+
+
+ LLL <- max(length(q), length(prob), length(pstr0))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr0) != LLL) pstr0 <- rep_len(pstr0, LLL)
+
+ ans <- pgeom(q, prob)
+ ans <- ifelse(q < 0, 0, pstr0 + (1-pstr0) * ans)
+
+
+ prob0 <- prob
+ deflat.limit <- -prob0 / (1 - prob0)
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+ ans
+}
+
+
+
+qzigeom <- function(p, prob, pstr0 = 0) {
+ LLL <- max(length(p), length(prob), length(pstr0))
+ ans <- p <- rep_len(p, LLL)
+ prob <- rep_len(prob, LLL)
+ pstr0 <- rep_len(pstr0, LLL)
+ ans[p <= pstr0] <- 0
+ ind1 <- (p > pstr0)
+ ans[ind1] <-
+ qgeom((p[ind1] - pstr0[ind1]) / (1 - pstr0[ind1]),
+ prob = prob[ind1])
+
+
+ prob0 <- prob
+ deflat.limit <- -prob0 / (1 - prob0)
+ ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[p[ind0] <= pobs0] <- 0
+ pindex <- (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+ ans[pindex] <- 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
+ prob = prob[pindex])
+ }
+
+ ans[pstr0 < deflat.limit] <- NaN
+ ans[pstr0 > 1] <- NaN
+
+ ans
+}
+
+
+
+rzigeom <- function(n, prob, pstr0 = 0) {
+
+ qzigeom(runif(n), prob, pstr0 = pstr0)
+}
+
+
+
+
+ zigeometric <-
+ function(
+ lpstr0 = "logit",
+ lprob = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ ipstr0 = NULL, iprob = NULL,
+ imethod = 1,
+ bias.red = 0.5,
+ zero = NULL) {
+
+
+
+ expected <- TRUE
+
+
+
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr0 <- link2list(lpstr0)
+ lpstr0 <- attr(epstr0, "function.name")
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+
+
+ if (length(ipstr0))
+ if (!is.Numeric(ipstr0, positive = TRUE) ||
+ ipstr0 >= 1)
+ stop("argument 'ipstr0' is out of range")
+
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ iprob >= 1)
+ stop("argument 'iprob' is out of range")
+
+ if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
+ bias.red > 1)
+ stop("argument 'bias.red' must be between 0 and 1")
+
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
new("vglmff",
- blurb = c("Zero-inflated Poisson\n\n",
- "Links: ",
- namesof("lambda", llambda, earg = elambda), ", ",
- namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
- "Mean: onempstr0 * lambda"),
+ blurb = c("Zero-inflated geometric distribution,\n",
+ "P[Y = 0] = pstr0 + (1 - pstr0) * prob,\n",
+ "P[Y = y] = (1 - pstr0) * prob * (1 - prob)^y, ",
+ "y = 1, 2, ...\n\n",
+ "Link: ",
+ namesof("pstr0", lpstr0, earg = epstr0), ", ",
+ namesof("prob", lprob, earg = eprob ), "\n",
+ "Mean: (1 - pstr0) * (1 - prob) / prob"),
constraints = eval(substitute(expression({
+
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -4739,13 +5402,11 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
Q1 = 1,
expected = TRUE,
multipleResponses = TRUE,
- parameters.names = c("lambda", "onempstr0"),
+ parameters.names = c("pstr0", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
- .type.fitted = type.fitted
- ))),
-
+ .type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
M1 <- 2
@@ -4760,172 +5421,161 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
maximize = TRUE)
w <- temp5$w
y <- temp5$y
-
-
-
-
- ncoly <- ncol(y)
- extra$ncoly <- ncoly
- extra$M1 <- M1
- M <- M1 * ncoly
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
extra$type.fitted <- .type.fitted
extra$dimnamesy <- dimnames(y)
- mynames1 <- param.names("lambda", ncoly)
- mynames2 <- param.names("onempstr0", ncoly)
+
+ mynames1 <- param.names("pstr0", ncoly)
+ mynames2 <- param.names("prob", ncoly)
predictors.names <-
- c(namesof(mynames1, .llambda , earg = .elambda , tag = FALSE),
- namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M, M1 = M1)]
+ c(namesof(mynames1, .lpstr0, earg = .epstr0, tag = FALSE),
+ namesof(mynames2, .lprob, earg = .eprob, tag = FALSE))[
+ interleave.VGAM(M1 * NOS, M1 = M1)]
- if (!length(etastart)) {
- matL <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
- imu = .ilambda , ishrinkage = .ishrinkage ,
- pos.only = TRUE,
- probs.y = .probs.y )
+ if (!length(etastart)) {
+ prob.init <- if ( .imethod == 3)
+ .bias.red / (1 + y + 1/8) else
+ if ( .imethod == 2)
+ .bias.red / (1 +
+ matrix(colMeans(y) + 1/8,
+ n, ncoly, byrow = TRUE)) else
+ .bias.red / (1 +
+ matrix(colSums(y * w) / colSums(w) + 1/8,
+ n, ncoly, byrow = TRUE))
- matP <- matrix(if (length( .ionempstr0 )) .ionempstr0 else 0,
- n, ncoly, byrow = TRUE)
- phi0.grid <- .gonempstr0
- ipstr0.small <- .ipstr0.small # A number easily represented exactly
+ prob.init <- if (length( .iprob )) {
+ matrix( .iprob , n, ncoly, byrow = TRUE)
+ } else {
+ prob.init # Already a matrix
+ }
- if (!length( .ionempstr0 ))
- for (jay in 1:ncoly) {
- zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
- sum(c(w) * dzipois(x = y, pstr0 = phival,
- lambda = extraargs$lambda,
- log = TRUE))
- }
- Phi0.init <- if (length(phi0.grid)) {
- grid.search(phi0.grid,
- objfun = zipois.Loglikfun,
- y = y[, jay], x = x, w = w[, jay],
- extraargs = list(lambda = matL[, jay]))
- } else {
- pmax(ipstr0.small,
- weighted.mean(y[, jay] == 0, w[, jay]) -
- dpois(0, matL[, jay]))
- }
- if (mean(Phi0.init == ipstr0.small) > 0.95)
- warning("from the initial values only, the data appears to ",
- "have little or no 0-inflation")
- matP[, jay] <- Phi0.init
- } # for (jay)
+ prob0.est <- psze.init <- matrix(0, n, NOS)
+ for (jlocal in 1:NOS) {
+ prob0.est[, jlocal] <-
+ sum(w[y[, jlocal] == 0, jlocal]) / sum(w[, jlocal])
+ psze.init[, jlocal] <- if ( .imethod == 3)
+ prob0.est[, jlocal] / 2 else
+ if ( .imethod == 1)
+ pmax(0.05, (prob0.est[, jlocal] -
+ median(prob.init[, jlocal]))) else
+ prob0.est[, jlocal] / 5
+ }
+ psze.init <- if (length( .ipstr0 )) {
+ matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+ } else {
+ psze.init # Already a matrix
+ }
- etastart <-
- cbind(theta2eta( matL, .llambda , earg = .elambda ),
- theta2eta(1 - matP, .lonempstr0 , earg = .eonempstr0 ))[,
- interleave.VGAM(M, M1 = M1)]
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda,
- .ionempstr0 = ionempstr0, .ilambda = ilambda,
- .gonempstr0 = gonempstr0,
- .type.fitted = type.fitted, .probs.y = probs.y,
- .ipstr0.small = ipstr0.small,
- .imethod = imethod, .ishrinkage = ishrinkage ))),
+ etastart <-
+ cbind(theta2eta(psze.init, .lpstr0, earg = .epstr0),
+ theta2eta(prob.init, .lprob , earg = .eprob ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
+ }
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
+ .iprob = iprob, .ipstr0 = ipstr0,
+ .type.fitted = type.fitted,
+ .bias.red = bias.red,
+ .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
- warning("cannot find 'type.fitted'. ",
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
"Returning the 'mean'.")
"mean"
}
type.fitted <- match.arg(type.fitted,
- c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
-
- M1 <- 2
- ncoly <- ncol(eta) / M1
- lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
- earg = .elambda )
- onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
- earg = .eonempstr0 )
-
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
- "mean" = onempstr0 * lambda,
- "lambda" = lambda,
- "pobs0" = 1 + onempstr0 * expm1(-lambda), # P(Y=0)
- "pstr0" = 1 - onempstr0,
- "onempstr0" = onempstr0)
+ "mean" = (1 - pstr0) * (1 - prob) / prob,
+ "prob" = prob,
+ "pobs0" = pstr0 + (1 - pstr0) * prob, # P(Y=0)
+ "pstr0" = pstr0,
+ "onempstr0" = 1 - pstr0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- if (length(extra$dimnamesy[[1]]) == nrow(ans))
- dimnames(ans) <- extra$dimnamesy
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
colnames(ans) <- NULL
}
ans
- }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda,
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- M1 <- extra$M1
- misc$link <-
- c(rep( .llambda , length = ncoly),
- rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M1 = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
- names(misc$link) <- temp.names
+ temp.names <- c(rep_len( .lpstr0 , NOS),
+ rep_len( .lprob , NOS))
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+ misc$link <- temp.names
- misc$earg <- vector("list", M1 * ncoly)
- names(misc$earg) <- temp.names
- for (ii in 1:ncoly) {
- misc$earg[[M1*ii-1]] <- .elambda
- misc$earg[[M1*ii ]] <- .eonempstr0
+ misc$earg <- vector("list", M1 * NOS)
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+ for (ii in 1:NOS) {
+ misc$earg[[M1*ii-1]] <- .epstr0
+ misc$earg[[M1*ii ]] <- .eprob
}
- misc$M1 <- M1
+
misc$imethod <- .imethod
- misc$expected <- TRUE
- misc$multipleResponses <- TRUE
+ misc$zero <- .zero
+ misc$bias.red <- .bias.red
+ misc$expected <- .expected
+ misc$ipstr0 <- .ipstr0
+ misc$type.fitted <- .type.fitted
- misc$pobs0 <- (1 - onempstr0) + onempstr0 * exp(-lambda) # P(Y=0)
- misc$pobs0 <- as.matrix(misc$pobs0)
- if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pobs0) <- dimnames(y)
- misc$pstr0 <- (1 - onempstr0)
- misc$pstr0 <- as.matrix(misc$pstr0)
- if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pstr0) <- dimnames(y)
- }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda,
+ misc$pobs0 <- pobs0
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pobs0) <- dimnames(y)
+ misc$pstr0 <- pstr0
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pstr0) <- dimnames(y)
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
+ .ipstr0 = ipstr0,
+ .zero = zero,
+ .expected = expected,
+ .type.fitted = type.fitted,
+ .bias.red = bias.red,
.imethod = imethod ))),
- loglikelihood = eval(substitute(
+ loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
- earg = .elambda )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
-
-
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) *
- dzipois(x = y, pstr0 = 1 - onempstr0, lambda = lambda,
- log = TRUE)
+ ll.elts <-
+ c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda ))),
- vfamily = c("zipoissonff"),
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ vfamily = c("zigeometric"),
+
@@ -4937,244 +5587,140 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
- earg = .elambda )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
- rzipois(nsim * length(lambda), lambda = lambda, pstr0 = 1 - onempstr0)
- }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda ))),
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+ rzigeom(nsim * length(pstr0), prob = prob, pstr0 = pstr0)
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+
+ okay1 <- all(is.finite(prob )) && all(0 < prob) && all(prob < 1) &&
+ all(is.finite(pstr0)) && all(pstr0 < 1)
+ prob0 <- prob
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr0)))
+ warning("parameter 'pstr0' is too negative even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+
deriv = eval(substitute(expression({
M1 <- 2
- ncoly <- ncol(eta) / M1 # extra$ncoly
- lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
- earg = .elambda )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
- dlambda.deta <- dtheta.deta(lambda , .llambda ,
- earg = .elambda )
- donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
- earg = .eonempstr0 )
+ prob0 <- prob # P(Y == 0) from parent distribution, aka f(0)
+ pobs0 <- pstr0 + (1 - pstr0) * prob0 # P(Y == 0)
+ index0 <- (y == 0)
- denom <- 1 + onempstr0 * expm1(-lambda)
- ind0 <- (y == 0)
- dl.dlambda <- -onempstr0 * exp(-lambda) / denom
- dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
- dl.donempstr0 <- expm1(-lambda) / denom
- dl.donempstr0[!ind0] <- 1 / onempstr0[!ind0]
+ dl.dpstr0 <- (1 - prob0) / pobs0
+ dl.dpstr0[!index0] <- -1 / (1 - pstr0[!index0])
- ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
- dl.donempstr0 * donempstr0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
+ dl.dprob <- (1 - pstr0) / pobs0
+ dl.dprob[!index0] <- 1 / prob[!index0] -
+ y[!index0] / (1 - prob[!index0])
+ dpstr0.deta <- dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
- if ( .llambda == "loge" && is.empty.list( .elambda ) &&
- any(lambda[!ind0] < .Machine$double.eps)) {
- for (spp. in 1:ncoly) {
- ans[!ind0[, spp.], M1 * spp.] <-
- w[!ind0[, spp.]] *
- (y[!ind0[, spp.], spp.] - lambda[!ind0[, spp.], spp.])
- }
- }
+ dl.deta12 <- c(w) * cbind(dl.dpstr0 * dpstr0.deta,
+ dl.dprob * dprob.deta)
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M1 = M1)]
+ dl.deta12
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ weight = eval(substitute(expression({
+ if ( .expected ) {
- ans
- }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
- .eonempstr0 = eonempstr0, .elambda = elambda ))),
- weight = eval(substitute(expression({
+ ned2l.dprob2 <- (1 - pstr0)^2 / pobs0 +
+ (1 - pstr0) * ((1 - prob) / prob) *
+ (1 / prob + 1 / (1 - prob)^2)
- ned2l.dlambda2 <- ( onempstr0) / lambda -
- onempstr0 * (1 - onempstr0) * exp(-lambda) / denom
- ned2l.donempstr0.2 <- -expm1(-lambda) / ((onempstr0) * denom)
- ned2l.dphilambda <- +exp(-lambda) / denom
+ ned2l.dpstr0.prob <- 1 / pobs0
+ ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
+ } else {
+ od2l.dprob2 <- ((1 - pstr0) / pobs0)^2
+ od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
+ y[!index0] / (1 - prob[!index0])^2
+ od2l.dpstr0.prob <- (pobs0 + (1 - prob0) * (1 - pstr0)) / pobs0^2
+ od2l.dpstr0.prob[!index0] <- 0
+ od2l.dpstr02 <- ((1 - prob0) / pobs0)^2
+ od2l.dpstr02[!index0] <- 1 / (1 - pstr0[!index0])^2
+ }
- wz <- array(c(c(w) * ned2l.dlambda2 * dlambda.deta^2,
- c(w) * ned2l.donempstr0.2 * donempstr0.deta^2,
- c(w) * ned2l.dphilambda * donempstr0.deta * dlambda.deta),
- dim = c(n, M / M1, 3))
+
+ allvals <- if ( .expected )
+ c(c(w) * ned2l.dpstr02 * dpstr0.deta^2,
+ c(w) * ned2l.dprob2 * dprob.deta^2,
+ c(w) * ned2l.dpstr0.prob * dprob.deta * dpstr0.deta) else
+ c(c(w) * od2l.dpstr02 * dpstr0.deta^2,
+ c(w) * od2l.dprob2 * dprob.deta^2,
+ c(w) * od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
+ wz <- array(allvals, dim = c(n, M / M1, 3))
wz <- arwz2wz(wz, M = M, M1 = M1)
+
wz
- }), list( .llambda = llambda ))))
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
+ .expected = expected ))))
}
+ zigeometricff <-
+ function(lprob = "logit",
+ lonempstr0 = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ iprob = NULL, ionempstr0 = NULL,
+ imethod = 1,
+ bias.red = 0.5,
+ zero = "onempstr0") {
+ expected <- TRUE
-dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
- LLL <- max(length(x), length(prob), length(pstr0))
- if (length(x) != LLL) x <- rep(x, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
- ans <- dgeom(x = x, prob = prob, log = TRUE)
+ lonempstr0 <- as.list(substitute(lonempstr0))
+ eonempstr0 <- link2list(lonempstr0)
+ lonempstr0 <- attr(eonempstr0, "function.name")
- ans <- if (log.arg) {
- ifelse(x == 0, log(pstr0 + (1 - pstr0) * exp(ans)),
- log1p(-pstr0) + ans)
- } else {
- ifelse(x == 0, pstr0 + (1 - pstr0) * exp(ans) ,
- (1 - pstr0) * exp(ans))
- }
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ iprob >= 1)
+ stop("argument 'iprob' is out of range")
- prob0 <- prob
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
+ if (length(ionempstr0))
+ if (!is.Numeric(ionempstr0, positive = TRUE) ||
+ ionempstr0 >= 1)
+ stop("argument 'ionempstr0' is out of range")
- ans
-}
-
-
-
-pzigeom <- function(q, prob, pstr0 = 0) {
-
-
- LLL <- max(length(q), length(prob), length(pstr0))
- if (length(q) != LLL) q <- rep(q, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pstr0) != LLL) pstr0 <- rep(pstr0, len = LLL);
-
- ans <- pgeom(q, prob)
- ans <- ifelse(q < 0, 0, pstr0 + (1-pstr0) * ans)
-
-
- prob0 <- prob
- deflat.limit <- -prob0 / (1 - prob0)
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
-
- ans
-}
-
-
-
-qzigeom <- function(p, prob, pstr0 = 0) {
- LLL <- max(length(p), length(prob), length(pstr0))
- ans <- p <- rep(p, len = LLL)
- prob <- rep(prob, len = LLL)
- pstr0 <- rep(pstr0, len = LLL)
- ans[p <= pstr0] <- 0
- ind1 <- (p > pstr0)
- ans[ind1] <-
- qgeom((p[ind1] - pstr0[ind1]) / (1 - pstr0[ind1]),
- prob = prob[ind1])
-
-
- prob0 <- prob
- deflat.limit <- -prob0 / (1 - prob0)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[p[ind0] <= pobs0] <- 0
- pindex <- (1:LLL)[ind0 & (p > pobs0)]
- Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
- ans[pindex] <- 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
- prob = prob[pindex])
- }
-
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
-
- ans
-}
-
-
-
-rzigeom <- function(n, prob, pstr0 = 0) {
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
-
-
- pstr0 <- rep(pstr0, len = use.n)
- prob <- rep(prob, len = use.n)
-
-
- ans <- rgeom(use.n, prob)
- ans[runif(use.n) < pstr0] <- 0
-
-
- prob0 <- prob
- deflat.limit <- -prob0 / (1 - prob0)
- ind0 <- (deflat.limit <= pstr0) & (pstr0 < 0)
- if (any(ind0)) {
- pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
- ans[ind0] <- 1 + rgeom(sum(ind0), prob = prob[ind0])
- ans[ind0] <- ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
- }
-
- ans[pstr0 < deflat.limit] <- NaN
- ans[pstr0 > 1] <- NaN
-
-
- ans
-}
-
-
-
-
- zigeometric <-
- function(
- lpstr0 = "logit",
- lprob = "logit",
- type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, iprob = NULL,
- imethod = 1,
- bias.red = 0.5,
- zero = NULL) {
-
-
-
- expected <- TRUE
-
-
-
- lpstr0 <- as.list(substitute(lpstr0))
- epstr0 <- link2list(lpstr0)
- lpstr0 <- attr(epstr0, "function.name")
-
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
-
- type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
-
-
- if (length(ipstr0))
- if (!is.Numeric(ipstr0, positive = TRUE) ||
- ipstr0 >= 1)
- stop("argument 'ipstr0' is out of range")
-
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- iprob >= 1)
- stop("argument 'iprob' is out of range")
-
- if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
- bias.red > 1)
- stop("argument 'bias.red' must be between 0 and 1")
+ if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
+ bias.red > 1)
+ stop("argument 'bias.red' must be between 0 and 1")
if (!is.Numeric(imethod, length.arg = 1,
@@ -5185,13 +5731,13 @@ rzigeom <- function(n, prob, pstr0 = 0) {
new("vglmff",
blurb = c("Zero-inflated geometric distribution,\n",
- "P[Y = 0] = pstr0 + (1 - pstr0) * prob,\n",
- "P[Y = y] = (1 - pstr0) * prob * (1 - prob)^y, ",
+ "P[Y = 0] = 1 - onempstr0 + onempstr0 * prob,\n",
+ "P[Y = y] = onempstr0 * prob * (1 - prob)^y, ",
"y = 1, 2, ...\n\n",
"Link: ",
- namesof("pstr0", lpstr0, earg = epstr0), ", ",
- namesof("prob", lprob, earg = eprob ), "\n",
- "Mean: (1 - pstr0) * (1 - prob) / prob"),
+ namesof("prob", lprob, earg = eprob ), ", ",
+ namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
+ "Mean: onempstr0 * (1 - prob) / prob"),
constraints = eval(substitute(expression({
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -5204,7 +5750,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
Q1 = 1,
expected = TRUE,
multipleResponses = TRUE,
- parameters.names = c("pstr0", "prob"),
+ parameters.names = c("prob", "onempstr0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5228,12 +5774,12 @@ rzigeom <- function(n, prob, pstr0 = 0) {
extra$dimnamesy <- dimnames(y)
- mynames1 <- param.names("pstr0", ncoly)
- mynames2 <- param.names("prob", ncoly)
+ mynames1 <- param.names("prob", ncoly)
+ mynames2 <- param.names("onempstr0", ncoly)
predictors.names <-
- c(namesof(mynames1, .lpstr0, earg = .epstr0, tag = FALSE),
- namesof(mynames2, .lprob, earg = .eprob, tag = FALSE))[
- interleave.VGAM(M1 * NOS, M1 = M1)]
+ c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
+ namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
@@ -5250,7 +5796,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
prob.init <- if (length( .iprob )) {
matrix( .iprob , n, ncoly, byrow = TRUE)
} else {
- prob.init # Already a matrix
+ prob.init # Already a matrix
}
@@ -5265,8 +5811,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
median(prob.init[, jlocal]))) else
prob0.est[, jlocal] / 5
}
- psze.init <- if (length( .ipstr0 )) {
- matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+ psze.init <- if (length( .ionempstr0 )) {
+ matrix( 1 - .ionempstr0 , n, ncoly, byrow = TRUE)
} else {
psze.init # Already a matrix
}
@@ -5274,19 +5820,21 @@ rzigeom <- function(n, prob, pstr0 = 0) {
etastart <-
- cbind(theta2eta(psze.init, .lpstr0, earg = .epstr0),
- theta2eta(prob.init, .lprob , earg = .eprob ))
+ cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
+ theta2eta(1 - psze.init, .lonempstr0 , earg = .eonempstr0 ))
etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
- }), list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0,
- .iprob = iprob, .ipstr0 = ipstr0,
+ }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0,
+ .iprob = iprob, .ionempstr0 = ionempstr0,
.type.fitted = type.fitted,
.bias.red = bias.red,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
- prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -5298,11 +5846,11 @@ rzigeom <- function(n, prob, pstr0 = 0) {
c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
- "mean" = (1 - pstr0) * (1 - prob) / prob,
+ "mean" = onempstr0 * (1 - prob) / prob,
"prob" = prob,
- "pobs0" = pstr0 + (1 - pstr0) * prob, # P(Y=0)
- "pstr0" = pstr0,
- "onempstr0" = 1 - pstr0)
+ "pobs0" = 1 - onempstr0 + onempstr0 * prob, # P(Y=0)
+ "pstr0" = 1 - onempstr0,
+ "onempstr0" = onempstr0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
@@ -5314,12 +5862,12 @@ rzigeom <- function(n, prob, pstr0 = 0) {
colnames(ans) <- NULL
}
ans
- }, list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0,
+ }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- temp.names <- c(rep( .lpstr0 , len = NOS),
- rep( .lprob , len = NOS))
+ temp.names <- c(rep_len( .lprob , NOS),
+ rep_len( .lonempstr0 , NOS))
temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
@@ -5330,53 +5878,54 @@ rzigeom <- function(n, prob, pstr0 = 0) {
c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
for (ii in 1:NOS) {
- misc$earg[[M1*ii-1]] <- .epstr0
- misc$earg[[M1*ii ]] <- .eprob
+ misc$earg[[M1*ii-1]] <- .eprob
+ misc$earg[[M1*ii ]] <- .eonempstr0
}
- misc$imethod <- .imethod
- misc$zero <- .zero
+ misc$imethod <- .imethod
+ misc$zero <- .zero
misc$bias.red <- .bias.red
misc$expected <- .expected
- misc$ipstr0 <- .ipstr0
- misc$type.fitted <- .type.fitted
+ misc$ionempstr0 <- .ionempstr0
misc$pobs0 <- pobs0
if (length(dimnames(y)[[2]]) > 0)
dimnames(misc$pobs0) <- dimnames(y)
- misc$pstr0 <- pstr0
+ misc$onempstr0 <- onempstr0
if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pstr0) <- dimnames(y)
- }), list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0,
- .ipstr0 = ipstr0,
+ dimnames(misc$onempstr0) <- dimnames(y)
+ }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0,
+ .ionempstr0 = ionempstr0,
.zero = zero,
.expected = expected,
- .type.fitted = type.fitted,
.bias.red = bias.red,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
- prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <-
- c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE)
+ c(w) * dzigeom(x = y, prob = prob, pstr0 = 1 - onempstr0,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0 ))),
- vfamily = c("zigeometric"),
+ }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+ vfamily = c("zigeometricff"),
@@ -5389,255 +5938,489 @@ rzigeom <- function(n, prob, pstr0 = 0) {
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
- prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
- rzigeom(nsim * length(pstr0), prob = prob, pstr0 = pstr0)
- }, list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0 ))),
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ rzigeom(nsim * length(onempstr0), prob = prob, pstr0 = 1 - onempstr0)
+ }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
+ okay1 <- all(is.finite(onempobs0)) && all(0 < onempobs0) &&
+ all(is.finite(prob )) && all(0 < prob & prob < 1)
+ prob0 <- prob
+ deflat.limit <- -prob0 / (1 - prob0)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(onempstr0 < 1 - deflat.limit)))
+ warning("parameter 'onempstr0' is too positive even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+
+
+
deriv = eval(substitute(expression({
M1 <- 2
- pstr0 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
- prob <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , earg = .eprob )
+ prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
+ earg = .eprob )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+ earg = .eonempstr0 )
- prob0 <- prob # P(Y == 0) from parent distribution, aka f(0)
- pobs0 <- pstr0 + (1 - pstr0) * prob0 # P(Y == 0)
+ prob0 <- prob # P(Y == 0) from the parent distribution
+ pobs0 <- 1 - onempstr0 + (onempstr0) * prob0 # P(Y == 0)
index0 <- (y == 0)
- dl.dpstr0 <- (1 - prob0) / pobs0
- dl.dpstr0[!index0] <- -1 / (1 - pstr0[!index0])
- dl.dprob <- (1 - pstr0) / pobs0
+ dl.donempstr0 <- -(1 - prob0) / pobs0 # zz
+ dl.donempstr0[!index0] <- 1 / (onempstr0[!index0]) # zz
+
+ dl.dprob <- (onempstr0) / pobs0
dl.dprob[!index0] <- 1 / prob[!index0] -
y[!index0] / (1 - prob[!index0])
- dpstr0.deta <- dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
- dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
-
- dl.deta12 <- c(w) * cbind(dl.dpstr0 * dpstr0.deta,
- dl.dprob * dprob.deta)
+ dprob.deta <- dtheta.deta(prob , .lprob ,
+ earg = .eprob )
+ donempstr0.deta <- dtheta.deta(onempstr0 , .lonempstr0 ,
+ earg = .eonempstr0 )
+
+ dl.deta12 <- c(w) * cbind(dl.dprob * dprob.deta,
+ dl.donempstr0 * donempstr0.deta)
dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M1 = M1)]
dl.deta12
- }), list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0 ))),
+ }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0 ))),
weight = eval(substitute(expression({
if ( .expected ) {
-
- ned2l.dprob2 <- (1 - pstr0)^2 / pobs0 +
- (1 - pstr0) * ((1 - prob) / prob) *
+ ned2l.dprob2 <- (onempstr0)^2 / pobs0 +
+ (onempstr0) * ((1 - prob) / prob) *
(1 / prob + 1 / (1 - prob)^2)
- ned2l.dpstr0.prob <- 1 / pobs0
- ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
+ ned2l.donempstr0.prob <- -1 / pobs0
+ ned2l.donempstr02 <- (1 - prob0) / (( onempstr0) * pobs0)
} else {
- od2l.dprob2 <- ((1 - pstr0) / pobs0)^2
+ od2l.dprob2 <- (( onempstr0) / pobs0)^2
od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
y[!index0] / (1 - prob[!index0])^2
- od2l.dpstr0.prob <- (pobs0 + (1 - prob0) * (1 - pstr0)) / pobs0^2
- od2l.dpstr0.prob[!index0] <- 0
+ od2l.donempstr0.prob <- -(pobs0 + (1 - prob0) * (onempstr0)) / pobs0^2
+ od2l.donempstr0.prob[!index0] <- 0
- od2l.dpstr02 <- ((1 - prob0) / pobs0)^2
- od2l.dpstr02[!index0] <- 1 / (1 - pstr0[!index0])^2
+ od2l.donempstr02 <- ((1 - prob0) / pobs0)^2
+ od2l.donempstr02[!index0] <- 1 / ( onempstr0[!index0])^2
}
allvals <- if ( .expected )
- c(c(w) * ned2l.dpstr02 * dpstr0.deta^2,
- c(w) * ned2l.dprob2 * dprob.deta^2,
- c(w) * ned2l.dpstr0.prob * dprob.deta * dpstr0.deta) else
- c(c(w) * od2l.dpstr02 * dpstr0.deta^2,
- c(w) * od2l.dprob2 * dprob.deta^2,
- c(w) * od2l.dpstr0.prob * dprob.deta * dpstr0.deta)
+ c(c(w) * ned2l.dprob2 * dprob.deta^2,
+ c(w) * ned2l.donempstr02 * donempstr0.deta^2,
+ c(w) * ned2l.donempstr0.prob * dprob.deta *
+ donempstr0.deta) else
+ c(c(w) * od2l.dprob2 * dprob.deta^2,
+ c(w) * od2l.donempstr02 * donempstr0.deta^2,
+ c(w) * od2l.donempstr0.prob * dprob.deta *
+ donempstr0.deta)
wz <- array(allvals, dim = c(n, M / M1, 3))
wz <- arwz2wz(wz, M = M, M1 = M1)
wz
- }), list( .lprob = lprob, .lpstr0 = lpstr0,
- .eprob = eprob, .epstr0 = epstr0,
+ }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
+ .eprob = eprob, .eonempstr0 = eonempstr0,
.expected = expected ))))
}
- zigeometricff <-
- function(lprob = "logit",
- lonempstr0 = "logit",
- type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
- iprob = NULL, ionempstr0 = NULL,
- imethod = 1,
- bias.red = 0.5,
- zero = "onempstr0") {
+dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL <- max(length(x), length(prob), length(pobs0))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ index0 <- (x == 0)
- expected <- TRUE
+ if (log.arg) {
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
+ dposgeom(x[!index0],
+ prob = prob[!index0], log = TRUE)
+ } else {
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1-pobs0[!index0]) *
+ dposgeom(x[!index0],
+ prob = prob[!index0])
+ }
+ ans
+}
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
+pzageom <- function(q, prob, pobs0 = 0) {
- lonempstr0 <- as.list(substitute(lonempstr0))
- eonempstr0 <- link2list(lonempstr0)
- lonempstr0 <- attr(eonempstr0, "function.name")
+ LLL <- max(length(q), length(prob), length(pobs0))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ ans[q > 0] <- pobs0[q > 0] +
+ (1 - pobs0[q > 0]) *
+ pposgeom(q[q > 0], prob = prob[q > 0])
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
- type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+ ans
+}
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- iprob >= 1)
- stop("argument 'iprob' is out of range")
- if (length(ionempstr0))
- if (!is.Numeric(ionempstr0, positive = TRUE) ||
- ionempstr0 >= 1)
- stop("argument 'ionempstr0' is out of range")
+qzageom <- function(p, prob, pobs0 = 0) {
- if (!is.Numeric(bias.red, length.arg = 1, positive = TRUE) ||
- bias.red > 1)
- stop("argument 'bias.red' must be between 0 and 1")
+ LLL <- max(length(p), length(prob), length(pobs0))
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ ans <- p
+ ind4 <- (p > pobs0)
+ ans[!ind4] <- 0.0
+ ans[ ind4] <- qposgeom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ prob = prob[ind4])
+ ans
+}
- new("vglmff",
- blurb = c("Zero-inflated geometric distribution,\n",
- "P[Y = 0] = 1 - onempstr0 + onempstr0 * prob,\n",
- "P[Y = y] = onempstr0 * prob * (1 - prob)^y, ",
- "y = 1, 2, ...\n\n",
- "Link: ",
- namesof("prob", lprob, earg = eprob ), ", ",
- namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
- "Mean: onempstr0 * (1 - prob) / prob"),
- constraints = eval(substitute(expression({
+rzageom <- function(n, prob, pobs0 = 0) {
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ length.arg = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
- predictors.names = predictors.names,
- M1 = 2)
- }), list( .zero = zero ))),
+ ans <- rposgeom(use.n, prob)
+ if (length(pobs0) != use.n)
+ pobs0 <- rep_len(pobs0, use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ifelse(runif(use.n) < pobs0, 0, ans)
+}
- infos = eval(substitute(function(...) {
- list(M1 = 2,
- Q1 = 1,
- expected = TRUE,
- multipleResponses = TRUE,
- parameters.names = c("prob", "onempstr0"),
- type.fitted = .type.fitted ,
- zero = .zero )
- }, list( .zero = zero,
- .type.fitted = type.fitted ))),
- initialize = eval(substitute(expression({
- M1 <- 2
- temp5 <-
- w.y.check(w = w, y = y,
- Is.nonnegative.y = TRUE,
- Is.integer.y = TRUE,
- ncol.w.max = Inf,
- ncol.y.max = Inf,
- out.wy = TRUE,
- colsyperw = 1,
- maximize = TRUE)
- w <- temp5$w
- y <- temp5$y
- extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$type.fitted <- .type.fitted
- extra$dimnamesy <- dimnames(y)
- mynames1 <- param.names("prob", ncoly)
- mynames2 <- param.names("onempstr0", ncoly)
- predictors.names <-
- c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
- namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M1 = M1)]
- if (!length(etastart)) {
- prob.init <- if ( .imethod == 3)
- .bias.red / (1 + y + 1/8) else
- if ( .imethod == 2)
- .bias.red / (1 +
- matrix(colMeans(y) + 1/8,
- n, ncoly, byrow = TRUE)) else
- .bias.red / (1 +
- matrix(colSums(y * w) / colSums(w) + 1/8,
- n, ncoly, byrow = TRUE))
- prob.init <- if (length( .iprob )) {
- matrix( .iprob , n, ncoly, byrow = TRUE)
- } else {
- prob.init # Already a matrix
- }
- prob0.est <- psze.init <- matrix(0, n, NOS)
- for (jlocal in 1:NOS) {
- prob0.est[, jlocal] <-
- sum(w[y[, jlocal] == 0, jlocal]) / sum(w[, jlocal])
- psze.init[, jlocal] <- if ( .imethod == 3)
- prob0.est[, jlocal] / 2 else
- if ( .imethod == 1)
- pmax(0.05, (prob0.est[, jlocal] -
- median(prob.init[, jlocal]))) else
- prob0.est[, jlocal] / 5
- }
- psze.init <- if (length( .ionempstr0 )) {
- matrix( 1 - .ionempstr0 , n, ncoly, byrow = TRUE)
- } else {
- psze.init # Already a matrix
- }
+dzabinom <- function(x, size, prob, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL <- max(length(x), length(size), length(prob), length(pobs0))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ index0 <- (x == 0)
- etastart <-
- cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
- theta2eta(1 - psze.init, .lonempstr0 , earg = .eonempstr0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
- }
- }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0,
- .iprob = iprob, .ionempstr0 = ionempstr0,
- .type.fitted = type.fitted,
- .bias.red = bias.red,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
- earg = .eprob )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
+ if (log.arg) {
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
+ dposbinom(x[!index0], size = size[!index0],
+ prob = prob[!index0], log = TRUE)
+ } else {
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1-pobs0[!index0]) *
+ dposbinom(x[!index0], size = size[!index0],
+ prob = prob[!index0])
+ }
+ ans
+}
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
- warning("cannot find 'type.fitted'. ",
- "Returning the 'mean'.")
- "mean"
- }
+
+
+pzabinom <- function(q, size, prob, pobs0 = 0) {
+
+ LLL <- max(length(q), length(size), length(prob), length(pobs0))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+ ans <- rep_len(0.0, LLL)
+ if (!is.Numeric(pobs0) ||
+ any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+
+ ans[q > 0] <- pobs0[q > 0] +
+ (1 - pobs0[q > 0]) *
+ pposbinom(q[q > 0], size = size[q > 0], prob = prob[q > 0])
+ ans[q < 0] <- 0
+ ans[q == 0] <- pobs0[q == 0]
+
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+
+ ans
+}
+
+
+qzabinom <- function(p, size, prob, pobs0 = 0) {
+
+ LLL <- max(length(p), length(size), length(prob), length(pobs0))
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep_len(pobs0, LLL)
+
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+
+ ans <- p
+ ind4 <- (p > pobs0)
+ ans[!ind4] <- 0.0
+ ans[ ind4] <- qposbinom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ size = size[ind4],
+ prob = prob[ind4])
+ ans
+}
+
+
+rzabinom <- function(n, size, prob, pobs0 = 0) {
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ length.arg = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ ans <- rposbinom(use.n, size, prob)
+ if (length(pobs0) != use.n)
+ pobs0 <- rep_len(pobs0, use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ifelse(runif(use.n) < pobs0, 0, ans)
+}
+
+
+
+
+
+
+ zabinomial <-
+ function(lpobs0 = "logit",
+ lprob = "logit",
+ type.fitted = c("mean", "prob", "pobs0"),
+ ipobs0 = NULL, iprob = NULL,
+ imethod = 1,
+ zero = NULL # Was zero = 2 prior to 20130917
+ ) {
+
+
+
+ lpobs0 <- as.list(substitute(lpobs0))
+ epobs0 <- link2list(lpobs0)
+ lpobs0 <- attr(epobs0, "function.name")
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0"))[1]
+
+ if (length(ipobs0))
+ if (!is.Numeric(ipobs0, positive = TRUE) ||
+ ipobs0 >= 1)
+ stop("argument 'ipobs0' is out of range")
+
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ iprob >= 1)
+ stop("argument 'iprob' is out of range")
+
+
+
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ new("vglmff",
+ blurb = c("Zero-altered binomial distribution ",
+ "(Bernoulli and positive-binomial conditional model)\n\n",
+ "P[Y = 0] = pobs0,\n",
+ "P[Y = y] = (1 - pobs0) * dposbinom(x = y, size, prob), ",
+ "y = 1, 2, ..., size,\n\n",
+ "Link: ",
+ namesof("pobs0", lpobs0, earg = epobs0), ", ",
+ namesof("prob" , lprob, earg = eprob), "\n",
+ "Mean: (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("pobs0", "prob"),
+ type.fitted = .type.fitted ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .type.fitted = type.fitted ))),
+
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w <- w
+
+
+
+ if (NCOL(y) == 1) {
+ if (is.factor(y))
+ y <- y != levels(y)[1]
+ nn <- rep_len(1, n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + w * y) / (1.0 + w)
+
+
+ no.successes <- y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor ",
+ "(first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
+ if (!all(w == 1))
+ extra$new.w <- w
+
+
+ y <- as.matrix(y)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
+
+ extra$dimnamesy <- dimnames(y)
+ extra$type.fitted <- .type.fitted
+
+
+ predictors.names <-
+ c(namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE),
+ namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
+
+
+
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+
+ phi.init <- if (length( .ipobs0 )) .ipobs0 else {
+ prob0.est <- sum(Size[y == 0]) / sum(Size)
+ if ( .imethod == 1) {
+ (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
+ } else
+ if ( .imethod == 2) {
+ prob0.est
+ } else {
+ prob0.est * 0.5
+ }
+ }
+
+ phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] <- 0.05 # Last resort
+ phi.init[phi.init >= 0.99] <- 0.95 # Last resort
+
+
+
+
+ if (!length(etastart)) {
+ etastart <-
+ cbind(theta2eta(phi.init, .lpobs0, earg = .epobs0 ),
+ theta2eta( mustart, .lprob, earg = .eprob ))
+
+
+ mustart <- NULL
+ }
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0,
+ .iprob = iprob, .ipobs0 = ipobs0,
+ .imethod = imethod,
+ .type.fitted = type.fitted ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0"))[1]
+
+ phi0 <- eta2theta(eta[, 1], .lpobs0, earg = .epobs0 )
+ prob <- eta2theta(eta[, 2], .lprob, earg = .eprob )
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
ans <- switch(type.fitted,
- "mean" = onempstr0 * (1 - prob) / prob,
+ "mean" = (1 - phi0) * prob / (1 - (1 - prob)^Size),
"prob" = prob,
- "pobs0" = 1 - onempstr0 + onempstr0 * prob, # P(Y=0)
- "pstr0" = 1 - onempstr0,
- "onempstr0" = onempstr0)
+ "pobs0" = phi0) # P(Y=0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
@@ -5649,413 +6432,512 @@ rzigeom <- function(n, prob, pstr0 = 0) {
colnames(ans) <- NULL
}
ans
- }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0,
- .type.fitted = type.fitted ))),
- last = eval(substitute(expression({
- temp.names <- c(rep( .lprob , len = NOS),
- rep( .lonempstr0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
- misc$link <- temp.names
-
-
- misc$earg <- vector("list", M1 * NOS)
- names(misc$link) <-
- names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
-
- for (ii in 1:NOS) {
- misc$earg[[M1*ii-1]] <- .eprob
- misc$earg[[M1*ii ]] <- .eonempstr0
- }
+ }, list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(prob = .lprob, pobs0 = .lpobs0 )
+ misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
misc$imethod <- .imethod
misc$zero <- .zero
- misc$bias.red <- .bias.red
- misc$expected <- .expected
- misc$ionempstr0 <- .ionempstr0
-
-
- misc$pobs0 <- pobs0
- if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$pobs0) <- dimnames(y)
- misc$onempstr0 <- onempstr0
- if (length(dimnames(y)[[2]]) > 0)
- dimnames(misc$onempstr0) <- dimnames(y)
- }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0,
- .ionempstr0 = ionempstr0,
+ misc$expected <- TRUE
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0,
.zero = zero,
- .expected = expected,
- .bias.red = bias.red,
.imethod = imethod ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
- earg = .eprob )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+ pobs0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+ prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <-
- c(w) * dzigeom(x = y, prob = prob, pstr0 = 1 - onempstr0,
- log = TRUE)
+ orig.w * dzabinom(x = round(y * Size), size = Size,
+ prob = prob, pobs0 = pobs0,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0 ))),
- vfamily = c("zigeometricff"),
+ }, list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
+ vfamily = c("zabinomial"),
+
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ phi0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+ prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ okay1 <- all(is.finite(phi0)) && all(0 < phi0 & phi0 < 1) &&
+ all(is.finite(prob)) && all(0 < prob & prob < 1)
+ okay1
+ }, list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ NOS <- if (length(extra$NOS)) extra$NOS else 1
+
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+
+ phi0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+ prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+
+ dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+
+ df0.dprob <- -Size * (1 - prob)^(Size - 1)
+ df02.dprob2 <- Size * (Size - 1) * (1 - prob)^(Size - 2)
+ prob0 <- (1 - prob)^(Size)
+ oneminusf0 <- 1 - prob0
+
+
+ dl.dphi0 <- -1 / (1 - phi0)
+ dl.dprob <- c(w) * (y / prob - (1 - y) / (1 - prob)) +
+ c(orig.w) * df0.dprob / oneminusf0
+
+
+ dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
+ skip <- extra$skip.these
+ for (spp. in 1:NOS) {
+ dl.dprob[skip[, spp.], spp.] <- 0
+ }
+ ans <- cbind(c(orig.w) * dl.dphi0 * dphi0.deta,
+ dl.dprob * dprob.deta)
+
+
+ ans
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
- simslot = eval(substitute(
- function(object, nsim) {
+ weight = eval(substitute(expression({
+ wz <- matrix(0.0, n, M1)
- pwts <- if (length(pwts <- object at prior.weights) > 0)
- pwts else weights(object, type = "prior")
- if (any(pwts != 1))
- warning("ignoring prior weights")
- eta <- predict(object)
- prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
- earg = .eprob )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
- rzigeom(nsim * length(onempstr0), prob = prob, pstr0 = 1 - onempstr0)
- }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0 ))),
+ usualmeanY <- prob
+ meanY <- (1 - phi0) * usualmeanY / oneminusf0
+ term1 <- c(Size) * (meanY / prob^2 -
+ meanY / (1 - prob)^2) +
+ c(Size) * (1 - phi0) / (1 - prob)^2
+ term2 <- -(1 - phi0) * df02.dprob2 / oneminusf0
+ term3 <- -(1 - phi0) * (df0.dprob / oneminusf0)^2
+ ned2l.dprob2 <- term1 + term2 + term3
+ wz[, iam(2, 2, M)] <- ned2l.dprob2 * dprob.deta^2
- deriv = eval(substitute(expression({
- M1 <- 2
- prob <- eta2theta(eta[, c(TRUE, FALSE)], .lprob ,
- earg = .eprob )
- onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
- earg = .eonempstr0 )
+ mu.phi0 <- phi0
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ tmp100
+ } else {
+ (dphi0.deta^2) / tmp100
+ }
+ wz[, iam(1, 1, M)] <- tmp200
- prob0 <- prob # P(Y == 0) from the parent distribution
- pobs0 <- 1 - onempstr0 + (onempstr0) * prob0 # P(Y == 0)
- index0 <- (y == 0)
+ c(orig.w) * wz
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))))
+}
- dl.donempstr0 <- -(1 - prob0) / pobs0 # zz
- dl.donempstr0[!index0] <- 1 / (onempstr0[!index0]) # zz
- dl.dprob <- (onempstr0) / pobs0
- dl.dprob[!index0] <- 1 / prob[!index0] -
- y[!index0] / (1 - prob[!index0])
- dprob.deta <- dtheta.deta(prob , .lprob ,
- earg = .eprob )
- donempstr0.deta <- dtheta.deta(onempstr0 , .lonempstr0 ,
- earg = .eonempstr0 )
- dl.deta12 <- c(w) * cbind(dl.dprob * dprob.deta,
- dl.donempstr0 * donempstr0.deta)
+ zabinomialff <-
+ function(lprob = "logit",
+ lonempobs0 = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ iprob = NULL, ionempobs0 = NULL,
+ imethod = 1,
+ zero = "onempobs0") {
- dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M1 = M1)]
- dl.deta12
- }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0 ))),
- weight = eval(substitute(expression({
- if ( .expected ) {
- ned2l.dprob2 <- (onempstr0)^2 / pobs0 +
- (onempstr0) * ((1 - prob) / prob) *
- (1 / prob + 1 / (1 - prob)^2)
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+ lonempobs0 <- as.list(substitute(lonempobs0))
+ eonempobs0 <- link2list(lonempobs0)
+ lonempobs0 <- attr(eonempobs0, "function.name")
- ned2l.donempstr0.prob <- -1 / pobs0
- ned2l.donempstr02 <- (1 - prob0) / (( onempstr0) * pobs0)
- } else {
- od2l.dprob2 <- (( onempstr0) / pobs0)^2
- od2l.dprob2[!index0] <- 1 / (prob[!index0])^2 +
- y[!index0] / (1 - prob[!index0])^2
- od2l.donempstr0.prob <- -(pobs0 + (1 - prob0) * (onempstr0)) / pobs0^2
- od2l.donempstr0.prob[!index0] <- 0
- od2l.donempstr02 <- ((1 - prob0) / pobs0)^2
- od2l.donempstr02[!index0] <- 1 / ( onempstr0[!index0])^2
- }
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ iprob >= 1)
+ stop("argument 'iprob' is out of range")
+ if (length(ionempobs0))
+ if (!is.Numeric(ionempobs0, positive = TRUE) ||
+ ionempobs0 >= 1)
+ stop("argument 'ionempobs0' is out of range")
- allvals <- if ( .expected )
- c(c(w) * ned2l.dprob2 * dprob.deta^2,
- c(w) * ned2l.donempstr02 * donempstr0.deta^2,
- c(w) * ned2l.donempstr0.prob * dprob.deta *
- donempstr0.deta) else
- c(c(w) * od2l.dprob2 * dprob.deta^2,
- c(w) * od2l.donempstr02 * donempstr0.deta^2,
- c(w) * od2l.donempstr0.prob * dprob.deta *
- donempstr0.deta)
- wz <- array(allvals, dim = c(n, M / M1, 3))
- wz <- arwz2wz(wz, M = M, M1 = M1)
- wz
- }), list( .lprob = lprob, .lonempstr0 = lonempstr0,
- .eprob = eprob, .eonempstr0 = eonempstr0,
- .expected = expected ))))
-}
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+ new("vglmff",
+ blurb = c("Zero-altered binomial distribution ",
+ "(Bernoulli and positive-binomial conditional model)\n\n",
+ "P[Y = 0] = 1 - onempobs0,\n",
+ "P[Y = y] = onempobs0 * dposbinom(x = y, size, prob), ",
+ "y = 1, 2, ..., size,\n\n",
+ "Link: ",
+ namesof("prob" , lprob , earg = eprob ), ", ",
+ namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
+ "Mean: onempobs0 * prob / (1 - (1 - prob)^size)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob", "onempobs0"),
+ type.fitted = .type.fitted ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .type.fitted = type.fitted ))),
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w <- w
-dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
- LLL <- max(length(x), length(prob), length(pobs0))
- if (length(x) != LLL) x <- rep(x, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
- ans <- rep(0.0, len = LLL)
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
- index0 <- (x == 0)
- if (log.arg) {
- ans[ index0] <- log(pobs0[index0])
- ans[!index0] <- log1p(-pobs0[!index0]) +
- dposgeom(x[!index0],
- prob = prob[!index0], log = TRUE)
- } else {
- ans[ index0] <- pobs0[index0]
- ans[!index0] <- (1-pobs0[!index0]) *
- dposgeom(x[!index0],
- prob = prob[!index0])
- }
- ans
-}
+ if (NCOL(y) == 1) {
+ if (is.factor(y))
+ y <- y != levels(y)[1]
+ nn <- rep_len(1, n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + w * y) / (1.0 + w)
+ no.successes <- y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
-pzageom <- function(q, prob, pobs0 = 0) {
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor ",
+ "(first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
+ if (!all(w == 1))
+ extra$new.w <- w
- LLL <- max(length(q), length(prob), length(pobs0))
- if (length(q) != LLL) q <- rep(q, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
- ans <- rep(0.0, len = LLL)
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
- ans[q > 0] <- pobs0[q > 0] +
- (1 - pobs0[q > 0]) *
- pposgeom(q[q > 0], prob = prob[q > 0])
- ans[q < 0] <- 0
- ans[q == 0] <- pobs0[q == 0]
+ y <- as.matrix(y)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
- ans <- pmax(0, ans)
- ans <- pmin(1, ans)
+ extra$dimnamesy <- dimnames(y)
+ extra$type.fitted <- .type.fitted
- ans
-}
+ predictors.names <-
+ c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
+ namesof("onempobs0", .lonempobs0 , earg = .eonempobs0 , tag = FALSE))
+
+
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+
+ phi.init <- if (length( .ionempobs0 )) 1 - .ionempobs0 else {
+ prob0.est <- sum(Size[y == 0]) / sum(Size)
+ if ( .imethod == 1) {
+ (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
+ } else
+ if ( .imethod == 2) {
+ prob0.est
+ } else {
+ prob0.est * 0.5
+ }
+ }
-qzageom <- function(p, prob, pobs0 = 0) {
+ phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] <- 0.05 # Last resort
+ phi.init[phi.init >= 0.99] <- 0.95 # Last resort
- LLL <- max(length(p), length(prob), length(pobs0))
- if (length(p) != LLL) p <- rep(p, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
- ans <- p
- ind4 <- (p > pobs0)
- ans[!ind4] <- 0.0
- ans[ ind4] <- qposgeom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
- prob = prob[ind4])
- ans
-}
+ if (!length(etastart)) {
+ etastart <-
+ cbind(theta2eta( mustart, .lprob , earg = .eprob ),
+ theta2eta(1 - phi.init, .lonempobs0 , earg = .eonempobs0 ))
-rzageom <- function(n, prob, pobs0 = 0) {
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ mustart <- NULL
+ }
+ }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0,
+ .iprob = iprob, .ionempobs0 = ionempobs0,
+ .imethod = imethod,
+ .type.fitted = type.fitted ))),
- ans <- rposgeom(use.n, prob)
- if (length(pobs0) != use.n)
- pobs0 <- rep(pobs0, len = use.n)
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be between 0 and 1 inclusive")
- ifelse(runif(use.n) < pobs0, 0, ans)
-}
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
+
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+ ans <- switch(type.fitted,
+ "mean" = onempobs0 * prob / (1 - (1 - prob)^Size),
+ "prob" = prob,
+ "pobs0" = 1 - onempobs0, # P(Y=0)
+ "onempobs0" = onempobs0) # P(Y>0)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
+ }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(prob = .lprob, onempobs0 = .lonempobs0 )
+ misc$earg <- list(prob = .eprob, onempobs0 = .eonempobs0 )
+ misc$imethod <- .imethod
+ misc$zero <- .zero
+ misc$expected <- TRUE
+ }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0,
+ .zero = zero,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ orig.w * dzabinom(x = round(y * Size), size = Size,
+ prob = prob, pobs0 = 1 - onempobs0,
+ log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0 ))),
+ vfamily = c("zabinomialff"),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+ okay1 <- all(is.finite(onempobs0)) &&
+ all(0 < onempobs0 & onempobs0 < 1) &&
+ all(is.finite(prob )) &&
+ all(0 < prob & prob < 1)
+ okay1
+ }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0 ))),
-dzabinom <- function(x, size, prob, pobs0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log) || length(log) != 1)
- stop("bad input for argument 'log'")
- rm(log)
- LLL <- max(length(x), length(size), length(prob), length(pobs0))
- if (length(x) != LLL) x <- rep(x, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
- ans <- rep(0.0, len = LLL)
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
- index0 <- (x == 0)
- if (log.arg) {
- ans[ index0] <- log(pobs0[index0])
- ans[!index0] <- log1p(-pobs0[!index0]) +
- dposbinom(x[!index0], size = size[!index0],
- prob = prob[!index0], log = TRUE)
- } else {
- ans[ index0] <- pobs0[index0]
- ans[!index0] <- (1-pobs0[!index0]) *
- dposbinom(x[!index0], size = size[!index0],
- prob = prob[!index0])
- }
- ans
-}
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ NOS <- if (length(extra$NOS)) extra$NOS else 1
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+ phi0 <- 1 - onempobs0
-pzabinom <- function(q, size, prob, pobs0 = 0) {
+ dprob.deta <- dtheta.deta(prob , .lprob ,
+ earg = .eprob )
+ donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
+ earg = .eonempobs0 )
- LLL <- max(length(q), length(size), length(prob), length(pobs0))
- if (length(q) != LLL) q <- rep(q, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
- ans <- rep(0.0, len = LLL)
- if (!is.Numeric(pobs0) ||
- any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
+ df0.dprob <- -Size * (1 - prob)^(Size - 1)
+ df02.dprob2 <- Size * (Size - 1) * (1 - prob)^(Size - 2)
+ prob0 <- (1 - prob)^(Size)
+ oneminusf0 <- 1 - prob0
- ans[q > 0] <- pobs0[q > 0] +
- (1 - pobs0[q > 0]) *
- pposbinom(q[q > 0], size = size[q > 0], prob = prob[q > 0])
- ans[q < 0] <- 0
- ans[q == 0] <- pobs0[q == 0]
- ans <- pmax(0, ans)
- ans <- pmin(1, ans)
+ dl.dprob <- c(w) * (y / prob - (1 - y) / (1 - prob)) +
+ c(orig.w) * df0.dprob / oneminusf0
+ dl.donempobs0 <- +1 / (onempobs0)
- ans
-}
+ dl.donempobs0[y == 0] <-
+ -1 / (1 - onempobs0[y == 0]) # Do it in 1 line
+ skip <- extra$skip.these
+ for (spp. in 1:NOS) {
+ dl.dprob[skip[, spp.], spp.] <- 0
+ }
-qzabinom <- function(p, size, prob, pobs0 = 0) {
- LLL <- max(length(p), length(size), length(prob), length(pobs0))
- if (length(p) != LLL) p <- rep(p, len = LLL);
- if (length(size) != LLL) size <- rep(size, len = LLL);
- if (length(prob) != LLL) prob <- rep(prob, len = LLL);
- if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ ans <- cbind( dl.dprob * dprob.deta,
+ c(orig.w) * dl.donempobs0 * donempobs0.deta)
+
+ ans
+ }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0 ))),
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be in [0,1]")
- ans <- p
- ind4 <- (p > pobs0)
- ans[!ind4] <- 0.0
- ans[ ind4] <- qposbinom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
- size = size[ind4],
- prob = prob[ind4])
- ans
-}
+ weight = eval(substitute(expression({
+ wz <- matrix(0.0, n, M1)
+ usualmeanY <- prob
+ meanY <- (1 - phi0) * usualmeanY / oneminusf0
-rzabinom <- function(n, size, prob, pobs0 = 0) {
- use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- length.arg = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- ans <- rposbinom(use.n, size, prob)
- if (length(pobs0) != use.n)
- pobs0 <- rep(pobs0, len = use.n)
- if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
- stop("argument 'pobs0' must be between 0 and 1 inclusive")
- ifelse(runif(use.n) < pobs0, 0, ans)
-}
+ term1 <- c(Size) * (meanY / prob^2 -
+ meanY / (1 - prob)^2) +
+ c(Size) * (1 - phi0) / (1 - prob)^2
+ term2 <- -(1 - phi0) * df02.dprob2 / oneminusf0
+ term3 <- -(1 - phi0) * (df0.dprob / oneminusf0)^2
+ ned2l.dprob2 <- term1 + term2 + term3
+ wz[, iam(1, 1, M)] <- ned2l.dprob2 * dprob.deta^2
+ mu.phi0 <- phi0
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if (FALSE &&
+ .lonempobs0 == "logit" &&
+ is.empty.list( .eonempobs0 )) {
+ tmp100
+ } else {
+ (donempobs0.deta^2) / tmp100
+ }
+ wz[, iam(2, 2, M)] <- tmp200
+ c(orig.w) * wz
+ }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
+ .eprob = eprob, .eonempobs0 = eonempobs0 ))))
+}
- zabinomial <-
- function(lpobs0 = "logit",
- lprob = "logit",
- type.fitted = c("mean", "prob", "pobs0"),
- ipobs0 = NULL, iprob = NULL,
- imethod = 1,
- zero = NULL # Was zero = 2 prior to 20130917
- ) {
- lpobs0 <- as.list(substitute(lpobs0))
- epobs0 <- link2list(lpobs0)
- lpobs0 <- attr(epobs0, "function.name")
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
+ zageometric <-
+ function(lpobs0 = "logit", lprob = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ imethod = 1,
+ ipobs0 = NULL, iprob = NULL,
+ zero = NULL) {
- type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0"))[1]
- if (length(ipobs0))
- if (!is.Numeric(ipobs0, positive = TRUE) ||
- ipobs0 >= 1)
- stop("argument 'ipobs0' is out of range")
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- iprob >= 1)
- stop("argument 'iprob' is out of range")
+ lpobs0 <- as.list(substitute(lpobs0))
+ epobs0 <- link2list(lpobs0)
+ lpobs0 <- attr(epobs0, "function.name")
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
+ imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ max(iprob) >= 1)
+ stop("argument 'iprob' out of range")
+ if (length(ipobs0))
+ if (!is.Numeric(ipobs0, positive = TRUE) ||
+ max(ipobs0) >= 1)
+ stop("argument 'ipobs0' out of range")
new("vglmff",
- blurb = c("Zero-altered binomial distribution ",
- "(Bernoulli and positive-binomial conditional model)\n\n",
- "P[Y = 0] = pobs0,\n",
- "P[Y = y] = (1 - pobs0) * dposbinom(x = y, size, prob), ",
- "y = 1, 2, ..., size,\n\n",
- "Link: ",
- namesof("pobs0", lpobs0, earg = epobs0), ", ",
- namesof("prob" , lprob, earg = eprob), "\n",
- "Mean: (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
+ blurb = c("Zero-altered geometric ",
+ "(Bernoulli and positive-geometric conditional model)\n\n",
+ "Links: ",
+ namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
+ namesof("prob" , lprob , earg = eprob , tag = FALSE), "\n",
+ "Mean: (1 - pobs0) / prob"),
+
constraints = eval(substitute(expression({
+
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -6063,61 +6945,34 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
infos = eval(substitute(function(...) {
list(M1 = 2,
- Q1 = NA,
+ Q1 = 1,
expected = TRUE,
multipleResponses = FALSE,
parameters.names = c("pobs0", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted
+ ))),
initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w <- w
-
-
-
- if (NCOL(y) == 1) {
- if (is.factor(y))
- y <- y != levels(y)[1]
- nn <- rep(1, n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + w * y) / (1.0 + w)
+ M1 <- 2
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
- no.successes <- y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y <- round(y)
- nvec <- y[, 1] + y[, 2]
- y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
- w <- w * nvec
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + nvec * y) / (1 + nvec)
- } else {
- stop("for the binomialff family, response 'y' must be a ",
- "vector of 0 and 1's\n",
- "or a factor ",
- "(first level = fail, other levels = success),\n",
- "or a 2-column matrix where col 1 is the no. of ",
- "successes and col 2 is the no. of failures")
- }
- if (!all(w == 1))
- extra$new.w <- w
- y <- as.matrix(y)
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
@@ -6125,50 +6980,47 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
-
+
+ mynames1 <- param.names("pobs0", ncoly)
+ mynames2 <- param.names("prob", ncoly)
predictors.names <-
- c(namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE),
- namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
-
+ c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
+ namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
+ interleave.VGAM(M1*NOS, M1 = M1)]
+ if (!length(etastart)) {
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
+ foo <- function(x) mean(as.numeric(x == 0))
+ phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
+ if (length( .ipobs0 ))
+ phi0.init <- matrix( .ipobs0 , n, ncoly, byrow = TRUE)
- phi.init <- if (length( .ipobs0 )) .ipobs0 else {
- prob0.est <- sum(Size[y == 0]) / sum(Size)
- if ( .imethod == 1) {
- (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
- } else
- if ( .imethod == 2) {
- prob0.est
- } else {
- prob0.est * 0.5
- }
- }
- phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] <- 0.05 # Last resort
- phi.init[phi.init >= 0.99] <- 0.95 # Last resort
+ prob.init <-
+ if ( .imethod == 2)
+ 1 / (1 + y + 1/16) else
+ if ( .imethod == 1)
+ (1 - phi0.init) / (1 +
+ matrix(colSums(y * w) / colSums(w) + 1/16,
+ n, ncoly, byrow = TRUE)) else
+ (1 - phi0.init) / (1 +
+ matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+ if (length( .iprob ))
+ prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
- if (!length(etastart)) {
- etastart <-
- cbind(theta2eta(phi.init, .lpobs0, earg = .epobs0 ),
- theta2eta( mustart, .lprob, earg = .eprob ))
-
- mustart <- NULL
+ etastart <- cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
+ theta2eta(prob.init, .lprob , earg = .eprob ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
- }), list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0,
- .iprob = iprob, .ipobs0 = ipobs0,
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob,
+ .ipobs0 = ipobs0, .iprob = iprob,
.imethod = imethod,
- .type.fitted = type.fitted ))),
-
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -6177,18 +7029,21 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0"))[1]
-
- phi0 <- eta2theta(eta[, 1], .lpobs0, earg = .epobs0 )
- prob <- eta2theta(eta[, 2], .lprob, earg = .eprob )
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
+ M1 <- 2
+ NOS <- ncol(eta) / M1
+
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+
ans <- switch(type.fitted,
- "mean" = (1 - phi0) * prob / (1 - (1 - prob)^Size),
+ "mean" = (1 - phi0) / prob,
"prob" = prob,
- "pobs0" = phi0) # P(Y=0)
+ "pobs0" = phi0, # P(Y=0)
+ "onempobs0" = 1 - phi0) # P(Y>0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
@@ -6200,131 +7055,165 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
colnames(ans) <- NULL
}
ans
- }, list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0 ))),
-
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
last = eval(substitute(expression({
- misc$link <- c(prob = .lprob, pobs0 = .lpobs0 )
- misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
+ temp.names <- c(rep_len( .lpobs0 , NOS),
+ rep_len( .lprob , NOS))
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+ misc$link <- temp.names
+
+ misc$earg <- vector("list", M1 * NOS)
+
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+ for (ii in 1:NOS) {
+ misc$earg[[M1*ii-1]] <- .epobs0
+ misc$earg[[M1*ii ]] <- .eprob
+ }
+
- misc$imethod <- .imethod
- misc$zero <- .zero
misc$expected <- TRUE
- }), list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0,
- .zero = zero,
+ misc$imethod <- .imethod
+ misc$ipobs0 <- .ipobs0
+ misc$iprob <- .iprob
+ misc$multipleResponses <- TRUE
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob,
+ .ipobs0 = ipobs0, .iprob = iprob,
.imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL,
+ summation = TRUE) {
+ NOS <- extra$NOS
+ M1 <- 2
+
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ll.elts <-
+ c(w) * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE)
+ if (summation) {
+ sum(ll.elts)
+ } else {
+ ll.elts
+ }
+ }
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+ vfamily = c("zageometric"),
+
+
+
+
+ simslot = eval(substitute(
+ function(object, nsim) {
+
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ phi0 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lprob , earg = .eprob ))
+ rzageom(nsim * length(prob), prob = prob, pobs0 = phi0)
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ phi0 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lpobs0 , earg = .epobs0 )
+ prob <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lprob , earg = .eprob )
+ okay1 <- all(is.finite(phi0)) && all(0 < phi0 & phi0 < 1) &&
+ all(is.finite(prob)) && all(0 < prob & prob < 1)
+ okay1
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+
+
+
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta,
- extra = NULL,
- summation = TRUE) {
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
- pobs0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
- prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- ll.elts <-
- orig.w * dzabinom(x = round(y * Size), size = Size,
- prob = prob, pobs0 = pobs0,
- log = TRUE)
- if (summation) {
- sum(ll.elts)
- } else {
- ll.elts
- }
- }
- }, list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0 ))),
- vfamily = c("zabinomial"),
- deriv = eval(substitute(expression({
- M1 <- 2
- NOS <- if (length(extra$NOS)) extra$NOS else 1
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
- phi0 <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
- prob <- eta2theta(eta[, 2], .lprob , earg = .eprob )
- dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
- dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ NOS <- ncol(eta) / M1 # extra$NOS
+ y0 <- extra$y0
+ skip <- extra$skip.these
- df0.dprob <- -Size * (1 - prob)^(Size - 1)
- df02.dprob2 <- Size * (Size - 1) * (1 - prob)^(Size - 2)
- prob0 <- (1 - prob)^(Size)
- oneminusf0 <- 1 - prob0
+ phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+ dl.dprob <- 1 / prob - (y - 1) / (1 - prob)
dl.dphi0 <- -1 / (1 - phi0)
- dl.dprob <- c(w) * (y / prob - (1 - y) / (1 - prob)) +
- c(orig.w) * df0.dprob / oneminusf0
- dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
- skip <- extra$skip.these
for (spp. in 1:NOS) {
+ dl.dphi0[skip[, spp.], spp.] <- 1 / phi0[skip[, spp.], spp.]
dl.dprob[skip[, spp.], spp.] <- 0
}
+ dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
- ans <- cbind(c(orig.w) * dl.dphi0 * dphi0.deta,
- dl.dprob * dprob.deta)
-
-
+ ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
+ dl.dprob * dprob.deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
- }), list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0 ))),
-
-
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M1)
- usualmeanY <- prob
- meanY <- (1 - phi0) * usualmeanY / oneminusf0
+ wz <- matrix(0.0, n, M1*NOS)
- term1 <- c(Size) * (meanY / prob^2 -
- meanY / (1 - prob)^2) +
- c(Size) * (1 - phi0) / (1 - prob)^2
+ ned2l.dprob2 <- (1 - phi0) / (prob^2 * (1 - prob))
- term2 <- -(1 - phi0) * df02.dprob2 / oneminusf0
- term3 <- -(1 - phi0) * (df0.dprob / oneminusf0)^2
- ned2l.dprob2 <- term1 + term2 + term3
- wz[, iam(2, 2, M)] <- ned2l.dprob2 * dprob.deta^2
+ wz[, NOS+(1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
mu.phi0 <- phi0
tmp100 <- mu.phi0 * (1.0 - mu.phi0)
tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
- tmp100
+ cbind(c(w) * tmp100)
} else {
- (dphi0.deta^2) / tmp100
+ cbind(c(w) * (dphi0.deta^2) / tmp100)
}
- wz[, iam(1, 1, M)] <- tmp200
+ wz[, 1:NOS] <- tmp200
- c(orig.w) * wz
- }), list( .lprob = lprob, .lpobs0 = lpobs0,
- .eprob = eprob, .epobs0 = epobs0 ))))
-}
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+
+ wz
+ }), list( .lpobs0 = lpobs0,
+ .epobs0 = epobs0 ))))
+} # End of zageometric
- zabinomialff <-
- function(lprob = "logit",
- lonempobs0 = "logit",
- type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
- iprob = NULL, ionempobs0 = NULL,
- imethod = 1,
- zero = "onempobs0") {
+ zageometricff <-
+ function(lprob = "logit", lonempobs0 = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ imethod = 1,
+ iprob = NULL, ionempobs0 = NULL,
+ zero = "onempobs0") {
lprob <- as.list(substitute(lprob))
@@ -6335,38 +7224,36 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
eonempobs0 <- link2list(lonempobs0)
lonempobs0 <- attr(eonempobs0, "function.name")
-
type.fitted <- match.arg(type.fitted,
c("mean", "prob", "pobs0", "onempobs0"))[1]
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- iprob >= 1)
- stop("argument 'iprob' is out of range")
- if (length(ionempobs0))
- if (!is.Numeric(ionempobs0, positive = TRUE) ||
- ionempobs0 >= 1)
- stop("argument 'ionempobs0' is out of range")
-
-
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
+ imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ max(iprob) >= 1)
+ stop("argument 'iprob' out of range")
+
+ if (length(ionempobs0))
+ if (!is.Numeric(ionempobs0, positive = TRUE) ||
+ max(ionempobs0) >= 1)
+ stop("argument 'ionempobs0' out of range")
+
new("vglmff",
- blurb = c("Zero-altered binomial distribution ",
- "(Bernoulli and positive-binomial conditional model)\n\n",
- "P[Y = 0] = 1 - onempobs0,\n",
- "P[Y = y] = onempobs0 * dposbinom(x = y, size, prob), ",
- "y = 1, 2, ..., size,\n\n",
- "Link: ",
- namesof("prob" , lprob , earg = eprob ), ", ",
- namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
- "Mean: onempobs0 * prob / (1 - (1 - prob)^size)"),
+ blurb = c("Zero-altered geometric ",
+ "(Bernoulli and positive-geometric conditional model)\n\n",
+ "Links: ",
+ namesof("prob" , lprob , earg = eprob , tag = FALSE), ", ",
+ namesof("onempobs0", lonempobs0, earg = eonempobs0, tag = FALSE), "\n",
+ "Mean: onempobs0 / prob"),
+
constraints = eval(substitute(expression({
+
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -6374,61 +7261,34 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
infos = eval(substitute(function(...) {
list(M1 = 2,
- Q1 = NA,
+ Q1 = 1,
expected = TRUE,
- multipleResponses = FALSE,
+ multipleResponses = TRUE,
parameters.names = c("prob", "onempobs0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted
+ ))),
initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w <- w
-
-
-
- if (NCOL(y) == 1) {
- if (is.factor(y))
- y <- y != levels(y)[1]
- nn <- rep(1, n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + w * y) / (1.0 + w)
+ M1 <- 2
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
- no.successes <- y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y <- round(y)
- nvec <- y[, 1] + y[, 2]
- y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
- w <- w * nvec
- if (!length(mustart) && !length(etastart))
- mustart <- (0.5 + nvec * y) / (1 + nvec)
- } else {
- stop("for the binomialff family, response 'y' must be a ",
- "vector of 0 and 1's\n",
- "or a factor ",
- "(first level = fail, other levels = success),\n",
- "or a 2-column matrix where col 1 is the no. of ",
- "successes and col 2 is the no. of failures")
- }
- if (!all(w == 1))
- extra$new.w <- w
- y <- as.matrix(y)
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
@@ -6436,48 +7296,49 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
-
+
+ mynames1 <- param.names("prob", ncoly)
+ mynames2 <- param.names("onempobs0", ncoly)
predictors.names <-
- c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
- namesof("onempobs0", .lonempobs0 , earg = .eonempobs0 , tag = FALSE))
+ c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
+ namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
+ interleave.VGAM(M1*NOS, M1 = M1)]
+ if (!length(etastart)) {
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
+ foo <- function(x) mean(as.numeric(x == 0))
+ phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
+ if (length( .ionempobs0 ))
+ phi0.init <- matrix( 1 - .ionempobs0 , n, ncoly, byrow = TRUE)
- phi.init <- if (length( .ionempobs0 )) 1 - .ionempobs0 else {
- prob0.est <- sum(Size[y == 0]) / sum(Size)
- if ( .imethod == 1) {
- (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
- } else
- if ( .imethod == 2) {
- prob0.est
- } else {
- prob0.est * 0.5
- }
- }
- phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] <- 0.05 # Last resort
- phi.init[phi.init >= 0.99] <- 0.95 # Last resort
+ prob.init <-
+ if ( .imethod == 2)
+ 1 / (1 + y + 1/16) else
+ if ( .imethod == 1)
+ (1 - phi0.init) / (1 +
+ matrix(colSums(y * w) / colSums(w) + 1/16,
+ n, ncoly, byrow = TRUE)) else
+ (1 - phi0.init) / (1 +
+ matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+ if (length( .iprob ))
+ prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
- if (!length(etastart)) {
- etastart <-
- cbind(theta2eta( mustart, .lprob , earg = .eprob ),
- theta2eta(1 - phi.init, .lonempobs0 , earg = .eonempobs0 ))
- mustart <- NULL
+ etastart <-
+ cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
+ theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
+
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
- }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0,
- .iprob = iprob, .ionempobs0 = ionempobs0,
+ }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob,
+ .ionempobs0 = ionempobs0, .iprob = iprob,
.imethod = imethod,
- .type.fitted = type.fitted ))),
-
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -6487,18 +7348,21 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
type.fitted <- match.arg(type.fitted,
c("mean", "prob", "pobs0", "onempobs0"))[1]
-
- prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
+
+ NOS <- extra$NOS
+ M1 <- 2
+
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lprob , earg = .eprob ))
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 ))
+
ans <- switch(type.fitted,
- "mean" = onempobs0 * prob / (1 - (1 - prob)^Size),
- "prob" = prob,
- "pobs0" = 1 - onempobs0, # P(Y=0)
- "onempobs0" = onempobs0) # P(Y>0)
+ "mean" = onempobs0 / prob,
+ "prob" = prob,
+ "pobs0" = 1 - onempobs0, # P(Y=0)
+ "onempobs0" = onempobs0) # P(Y>0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
@@ -6510,175 +7374,325 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
colnames(ans) <- NULL
}
ans
- }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0 ))),
-
+ }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
last = eval(substitute(expression({
- misc$link <- c(prob = .lprob, onempobs0 = .lonempobs0 )
- misc$earg <- list(prob = .eprob, onempobs0 = .eonempobs0 )
+ temp.names <- c(rep_len( .lprob , NOS),
+ rep_len( .lonempobs0 , NOS))
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+ misc$link <- temp.names
+
+ misc$earg <- vector("list", M1 * NOS)
+
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+ for (ii in 1:NOS) {
+ misc$earg[[M1*ii-1]] <- .eprob
+ misc$earg[[M1*ii ]] <- .eonempobs0
+ }
+
- misc$imethod <- .imethod
- misc$zero <- .zero
misc$expected <- TRUE
- }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0,
- .zero = zero,
+ misc$imethod <- .imethod
+ misc$ionempobs0 <- .ionempobs0
+ misc$iprob <- .iprob
+ misc$multipleResponses <- TRUE
+ }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob,
+ .ionempobs0 = ionempobs0, .iprob = iprob,
.imethod = imethod ))),
-
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
- prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
+ NOS <- extra$NOS
+ M1 <- 2
+
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lprob , earg = .eprob ))
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 ))
+
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <-
- orig.w * dzabinom(x = round(y * Size), size = Size,
- prob = prob, pobs0 = 1 - onempobs0,
- log = TRUE)
+ c(w) * dzageom(x = y, pobs0 = 1 - onempobs0, prob = prob,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0 ))),
- vfamily = c("zabinomialff"),
+ }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
+ vfamily = c("zageometricff"),
- deriv = eval(substitute(expression({
- M1 <- 2
- NOS <- if (length(extra$NOS)) extra$NOS else 1
- orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
- new.w <- if (length(extra$new.w)) extra$new.w else 1
- Size <- new.w / orig.w
- prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
- onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
- phi0 <- 1 - onempobs0
- dprob.deta <- dtheta.deta(prob , .lprob ,
- earg = .eprob )
- donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
- earg = .eonempobs0 )
+ simslot = eval(substitute(
+ function(object, nsim) {
- df0.dprob <- -Size * (1 - prob)^(Size - 1)
- df02.dprob2 <- Size * (Size - 1) * (1 - prob)^(Size - 2)
- prob0 <- (1 - prob)^(Size)
- oneminusf0 <- 1 - prob0
+ pwts <- if (length(pwts <- object at prior.weights) > 0)
+ pwts else weights(object, type = "prior")
+ if (any(pwts != 1))
+ warning("ignoring prior weights")
+ eta <- predict(object)
+ onempobs0 <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 ))
+ prob <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lprob , earg = .eprob ))
+ rzageom(nsim * length(prob), pobs0 = 1 - onempobs0, prob = prob)
+ }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ prob <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lprob , earg = .eprob )
+ onempobs0 <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 )
+ okay1 <- all(is.finite(onempobs0)) &&
+ all(0 < onempobs0 & onempobs0 < 1) &&
+ all(is.finite(prob )) &&
+ all(0 < prob & prob < 1)
+ okay1
+ }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
- dl.dprob <- c(w) * (y / prob - (1 - y) / (1 - prob)) +
- c(orig.w) * df0.dprob / oneminusf0
- dl.donempobs0 <- +1 / (onempobs0)
- dl.donempobs0[y == 0] <-
- -1 / (1 - onempobs0[y == 0]) # Do it in 1 line
+
+
+
+
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ NOS <- ncol(eta) / M1 # extra$NOS
+ y0 <- extra$y0
skip <- extra$skip.these
+
+ prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lprob , earg = .eprob ))
+ onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ .lonempobs0 , earg = .eonempobs0 ))
+ pobs0 <- 1 - onempobs0
+
+
+ dl.dprob <- 1 / prob - (y - 1) / (1 - prob)
+ dl.donempobs0 <- +1 / (onempobs0)
+
+
for (spp. in 1:NOS) {
+ dl.donempobs0[skip[, spp.], spp.] <- -1 / pobs0[skip[, spp.], spp.]
dl.dprob[skip[, spp.], spp.] <- 0
}
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+ donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
+ earg = .eonempobs0 )
- ans <- cbind( dl.dprob * dprob.deta,
- c(orig.w) * dl.donempobs0 * donempobs0.deta)
-
+ ans <- c(w) * cbind(dl.dprob * dprob.deta,
+ dl.donempobs0 * donempobs0.deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
- }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0 ))),
+ }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
+ .eonempobs0 = eonempobs0, .eprob = eprob ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(0.0, n, M1*NOS)
- weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M1)
- usualmeanY <- prob
- meanY <- (1 - phi0) * usualmeanY / oneminusf0
+ ned2l.dprob2 <- (1 - pobs0) / (prob^2 * (1 - prob))
+ wz[, (1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
- term1 <- c(Size) * (meanY / prob^2 -
- meanY / (1 - prob)^2) +
- c(Size) * (1 - phi0) / (1 - prob)^2
- term2 <- -(1 - phi0) * df02.dprob2 / oneminusf0
- term3 <- -(1 - phi0) * (df0.dprob / oneminusf0)^2
- ned2l.dprob2 <- term1 + term2 + term3
- wz[, iam(1, 1, M)] <- ned2l.dprob2 * dprob.deta^2
+ mu.phi0 <- pobs0 # phi0
+ tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp200 <- if ( FALSE &&
+ .lonempobs0 == "logit" &&
+ is.empty.list( .eonempobs0 )) {
+
+ cbind(c(w) * tmp100)
+ } else {
+ cbind(c(w) * (donempobs0.deta^2) / tmp100)
+ }
+ wz[, NOS+(1:NOS)] <- tmp200
+
+
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+
+
+ wz
+ }), list( .lonempobs0 = lonempobs0,
+ .eonempobs0 = eonempobs0 ))))
+} # End of zageometricff
+
+
+
+
+
+
+
+deflat.limit.oipospois <- function(lambda) {
+ if (any(lambda < 0))
+ stop("argument 'lambda' cannot be negative")
+ ans <- -lambda / (expm1(lambda) - lambda)
+ ans[is.infinite(lambda)] <- 0
+ ans
+}
+
+
+doipospois <- function(x, lambda, pstr1 = 0, log = FALSE) {
+
+
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(lambda), length(pstr1))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+
+ ans <- rep(NA_real_, LLL)
+ index1 <- (x == 1)
+ if (log.arg) {
+ ans[ index1] <- log(pstr1[ index1] + (1 - pstr1[ index1]) *
+ dpospois(x[ index1], lambda[ index1]))
+ ans[!index1] <- log1p(-pstr1[!index1]) +
+ dpospois(x[!index1], lambda[!index1], log = TRUE)
+ } else {
+ ans[ index1] <- pstr1[ index1] + (1 - pstr1[ index1]) *
+ dpospois(x[ index1], lambda[ index1])
+ ans[!index1] <- (1 - pstr1[!index1]) *
+ dpospois(x[!index1], lambda[!index1])
+ }
+
+
+ deflat.limit <- deflat.limit.oipospois(lambda)
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[pstr1 > 1] <- NaN
+
+ ans
+} # doipospois
+
+
+
+
+
+
+poipospois <- function(q, lambda, pstr1 = 0) {
+
+ LLL <- max(length(q), length(lambda), length(pstr1))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+ ans <- rep_len(NA_real_, LLL)
+ deflat.limit <- deflat.limit.oipospois(lambda)
+
+ ans <- ppospois(q, lambda) #, lower.tail = lower.tail, log.p = log.p
+ ans <- ifelse(q < 1, 0, pstr1 + (1 - pstr1) * ans)
+
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[1 < pstr1] <- NaN
+ ans[lambda <= 0] <- NaN
+
+ ans
+} # poipospois
+
+
+
+
+qoipospois <- function(p, lambda, pstr1 = 0) {
+
+ LLL <- max(length(p), length(lambda), length(pstr1))
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(lambda) != LLL) lambda <- rep_len(lambda, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+ ans <- rep_len(NA_real_, LLL)
+ deflat.limit <- deflat.limit.oipospois(lambda)
+
+ ans[p <= pstr1] <- 1
+ pindex <- (deflat.limit <= pstr1) & (pstr1 < p)
+ ans[pindex] <-
+ qpospois((p[pindex] - pstr1[pindex]) / (1 - pstr1[pindex]),
+ lambda = lambda[pindex])
+
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[1 < pstr1] <- NaN
+
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
+ ans[lambda <= 0] <- NaN
+
+ ans
+} # qoipospois
+
+
+
+roipospois <- function(n, lambda, pstr1 = 0) {
+
+ ans <- qoipospois(runif(n), lambda, pstr1 = pstr1)
+ ans
+} # roipospois
- mu.phi0 <- phi0
- tmp100 <- mu.phi0 * (1.0 - mu.phi0)
- tmp200 <- if (FALSE &&
- .lonempobs0 == "logit" &&
- is.empty.list( .eonempobs0 )) {
- tmp100
- } else {
- (donempobs0.deta^2) / tmp100
- }
- wz[, iam(2, 2, M)] <- tmp200
- c(orig.w) * wz
- }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
- .eprob = eprob, .eonempobs0 = eonempobs0 ))))
-}
+ oipospoisson <-
+ function(lpstr1 = "logit", llambda = "loge",
+ type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"),
+ ilambda = NULL,
+ gpstr1 = (1:19)/20,
+ gprobs.y = (1:19)/20, # 20160518; grid for finding lambd.init
+ imethod = 1,
+ zero = NULL) {
+ if (!is.Numeric(imethod, length.arg = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ gpstr10 <- gpstr1
- zageometric <-
- function(lpobs0 = "logit", lprob = "logit",
- type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
- imethod = 1,
- ipobs0 = NULL, iprob = NULL,
- zero = NULL) {
+ lpstr10 <- as.list(substitute(lpstr1))
+ epstr10 <- link2list(lpstr10)
+ lpstr10 <- attr(epstr10, "function.name")
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- lpobs0 <- as.list(substitute(lpobs0))
- epobs0 <- link2list(lpobs0)
- lpobs0 <- attr(epobs0, "function.name")
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "onempobs0"))[1]
+ c("mean", "lambda", "pobs1", "pstr1", "onempstr1"))[1]
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- max(iprob) >= 1)
- stop("argument 'iprob' out of range")
- if (length(ipobs0))
- if (!is.Numeric(ipobs0, positive = TRUE) ||
- max(ipobs0) >= 1)
- stop("argument 'ipobs0' out of range")
+ if (length(ilambda))
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("argument 'ilambda' values must be positive")
new("vglmff",
- blurb = c("Zero-altered geometric ",
- "(Bernoulli and positive-geometric conditional model)\n\n",
+ blurb = c("One-inflated positive Poisson\n\n",
"Links: ",
- namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
- namesof("prob" , lprob , earg = eprob , tag = FALSE), "\n",
- "Mean: (1 - pobs0) / prob"),
+ namesof("pstr1", lpstr10, earg = epstr10 ), ", ",
+ namesof("lambda", llambda, earg = elambda ), "\n",
+ "Mean: (1 - pstr1) * lambda / (1 - exp(-lambda))"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -6688,19 +7702,21 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
list(M1 = 2,
Q1 = 1,
expected = TRUE,
- multipleResponses = FALSE,
- parameters.names = c("pobs0", "prob"),
+ imethod = .imethod ,
+ multipleResponses = TRUE,
+ parameters.names = c("pstr1", "lambda"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .imethod = imethod,
.type.fitted = type.fitted
))),
-
initialize = eval(substitute(expression({
M1 <- 2
temp5 <-
w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
ncol.w.max = Inf,
@@ -6713,274 +7729,392 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
-
- extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
- extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
-
+ NOS <- ncoly <- ncol(y)
+ extra$ncoly <- ncoly
extra$dimnamesy <- dimnames(y)
+ M <- M1 * ncoly
extra$type.fitted <- .type.fitted
-
- mynames1 <- param.names("pobs0", ncoly)
- mynames2 <- param.names("prob", ncoly)
+ mynames1 <- param.names("pstr1", ncoly)
+ mynames2 <- param.names("lambda", ncoly)
predictors.names <-
- c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
- namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
- interleave.VGAM(M1*NOS, M1 = M1)]
+ c(namesof(mynames1, .lpstr10 , earg = .epstr10 , tag = FALSE),
+ namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
+ interleave.VGAM(M, M1 = M1)]
+
if (!length(etastart)) {
- foo <- function(x) mean(as.numeric(x == 0))
- phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
- if (length( .ipobs0 ))
- phi0.init <- matrix( .ipobs0 , n, ncoly, byrow = TRUE)
+ lambd.init <-
+ pstr1.init <- matrix(NA_real_, n, NOS)
+ gpstr10 <- .gpstr10
+ gprobs.y <- .gprobs.y
+ ilambda <- .ilambda
+ oipospois.Loglikfun <- function(pstr1, lambda, y, x, w, extraargs) {
+ sum(c(w) * doipospois(x = y, pstr1 = pstr1,
+ lambda = lambda, log = TRUE))
+ }
- prob.init <-
- if ( .imethod == 2)
- 1 / (1 + y + 1/16) else
- if ( .imethod == 1)
- (1 - phi0.init) / (1 +
- matrix(colSums(y * w) / colSums(w) + 1/16,
- n, ncoly, byrow = TRUE)) else
- (1 - phi0.init) / (1 +
- matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 1 # Important to exclude the 1s
+ posyvec <- y[TFvec, jay] # Variable name unchanged (lazy)
+ lambd.init.jay <- if ( .imethod == 1) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else if ( .imethod == 2) {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
+ } else {
+ warning("argument 'imethod' should have the value 1 or 2")
+ }
+ if (length(ilambda)) { # zz
+ lambd.init.jay <- ilambda[jay]
+ } else {
+ }
- if (length( .iprob ))
- prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
+ try.this <-
+ grid.search2(gpstr10, lambd.init.jay,
+ objfun = oipospois.Loglikfun,
+ y = y[, jay], # x = x[TFvec, , drop = FALSE],
+ w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ pstr1.init[, jay] <- try.this["Value1"]
+ lambd.init[, jay] <- (try.this["Value2"] + y[, jay]) / 2
+ lambd.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
- etastart <- cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
- theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
- }
- }), list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob,
- .ipobs0 = ipobs0, .iprob = iprob,
- .imethod = imethod,
- .type.fitted = type.fitted ))),
+ etastart <- cbind(theta2eta(pstr1.init, .lpstr10 , earg = .epstr10 ),
+ theta2eta(lambd.init, .llambda , earg = .elambda ))[,
+ interleave.VGAM(M, M1 = M1)]
+ mustart <- NULL # Since etastart has been computed.
+ } # End of !length(etastart)
+ }), list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda,
+ .ilambda = ilambda,
+ .gpstr10 = gpstr10,
+ .gprobs.y = gprobs.y,
+ .imethod = imethod, # .probs.y = probs.y,
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
"Returning the 'mean'.")
"mean"
}
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "onempobs0"))[1]
- M1 <- 2
- NOS <- ncol(eta) / M1
-
- phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lprob , earg = .eprob ))
+ c("mean", "lambda", "pobs1", "pstr1", "onempstr1"))[1]
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr10 , earg = .epstr10 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
- ans <- switch(type.fitted,
- "mean" = (1 - phi0) / prob,
- "prob" = prob,
- "pobs0" = phi0, # P(Y=0)
- "onempobs0" = 1 - phi0) # P(Y>0)
+ ans <-
+ switch(type.fitted,
+ "mean" = phimat - (1 - phimat) * lambda / expm1(-lambda),
+ "lambda" = lambda,
+ "pobs1" = doipospois(1, lambda = lambda, pstr1 = phimat), # Pr(Y=1)
+ "pstr1" = phimat,
+ "onempstr1" = 1 - phimat)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
colnames(ans) <- NULL
}
ans
- }, list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob ))),
+ }, list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda,
+ .type.fitted = type.fitted
+ ))),
last = eval(substitute(expression({
- temp.names <- c(rep( .lpobs0 , len = NOS),
- rep( .lprob , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
- misc$link <- temp.names
-
- misc$earg <- vector("list", M1 * NOS)
-
- names(misc$link) <-
- names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+ misc$link <-
+ c(rep_len( .lpstr10 , NOS),
+ rep_len( .llambda , NOS))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+ names(misc$link) <- temp.names
- for (ii in 1:NOS) {
- misc$earg[[M1*ii-1]] <- .epobs0
- misc$earg[[M1*ii ]] <- .eprob
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for (ii in 1:ncoly) {
+ misc$earg[[M1*ii-1]] <- .epstr10
+ misc$earg[[M1*ii ]] <- .elambda
}
-
-
- misc$expected <- TRUE
- misc$imethod <- .imethod
- misc$ipobs0 <- .ipobs0
- misc$iprob <- .iprob
- misc$multipleResponses <- TRUE
- }), list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob,
- .ipobs0 = ipobs0, .iprob = iprob,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
+ }), list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- NOS <- extra$NOS
- M1 <- 2
-
- phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lprob , earg = .eprob ))
-
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr10 , earg = .epstr10 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <-
- c(w) * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE)
+ ll.elts <- c(w) * doipospois(x = y, pstr1 = phimat, lambda = lambda,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob ))),
- vfamily = c("zageometric"),
-
+ }, list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda ))),
+ vfamily = c("oipospoisson"),
simslot = eval(substitute(
function(object, nsim) {
-
pwts <- if (length(pwts <- object at prior.weights) > 0)
pwts else weights(object, type = "prior")
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- phi0 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
- .lprob , earg = .eprob ))
- rzageom(nsim * length(prob), prob = prob, pobs0 = phi0)
- }, list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob ))),
+ phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr10 , earg = .epstr10 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ roipospois(nsim * length(lambda), lambda = lambda, pstr1 = phimat)
+ }, list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda ))),
+
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr10 ,
+ earg = .epstr10 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+ earg = .elambda )
+ okay1 <- all(is.finite(lambda)) && all(0 < lambda) &&
+ all(is.finite(phimat)) && all(phimat < 1)
+ deflat.limit <- deflat.limit.oipospois(lambda)
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < phimat)))
+ warning("parameter 'pstr1' is too negative even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda ))),
+
+
deriv = eval(substitute(expression({
M1 <- 2
- NOS <- ncol(eta) / M1 # extra$NOS
- y0 <- extra$y0
- skip <- extra$skip.these
+ NOS <- M / M1
+ phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr10 ,
+ earg = .epstr10 )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+ earg = .elambda )
- phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lpobs0 , earg = .epobs0 ))
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lprob , earg = .eprob ))
+ pmf1 <- -lambda * exp(-lambda) / expm1(-lambda)
+ onempmf1 <- 1 - pmf1 # doipospois(1, lambda = lambda, pstr1 = phimat)
+ pobs1 <- phimat + (1 - phimat) * pmf1
+ index1 <- as.matrix(y == 1)
+ dl.dphimat <- onempmf1 / pobs1
+ dl.dphimat[!index1] <- -1 / (1 - phimat[!index1])
- dl.dprob <- 1 / prob - (y - 1) / (1 - prob)
- dl.dphi0 <- -1 / (1 - phi0)
+ dpmf1.dlambda <- exp(-lambda) *
+ (1 - lambda - exp(-lambda)) / (expm1(-lambda))^2
+ d3 <- deriv3( ~ exp(-lambda) * lambda / (1 - exp(-lambda)),
+ c("lambda"), hessian = TRUE)
+ eval.d3 <- eval(d3)
+ d2pmf1.dlambda2 <- attr(eval.d3, "hessian")
+ dim(d2pmf1.dlambda2) <- c(n, NOS) # Matrix it, even for NOS==1
- for (spp. in 1:NOS) {
- dl.dphi0[skip[, spp.], spp.] <- 1 / phi0[skip[, spp.], spp.]
- dl.dprob[skip[, spp.], spp.] <- 0
- }
- dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
- dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+ dl.dlambda <- (1 - phimat) * dpmf1.dlambda / pobs1 #
+ dl.dlambda[!index1] <- y[!index1] / lambda[!index1] - 1 -
+ 1 / expm1(lambda[!index1])
+
+ dphimat.deta <- dtheta.deta(phimat, .lpstr10 , earg = .epstr10 )
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+
+ myderiv <- c(w) * cbind(dl.dphimat * dphimat.deta,
+ dl.dlambda * dlambda.deta)
+ myderiv[, interleave.VGAM(M, M1 = M1)]
+ }), list( .lpstr10 = lpstr10, .llambda = llambda,
+ .epstr10 = epstr10, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+
+ ned2l.dphimat2 <- onempmf1 / ((1 - phimat) * pobs1) #
+ ned2l.dphimatlambda <- dpmf1.dlambda / pobs1 #
+ ned2l.dlambda2 <-
+ (((1 - phimat) * dpmf1.dlambda)^2) / pobs1 -
+ (1 - phimat) * d2pmf1.dlambda2 +
+ (1 - phimat) * (1/lambda - exp(-lambda) *
+ (1 - exp(-lambda) - lambda * exp(-lambda)) / (expm1(-lambda))^3)
+
+ wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
+ c(w) * ned2l.dlambda2 * dlambda.deta^2,
+ c(w) * ned2l.dphimatlambda * dphimat.deta * dlambda.deta),
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
+ wz
+ }), list( .llambda = llambda, .elambda = elambda ))))
+} # oipospoisson
+
+
+
+
+
+
+
+doiposbinom <- function(x, size, prob, pstr1 = 0, log = FALSE) {
+
+
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(size), length(prob), length(pstr1))
+ if (length(x) != LLL) x <- rep_len(x, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+ ans <- x # + prob + pstr1
+ index1 <- (x == 1)
+ if (log.arg) {
+ ans[ index1] <- log(pstr1[ index1] + (1 - pstr1[ index1]) *
+ dposbinom(x[ index1], size[ index1],
+ prob[ index1]))
+ ans[!index1] <- log1p(-pstr1[!index1]) +
+ dposbinom(x[!index1], size[!index1],
+ prob[!index1], log = TRUE)
+ } else {
+ ans[ index1] <- pstr1[ index1] + (1 - pstr1[ index1]) *
+ dposbinom(x[ index1], size[ index1], prob[ index1])
+ ans[!index1] <- (1 - pstr1[!index1]) *
+ dposbinom(x[!index1], size[!index1], prob[!index1])
+ }
+
+
+ deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[1 < pstr1] <- NaN
+
+ ans
+} # doiposbinom
+
+
+
+poiposbinom <- function(q, size, prob, pstr1 = 0) {
+
+ LLL <- max(length(q), length(size), length(prob), length(pstr1))
+ if (length(q) != LLL) q <- rep_len(q, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+ ans <- rep_len(NA_real_, LLL)
+
+ ans <- pposbinom(q, size, prob) # lower.tail = lower.tail, log.p = log.p
+ ans <- ifelse(q < 1, 0, pstr1 + (1 - pstr1) * ans)
+
+ deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[1 < pstr1] <- NaN
+
+ ans
+}
- ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
- dl.dprob * dprob.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
- ans
- }), list( .lpobs0 = lpobs0, .lprob = lprob,
- .epobs0 = epobs0, .eprob = eprob ))),
- weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M1*NOS)
- ned2l.dprob2 <- (1 - phi0) / (prob^2 * (1 - prob))
+qoiposbinom <- function(p, size, prob, pstr1 = 0) {
- wz[, NOS+(1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
+ LLL <- max(length(p), length(size), length(prob), length(pstr1))
+ if (length(p) != LLL) p <- rep_len(p, LLL)
+ if (length(size) != LLL) size <- rep_len(size, LLL)
+ if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+ if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+ ans <- rep_len(NA_real_, LLL)
+ deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+ ans[p <= pstr1] <- 1
+ pindex <- (deflat.limit <= pstr1) & (pstr1 < p)
+ ans[pindex] <-
+ qposbinom((p[pindex] - pstr1[pindex]) / (1 - pstr1[pindex]),
+ size = size[pindex],
+ prob = prob[pindex])
- mu.phi0 <- phi0
- tmp100 <- mu.phi0 * (1.0 - mu.phi0)
- tmp200 <- if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
- cbind(c(w) * tmp100)
- } else {
- cbind(c(w) * (dphi0.deta^2) / tmp100)
- }
- wz[, 1:NOS] <- tmp200
+ ans[p == 0] <- 1
+ ans[prob == 0] <- NaN
+ ans[pstr1 < deflat.limit] <- NaN
+ ans[1 < pstr1] <- NaN
- wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+ ans[p < 0] <- NaN
+ ans[1 < p] <- NaN
+ ans
+} # qoiposbinom
- wz
- }), list( .lpobs0 = lpobs0,
- .epobs0 = epobs0 ))))
-} # End of zageometric
+roiposbinom <- function(n, size, prob, pstr1 = 0) {
+ qoiposbinom(runif(n), size, prob, pstr1 = pstr1)
+} # roiposbinom
- zageometricff <-
- function(lprob = "logit", lonempobs0 = "logit",
- type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
- imethod = 1,
- iprob = NULL, ionempobs0 = NULL,
- zero = "onempobs0") {
+if (FALSE)
+ oiposbinomial <-
+ function(lpstr1 = "logit", lprob = "loge",
+ type.fitted = c("mean", "prob", "pobs1", "pstr1", "onempstr1"),
+ iprob = NULL,
+ gpstr1 = (1:19)/20,
+ gprobs.y = (1:19)/20, # 20160518; grid for finding lambd.init
+ imethod = 1,
+ zero = NULL) {
- lprob <- as.list(substitute(lprob))
- eprob <- link2list(lprob)
- lprob <- attr(eprob, "function.name")
- lonempobs0 <- as.list(substitute(lonempobs0))
- eonempobs0 <- link2list(lonempobs0)
- lonempobs0 <- attr(eonempobs0, "function.name")
- type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "onempobs0"))[1]
+ stop("this family function is not working yet")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ lpstr1 <- as.list(substitute(lpstr1))
+ epstr1 <- link2list(lpstr1)
+ lpstr1 <- attr(epstr1, "function.name")
+
+ lprobb <- as.list(substitute(lprob))
+ eprobb <- link2list(lprobb)
+ lprobb <- attr(eprobb, "function.name")
- if (length(iprob))
- if (!is.Numeric(iprob, positive = TRUE) ||
- max(iprob) >= 1)
- stop("argument 'iprob' out of range")
- if (length(ionempobs0))
- if (!is.Numeric(ionempobs0, positive = TRUE) ||
- max(ionempobs0) >= 1)
- stop("argument 'ionempobs0' out of range")
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob", "pobs1", "pstr1", "onempstr1"))[1]
+
+ iprobb <- iprob
+ if (length(iprobb))
+ if (!is.Numeric(iprobb, positive = TRUE))
+ stop("argument 'iprob' values must be positive")
new("vglmff",
- blurb = c("Zero-altered geometric ",
- "(Bernoulli and positive-geometric conditional model)\n\n",
+ blurb = c("One-inflated positive binomial\n\n",
"Links: ",
- namesof("prob" , lprob , earg = eprob , tag = FALSE), ", ",
- namesof("onempobs0", lonempobs0, earg = eonempobs0, tag = FALSE), "\n",
- "Mean: onempobs0 / prob"),
+ namesof("pstr1", lpstr1, earg = epstr1 ), ", ",
+ namesof("prob", lprobb, earg = eprobb ), "\n",
+ "Mean: pstr1 + (1 - pstr1) * ",
+ "size * prob / (1 - (1-prob)^size)"),
constraints = eval(substitute(expression({
-
constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
predictors.names = predictors.names,
M1 = 2)
@@ -6990,19 +8124,21 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
list(M1 = 2,
Q1 = 1,
expected = TRUE,
+ imethod = .imethod ,
multipleResponses = TRUE,
- parameters.names = c("prob", "onempobs0"),
+ parameters.names = c("pstr1", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .imethod = imethod,
.type.fitted = type.fitted
))),
-
initialize = eval(substitute(expression({
M1 <- 2
temp5 <-
w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
Is.nonnegative.y = TRUE,
Is.integer.y = TRUE,
ncol.w.max = Inf,
@@ -7015,233 +8151,244 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
+ NOS <- ncoly <- ncol(y)
+ extra$ncoly <- ncoly
+ extra$dimnamesy <- dimnames(y)
+ M <- M1 * ncoly
+ extra$type.fitted <- .type.fitted
- extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
- extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
- extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
-
- extra$dimnamesy <- dimnames(y)
- extra$type.fitted <- .type.fitted
-
-
- mynames1 <- param.names("prob", ncoly)
- mynames2 <- param.names("onempobs0", ncoly)
+ mynames1 <- param.names("pstr1", ncoly)
+ mynames2 <- param.names("prob", ncoly)
predictors.names <-
- c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
- namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M1 = M1)]
+ c(namesof(mynames1, .lpstr1 , earg = .epstr1 , tag = FALSE),
+ namesof(mynames2, .lprobb , earg = .eprobb , tag = FALSE))[
+ interleave.VGAM(M, M1 = M1)]
+
if (!length(etastart)) {
- foo <- function(x) mean(as.numeric(x == 0))
- phi0.init <- matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
- if (length( .ionempobs0 ))
- phi0.init <- matrix( 1 - .ionempobs0 , n, ncoly, byrow = TRUE)
+ lambd.init <-
+ pstr1.init <- matrix(NA_real_, n, NOS)
+ gpstr1 <- .gpstr1
+ gprobs.y <- .gprobs.y
+ iprobb <- .iprobb
+ oiposbinom.Loglikfun <- function(pstr1, prob, y, x, w, extraargs) {
+ sum(c(w) * doiposbinom(x = y, pstr1 = pstr1,
+ prob = probb, log = TRUE))
+ }
- prob.init <-
- if ( .imethod == 2)
- 1 / (1 + y + 1/16) else
- if ( .imethod == 1)
- (1 - phi0.init) / (1 +
- matrix(colSums(y * w) / colSums(w) + 1/16,
- n, ncoly, byrow = TRUE)) else
- (1 - phi0.init) / (1 +
- matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) + 1/16)
+ for (jay in 1:NOS) { # For each response 'y_jay'... do:
+ TFvec <- y[, jay] > 1 # Important to exclude the 1s
+ posyvec <- y[TFvec, jay] # Variable name unchanged (lazy)
+ lambd.init.jay <- if ( .imethod == 1) {
+ quantile(posyvec, probs = gprobs.y) - 1/2 # + 1/16
+ } else if ( .imethod == 2) {
+ weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
+ } else {
+ warning("argument 'imethod' should have the value 1 or 2")
+ }
+ if (length(iprobb)) { # zz
+ lambd.init.jay <- iprobb[jay]
+ } else {
+ }
- if (length( .iprob ))
- prob.init <- matrix( .iprob , n, ncoly, byrow = TRUE)
+ try.this <-
+ grid.search2(gpstr1, lambd.init.jay,
+ objfun = oiposbinom.Loglikfun,
+ y = y[, jay], # x = x[TFvec, , drop = FALSE],
+ w = w[, jay],
+ ret.objfun = TRUE) # Last value is the loglik
+ pstr1.init[, jay] <- try.this["Value1"]
+ lambd.init[, jay] <- (try.this["Value2"] + y[, jay]) / 2
+ lambd.init[, jay] <- try.this["Value2"]
+ } # for (jay ...)
- etastart <-
- cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
- theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
-
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
- }
- }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob,
- .ionempobs0 = ionempobs0, .iprob = iprob,
- .imethod = imethod,
- .type.fitted = type.fitted ))),
+ etastart <- cbind(theta2eta(pstr1.init, .lpstr1 , earg = .epstr1 ),
+ theta2eta(lambd.init, .lprobb , earg = .eprobb ))[,
+ interleave.VGAM(M, M1 = M1)]
+ mustart <- NULL # Since etastart has been computed.
+ } # End of !length(etastart)
+ }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb,
+ .iprobb = iprobb,
+ .gpstr1 = gpstr1,
+ .gprobs.y = gprobs.y,
+ .imethod = imethod, # .probs.y = probs.y,
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
"Returning the 'mean'.")
"mean"
}
type.fitted <- match.arg(type.fitted,
- c("mean", "prob", "pobs0", "onempobs0"))[1]
-
- NOS <- extra$NOS
- M1 <- 2
-
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lonempobs0 , earg = .eonempobs0 ))
-
-
- ans <- switch(type.fitted,
- "mean" = onempobs0 / prob,
- "prob" = prob,
- "pobs0" = 1 - onempobs0, # P(Y=0)
- "onempobs0" = onempobs0) # P(Y>0)
+ c("mean", "prob", "pobs1", "pstr1", "onempstr1"))[1]
+
+ pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+ probb <- eta2theta(eta[, c(FALSE, TRUE)], .lprobb , earg = .eprobb )
+
+ ans <-
+ switch(type.fitted,
+ "mean" = pstr1 - (1 - pstr1) * probb / expm1(-probb),
+ "prob" = probb,
+ "pobs1" = doipospois(1, prob = probb, pstr1 = pstr1), # Pr(Y=1)
+ "pstr1" = pstr1,
+ "onempstr1" = 1 - pstr1)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
length(extra$dimnamesy[[2]]) == ncol(ans) &&
length(extra$dimnamesy[[2]]) > 0) {
- dimnames(ans) <- extra$dimnamesy
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
} else
if (NCOL(ans) == 1 &&
is.matrix(ans)) {
colnames(ans) <- NULL
}
ans
- }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob ))),
+ }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb,
+ .type.fitted = type.fitted
+ ))),
last = eval(substitute(expression({
- temp.names <- c(rep( .lprob , len = NOS),
- rep( .lonempobs0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
- misc$link <- temp.names
-
- misc$earg <- vector("list", M1 * NOS)
-
- names(misc$link) <-
- names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+ misc$link <-
+ c(rep_len( .lpstr1 , NOS),
+ rep_len( .lprobb , NOS))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+ names(misc$link) <- temp.names
- for (ii in 1:NOS) {
- misc$earg[[M1*ii-1]] <- .eprob
- misc$earg[[M1*ii ]] <- .eonempobs0
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for (ii in 1:ncoly) {
+ misc$earg[[M1*ii-1]] <- .epstr1
+ misc$earg[[M1*ii ]] <- .eprobb
}
-
-
- misc$expected <- TRUE
- misc$imethod <- .imethod
- misc$ionempobs0 <- .ionempobs0
- misc$iprob <- .iprob
- misc$multipleResponses <- TRUE
- }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob,
- .ionempobs0 = ionempobs0, .iprob = iprob,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
+ }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb ))),
+ loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- NOS <- extra$NOS
- M1 <- 2
-
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lonempobs0 , earg = .eonempobs0 ))
-
+ pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+ probb <- eta2theta(eta[, c(FALSE, TRUE)], .lprobb , earg = .eprobb )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <-
- c(w) * dzageom(x = y, pobs0 = 1 - onempobs0, prob = prob,
- log = TRUE)
+ ll.elts <- c(w) * doipospois(x = y, pstr1 = pstr1, probb = probb,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
ll.elts
}
}
- }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob ))),
- vfamily = c("zageometricff"),
-
+ }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb ))),
+ vfamily = c("oiposbinomial"),
simslot = eval(substitute(
function(object, nsim) {
-
pwts <- if (length(pwts <- object at prior.weights) > 0)
pwts else weights(object, type = "prior")
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- onempobs0 <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
- .lonempobs0 , earg = .eonempobs0 ))
- prob <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
- .lprob , earg = .eprob ))
- rzageom(nsim * length(prob), pobs0 = 1 - onempobs0, prob = prob)
- }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob ))),
-
-
-
-
- deriv = eval(substitute(expression({
- M1 <- 2
- NOS <- ncol(eta) / M1 # extra$NOS
- y0 <- extra$y0
- skip <- extra$skip.these
+ pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+ probb <- eta2theta(eta[, c(FALSE, TRUE)], .lprobb , earg = .eprobb )
+ roipospois(nsim * length(probb), probb = probb, pstr1 = pstr1)
+ }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb ))),
- prob <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lprob , earg = .eprob ))
- onempobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
- .lonempobs0 , earg = .eonempobs0 ))
- pobs0 <- 1 - onempobs0
+ validparams = eval(substitute(function(eta, y, extra = NULL) {
+ pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+ earg = .epstr1 )
+ probb <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lprobb ,
+ earg = .eprobb )
+ okay1 <- all(is.finite(pstr1)) && all(pstr1 < 1) &&
+ all(is.finite(probb)) && all(0 < probb) && all(prob < 1)
+ deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+ okay2.deflat <- TRUE
+ if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr1)))
+ warning("parameter 'pstr1' is too negative even allowing for ",
+ "0-deflation.")
+ okay1 && okay2.deflat
+ }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb ))),
- dl.dprob <- 1 / prob - (y - 1) / (1 - prob)
- dl.donempobs0 <- +1 / (onempobs0)
- for (spp. in 1:NOS) {
- dl.donempobs0[skip[, spp.], spp.] <- -1 / pobs0[skip[, spp.], spp.]
- dl.dprob[skip[, spp.], spp.] <- 0
- }
- dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
- donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
- earg = .eonempobs0 )
- ans <- c(w) * cbind(dl.dprob * dprob.deta,
- dl.donempobs0 * donempobs0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
- ans
- }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
- .eonempobs0 = eonempobs0, .eprob = eprob ))),
+ deriv = eval(substitute(expression({
+ M1 <- 2
+ NOS <- M / M1
+ pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+ earg = .epstr1 )
+ probb <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lprobb ,
+ earg = .eprobb )
+
+ pmf1 <- -probb * exp(-probb) / expm1(-probb)
+ onempmf1 <- 1 - pmf1 # doipospois(1, probb = probb, pstr1 = pstr1)
+ pobs1 <- pstr1 + (1 - pstr1) * pmf1
+ index1 <- as.matrix(y == 1)
+
+ dl.dpstr1 <- onempmf1 / pobs1
+ dl.dpstr1[!index1] <- -1 / (1 - pstr1[!index1])
+
+ dpmf1.dprobb <- exp(-probb) *
+ (1 - probb - exp(-probb)) / (expm1(-probb))^2
+
+ d3 <- deriv3( ~ exp(-probb) * probb / (1 - exp(-probb)),
+ c("probb"), hessian = TRUE)
+ eval.d3 <- eval(d3)
+ d2pmf1.dprobb2 <- attr(eval.d3, "hessian")
+ dim(d2pmf1.dprobb2) <- c(n, NOS) # Matrix it, even for NOS==1
+
+ dl.dprobb <- (1 - pstr1) * dpmf1.dprobb / pobs1 #
+ dl.dprobb[!index1] <- y[!index1] / probb[!index1] - 1 -
+ 1 / expm1(probb[!index1])
+
+ dpstr1.deta <- dtheta.deta(pstr1, .lpstr1 , earg = .epstr1 )
+ dprobb.deta <- dtheta.deta(probb, .lprobb , earg = .eprobb )
+
+ myderiv <- c(w) * cbind(dl.dpstr1 * dpstr1.deta,
+ dl.dprobb * dprobb.deta)
+ myderiv[, interleave.VGAM(M, M1 = M1)]
+ }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
+ .epstr1 = epstr1, .eprobb = eprobb ))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M1*NOS)
-
-
- ned2l.dprob2 <- (1 - pobs0) / (prob^2 * (1 - prob))
-
- wz[, (1:NOS)] <- c(w) * ned2l.dprob2 * dprob.deta^2
+ ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1) #
+ ned2l.dpstr1probb <- dpmf1.dprobb / pobs1 #
+ ned2l.dprobb2 <-
+ (((1 - pstr1) * dpmf1.dprobb)^2) / pobs1 -
+ (1 - pstr1) * d2pmf1.dprobb2 +
+ (1 - pstr1) * (1/probb - exp(-probb) *
+ (1 - exp(-probb) - probb * exp(-probb)) / (expm1(-probb))^3)
+
+ wz <- array(c(c(w) * ned2l.dpstr12 * dpstr1.deta^2,
+ c(w) * ned2l.dprobb2 * dprobb.deta^2,
+ c(w) * ned2l.dpstr1probb * dpstr1.deta * dprobb.deta),
+ dim = c(n, M / M1, 3))
+ wz <- arwz2wz(wz, M = M, M1 = M1)
+ wz
+ }), list( .lprobb = lprobb, .eprobb = eprobb ))))
+} # oiposbinomial
- mu.phi0 <- pobs0 # phi0
- tmp100 <- mu.phi0 * (1.0 - mu.phi0)
- tmp200 <- if ( FALSE &&
- .lonempobs0 == "logit" &&
- is.empty.list( .eonempobs0 )) {
- cbind(c(w) * tmp100)
- } else {
- cbind(c(w) * (donempobs0.deta^2) / tmp100)
- }
- wz[, NOS+(1:NOS)] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
- wz
- }), list( .lonempobs0 = lonempobs0,
- .eonempobs0 = eonempobs0 ))))
-} # End of zageometricff
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 7d70107..02bde96 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 04f538a..7b71a07 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/generic.q b/R/generic.q
index 02015d3..72c0ba0 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/links.q b/R/links.q
index 783da5d..88c3ddd 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -707,7 +707,7 @@ care.exp <- function(x,
multilogit <-
function(theta,
- refLevel = "last",
+ refLevel = "(Last)",
M = NULL, # stop("argument 'M' not specified"),
whitespace = FALSE,
bvalue = NULL,
@@ -721,15 +721,15 @@ care.exp <- function(x,
stop("the length of 'refLevel' must be one")
if (is.character(refLevel)) {
- if (refLevel != "last")
- stop('if a character, refLevel must be "last"')
+ if (refLevel != "(Last)")
+ stop('if a character, refLevel must be "(Last)"')
refLevel <- -1
} else
if (is.factor(refLevel)) {
if (is.ordered(refLevel))
warning("argument 'refLevel' is from an ordered factor")
refLevel <- as.character(refLevel) == levels(refLevel)
- refLevel <- (1:length(refLevel))[refLevel]
+ refLevel <- (seq_along(refLevel))[refLevel]
if (!is.Numeric(refLevel, length.arg = 1,
integer.valued = TRUE, positive = TRUE))
stop("could not coerce 'refLevel' into a single positive integer")
@@ -1134,9 +1134,9 @@ foldsqrt <- function(theta, # = NA , = NULL,
thmat <- cbind(theta)
- lambda <- rep(lambda, len = ncol(thmat)) # Allow recycling for lambda
+ lambda <- rep_len(lambda, ncol(thmat)) # Allow recycling for lambda
if (is.Numeric(cutpoint))
- cutpoint <- rep(cutpoint, len = ncol(thmat))
+ cutpoint <- rep_len(cutpoint, ncol(thmat))
if (ncol(thmat) > 1) {
answer <- thmat
for (ii in 1:ncol(thmat))
@@ -1230,7 +1230,7 @@ foldsqrt <- function(theta, # = NA , = NULL,
thmat <- cbind(theta)
if (ncol(thmat) > 1) {
answer <- thmat
- cutpoint <- rep(cutpoint, len = ncol(thmat)) # Reqd for the for loop
+ cutpoint <- rep_len(cutpoint, ncol(thmat)) # Reqd for the for loop
for (ii in 1:ncol(thmat))
answer[, ii] <- Recall(theta = thmat[, ii],
cutpoint = cutpoint,
@@ -1351,8 +1351,8 @@ foldsqrt <- function(theta, # = NA , = NULL,
thmat <- cbind(theta)
- kay <- rep(kay, len = ncol(thmat)) # Allow recycling for kay
- cutpoint <- rep(cutpoint, len = ncol(thmat)) # Allow recycling for cutpoint
+ kay <- rep_len(kay, ncol(thmat)) # Allow recycling for kay
+ cutpoint <- rep_len(cutpoint, ncol(thmat)) # Allow recycling for cutpoint
if (ncol(thmat) > 1) {
answer <- thmat
for (ii in 1:ncol(thmat))
@@ -1477,7 +1477,7 @@ warning("20150711; this function has not been updated")
thmat <- cbind(theta)
- kay <- rep(kay, len = ncol(thmat)) # Allow recycling for kay
+ kay <- rep_len(kay, ncol(thmat)) # Allow recycling for kay
if (ncol(thmat) > 1) {
answer <- thmat
for (ii in 1:ncol(thmat))
@@ -1578,7 +1578,7 @@ warning("20150711; this function has not been updated")
temp <- cut(y, breaks = breaks, labels = FALSE)
temp <- c(temp) # integer vector of integers
- if (any(is.na(temp)))
+ if (anyNA(temp))
warning("there are NAs")
answer <- if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
if (ncol(y) > 1) {
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 72e2376..d7e375d 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index df2b0f1..845f342 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -199,7 +199,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
}
}
- rval <- matrix(rep(NA_real_, 5 * nmodels), ncol = 5)
+ rval <- matrix(rep_len(NA_real_, 5 * nmodels), ncol = 5)
colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
rownames(rval) <- 1:nmodels
@@ -374,7 +374,7 @@ lrtest.default <- function(object, ..., name = NULL) {
}
}
- rval <- matrix(rep(NA_real_, 5 * nmodels), ncol = 5)
+ rval <- matrix(rep_len(NA_real_, 5 * nmodels), ncol = 5)
colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
rownames(rval) <- 1:nmodels
@@ -549,7 +549,7 @@ waldtest_default <- function(object, ..., vcov = NULL,
stop("to compare more than 2 models `vcov.' needs to be a function")
test <- match.arg(test)
- rval <- matrix(rep(NA_real_, 4 * nmodels), ncol = 4)
+ rval <- matrix(rep_len(NA_real_, 4 * nmodels), ncol = 4)
colnames(rval) <- c("Res.Df", "Df", test,
paste("Pr(>", test, ")", sep = ""))
rownames(rval) <- 1:nmodels
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 9bc6581..fca1ab6 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -55,8 +55,8 @@ attrassignlm <- function(object, ...)
vlm2lm.model.matrix <-
- function(x.vlm, Hlist = NULL,
- which.linpred = 1,
+ function(x.vlm, Hlist = NULL,
+ which.linpred = 1,
M = NULL) {
@@ -83,7 +83,7 @@ attrassignlm <- function(object, ...)
stop("'n.lm' does not seem to be an integer")
linpred.index <- which.linpred
vecTF <- Hmatrices[linpred.index, ] != 0
- X.lm.jay <- x.vlm[(0:(n.lm - 1)) * M + linpred.index, vecTF,
+ X.lm.jay <- x.vlm[(0:(n.lm - 1)) * M + linpred.index, vecTF,
drop = FALSE]
X.lm.jay
}
@@ -93,7 +93,7 @@ attrassignlm <- function(object, ...)
lm2vlm.model.matrix <-
- function(x, Hlist = NULL, assign.attributes = TRUE,
+ function(x, Hlist = NULL, assign.attributes = TRUE,
M = NULL, xij = NULL, Xm2 = NULL) {
@@ -144,7 +144,7 @@ attrassignlm <- function(object, ...)
nasgn <- oasgn <- attr(x, "assign")
lowind <- 0
- for (ii in 1:length(oasgn)) {
+ for (ii in seq_along(oasgn)) {
mylen <- length(oasgn[[ii]]) * ncolHlist[oasgn[[ii]][1]]
nasgn[[ii]] <- (lowind+1):(lowind+mylen)
lowind <- lowind + mylen
@@ -157,11 +157,11 @@ attrassignlm <- function(object, ...)
fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
vasgn <- vector("list", sum(fred))
kk <- 0
- for (ii in 1:length(oasgn)) {
+ for (ii in seq_along(oasgn)) {
temp <- matrix(nasgn[[ii]], ncol = length(oasgn[[ii]]))
for (jloc in 1:nrow(temp)) {
kk <- kk + 1
- vasgn[[kk]] <- temp[jloc,]
+ vasgn[[kk]] <- temp[jloc, ]
}
}
names(vasgn) <- vlabel(names(oasgn), fred, M)
@@ -186,41 +186,41 @@ attrassignlm <- function(object, ...)
at.vlmx <- attr(X.vlm, "assign")
at.Xm2 <- attr(Xm2, "assign")
- for (ii in 1:length(xij)) {
- form.xij <- xij[[ii]]
- if (length(form.xij) != 3)
- stop("xij[[", ii, "]] is not a formula with a response")
- tform.xij <- terms(form.xij)
- aterm.form <- attr(tform.xij, "term.labels") # Does not include response
- if (length(aterm.form) != M)
- stop("xij[[", ii, "]] does not contain ", M, " terms")
-
- name.term.y <- as.character(form.xij)[2]
- cols.X.vlm <- at.vlmx[[name.term.y]] # May be > 1 in length.
-
- x.name.term.2 <- aterm.form[1] # Choose the first one
- One.such.term <- at.Xm2[[x.name.term.2]]
- for (bbb in 1:length(One.such.term)) {
- use.cols.Xm2 <- NULL
- for (sss in 1:M) {
- x.name.term.2 <- aterm.form[sss]
- one.such.term <- at.Xm2[[x.name.term.2]]
- use.cols.Xm2 <- c(use.cols.Xm2, one.such.term[bbb])
- } # End of sss
-
- allXk <- Xm2[,use.cols.Xm2,drop=FALSE]
- cmat.no <- (at.x[[name.term.y]])[1] # First one will do (all the same).
+ for (ii in seq_along(xij)) {
+ form.xij <- xij[[ii]]
+ if (length(form.xij) != 3)
+ stop("xij[[", ii, "]] is not a formula with a response")
+ tform.xij <- terms(form.xij)
+ aterm.form <- attr(tform.xij, "term.labels") # Does not include response
+ if (length(aterm.form) != M)
+ stop("xij[[", ii, "]] does not contain ", M, " terms")
+
+ name.term.y <- as.character(form.xij)[2]
+ cols.X.vlm <- at.vlmx[[name.term.y]] # May be > 1 in length.
+
+ x.name.term.2 <- aterm.form[1] # Choose the first one
+ One.such.term <- at.Xm2[[x.name.term.2]]
+ for (bbb in seq_along(One.such.term)) {
+ use.cols.Xm2 <- NULL
+ for (sss in 1:M) {
+ x.name.term.2 <- aterm.form[sss]
+ one.such.term <- at.Xm2[[x.name.term.2]]
+ use.cols.Xm2 <- c(use.cols.Xm2, one.such.term[bbb])
+ } # End of sss
+
+ allXk <- Xm2[, use.cols.Xm2, drop = FALSE]
+ cmat.no <- (at.x[[name.term.y]])[1] # 1st one will do (all the same).
cmat <- Hlist[[cmat.no]]
Rsum.k <- ncol(cmat)
tmp44 <- kronecker(matrix(1, nrow.X.lm, 1), t(cmat)) *
- kronecker(allXk, matrix(1,ncol(cmat), 1)) # n*Rsum.k x M
+ kronecker(allXk, matrix(1, ncol(cmat), 1)) # n*Rsum.k x M
tmp44 <- array(t(tmp44), c(M, Rsum.k, nrow.X.lm))
- tmp44 <- aperm(tmp44, c(1,3,2)) # c(M, n, Rsum.k)
+ tmp44 <- aperm(tmp44, c(1, 3, 2)) # c(M, n, Rsum.k)
rep.index <- cols.X.vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
- X.vlm[,rep.index] <- c(tmp44)
- } # End of bbb
- } # End of for (ii in 1:length(xij))
+ X.vlm[, rep.index] <- c(tmp44)
+ } # End of bbb
+ } # End of for (ii in seq_along(xij))
if (assign.attributes) {
attr(X.vlm, "vassign") <- vasgn
@@ -228,7 +228,7 @@ attrassignlm <- function(object, ...)
attr(X.vlm, "xij") <- xij
}
X.vlm
-}
+} # lm2vlm.model.matrix
@@ -245,9 +245,9 @@ model.matrix.vlm <- function(object, ...)
- model.matrixvlm <- function(object,
- type = c("vlm", "lm", "lm2", "bothlmlm2"),
- linpred.index = NULL,
+ model.matrixvlm <- function(object,
+ type = c("vlm", "lm", "lm2", "bothlmlm2"),
+ linpred.index = NULL,
...) {
@@ -258,11 +258,11 @@ model.matrix.vlm <- function(object, ...)
if (length(linpred.index) &&
type != "lm")
- stop("Must set 'type = \"lm\"' when 'linpred.index' is ",
+ stop("Must set 'type = \"lm\"' when 'linpred.index' is ",
"assigned a value")
if (length(linpred.index) &&
length(object at control$xij))
- stop("Currently cannot handle 'xij' models when 'linpred.index' is ",
+ stop("Currently cannot handle 'xij' models when 'linpred.index' is ",
"assigned a value")
@@ -277,7 +277,7 @@ model.matrix.vlm <- function(object, ...)
kill.con <- if (length(object at contrasts)) object at contrasts else NULL
- x <- vmodel.matrix.default(object, data = data,
+ x <- vmodel.matrix.default(object, data = data,
contrasts.arg = kill.con)
tt <- terms(object)
attr(x, "assign") <- attrassigndefault(x, tt)
@@ -291,7 +291,7 @@ model.matrix.vlm <- function(object, ...)
kill.con <- if (length(object.copy2 at contrasts))
object.copy2 at contrasts else NULL
- Xm2 <- vmodel.matrix.default(object.copy2, data = data,
+ Xm2 <- vmodel.matrix.default(object.copy2, data = data,
contrasts.arg = kill.con)
ttXm2 <- terms(object.copy2 at misc$form2)
attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
@@ -312,17 +312,17 @@ model.matrix.vlm <- function(object, ...)
M <- object at misc$M
Hlist <- object at constraints # == constraints(object, type = "lm")
- X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist,
+ X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist,
xij = object at control$xij, Xm2 = Xm2)
if (type == "vlm") {
return(X.vlm)
} else if (type == "lm" && length(linpred.index)) {
- if (!is.Numeric(linpred.index, integer.valued = TRUE, positive = TRUE,
+ if (!is.Numeric(linpred.index, integer.valued = TRUE, positive = TRUE,
length.arg = 1))
stop("bad input for argument 'linpred.index'")
if (!length(intersect(linpred.index, 1:M)))
- stop("argument 'linpred.index' should have ",
+ stop("argument 'linpred.index' should have ",
"a single value from the set 1:", M)
Hlist <- Hlist
@@ -348,12 +348,12 @@ setMethod("model.matrix", "vlm", function(object, ...)
model.matrixvgam <-
- function(object,
- type = c("lm", "vlm", "lm", "lm2", "bothlmlm2"),
- linpred.index = NULL,
+ function(object,
+ type = c("lm", "vlm", "lm", "lm2", "bothlmlm2"),
+ linpred.index = NULL,
...) {
model.matrixvlm(object = object,
- type = type[1],
+ type = type[1],
linpred.index = linpred.index, ...)
}
setMethod("model.matrix", "vgam", function(object, ...)
@@ -365,7 +365,7 @@ setMethod("model.matrix", "vgam", function(object, ...)
model.framevlm <- function(object,
- setupsmart = TRUE,
+ setupsmart = TRUE,
wrapupsmart = TRUE, ...) {
dots <- list(...)
@@ -406,15 +406,15 @@ setMethod("model.frame", "vlm", function(formula, ...)
vmodel.matrix.default <-
- function(object, data = environment(object),
+ function(object, data = environment(object),
contrasts.arg = NULL, xlev = NULL, ...) {
t <- if (missing(data)) terms(object) else terms(object, data = data)
if (is.null(attr(data, "terms")))
data <- model.frame(object, data, xlev = xlev) else {
- reorder <- match(sapply(attr(t, "variables"), deparse,
+ reorder <- match(sapply(attr(t, "variables"), deparse,
width.cutoff = 500)[-1], names(data))
- if (any(is.na(reorder)))
+ if (anyNA(reorder))
stop("model frame and formula mismatch in model.matrix()")
if (!identical(reorder, seq_len(ncol(data))))
data <- data[, reorder, drop = FALSE]
@@ -425,7 +425,7 @@ setMethod("model.frame", "vlm", function(formula, ...)
namD <- names(data)
for (i in namD) if (is.character(data[[i]])) {
data[[i]] <- factor(data[[i]])
- warning(gettextf("variable '%s' converted to a factor", i),
+ warning(gettextf("variable '%s' converted to a factor", i),
domain = NA)
}
isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
@@ -439,7 +439,7 @@ setMethod("model.frame", "vlm", function(formula, ...)
for (nn in namC) {
if (is.na(ni <- match(nn, namD)))
warning(gettextf(
- "variable '%s' is absent, its contrast will be ignored",
+ "variable '%s' is absent, its contrast will be ignored",
nn), domain = NA) else {
ca <- contrasts.arg[[nn]]
if (is.matrix(ca))
@@ -470,9 +470,9 @@ setMethod("model.frame", "vlm", function(formula, ...)
depvar.vlm <-
- function(object,
- type = c("lm", "lm2"),
- drop = FALSE,
+ function(object,
+ type = c("lm", "lm2"),
+ drop = FALSE,
...) {
type <- match.arg(type, c("lm", "lm2"))[1]
ans <- if (type == "lm") {
@@ -486,9 +486,9 @@ depvar.vlm <-
if (!isGeneric("depvar"))
- setGeneric("depvar",
+ setGeneric("depvar",
function(object, ...)
- standardGeneric("depvar"),
+ standardGeneric("depvar"),
package = "VGAM")
@@ -507,8 +507,8 @@ setMethod("depvar", "rcim", function(object, ...)
-npred.vlm <- function(object,
- type = c("total", "one.response"),
+npred.vlm <- function(object,
+ type = c("total", "one.response"),
...) {
if (!missing(type))
type <- as.character(substitute(type))
@@ -549,7 +549,7 @@ npred.vlm <- function(object,
if (!isGeneric("npred"))
- setGeneric("npred", function(object, ...) standardGeneric("npred"),
+ setGeneric("npred", function(object, ...) standardGeneric("npred"),
package = "VGAM")
@@ -570,7 +570,7 @@ setMethod("npred", "rcim", function(object, ...)
hatvaluesvlm <-
- function(model,
+ function(model,
type = c("diagonal", "matrix", "centralBlocks"), ...) {
@@ -665,10 +665,10 @@ setMethod("hatvalues", "rcim", function(model, ...)
hatplot.vlm <-
- function(model, multiplier = c(2, 3),
- lty = "dashed",
- xlab = "Observation",
- ylab = "Hat values",
+ function(model, multiplier = c(2, 3),
+ lty = "dashed",
+ xlab = "Observation",
+ ylab = "Hat values",
ylim = NULL, ...) {
if (is(model, "vlm")) {
@@ -698,8 +698,8 @@ hatplot.vlm <-
if (is.null(ylim))
ylim <- c(0, max(hatval))
for (jay in 1:M) {
- plot(hatval[, jay], type = "n", main = predictors.names[jay],
- ylim = ylim, xlab = xlab, ylab = ylab,
+ plot(hatval[, jay], type = "n", main = predictors.names[jay],
+ ylim = ylim, xlab = xlab, ylab = ylab,
...)
points(1:N, hatval[, jay], ...)
abline(h = multiplier * ncol.X.vlm / (N * M), lty = lty, ...)
@@ -739,10 +739,10 @@ setMethod("hatplot", "rcim", function(model, ...)
dfbetavlm <-
- function(model,
- maxit.new = 1,
- trace.new = FALSE,
- smallno = 1.0e-8,
+ function(model,
+ maxit.new = 1,
+ trace.new = FALSE,
+ smallno = 1.0e-8,
...) {
if (!is(model, "vlm"))
@@ -782,26 +782,26 @@ dfbetavlm <-
}
w.orig <- if (length(orig.w) != n.lm)
- rep(orig.w, length.out = n.lm) else
+ rep_len(orig.w, n.lm) else
orig.w
w.orig[ii] <- w.orig[ii] * smallno # Relative
- fit <- vglm.fit(x = X.lm,
+ fit <- vglm.fit(x = X.lm,
X.vlm.arg = X.vlm, # Should be more efficient
y = if (y.integer)
round(depvar(model) * c(pweights) / c(orig.w)) else
- (depvar(model) * c(pweights) / c(orig.w)),
+ (depvar(model) * c(pweights) / c(orig.w)),
w = w.orig, # Set to zero so that it is 'deleted'.
- Xm2 = NULL, Ym2 = NULL,
- etastart = etastart, # coefstart = NULL,
- offset = offset,
- family = model at family,
- control = new.control,
- criterion = new.control$criterion, # "coefficients",
- qr.arg = FALSE,
- constraints = constraints(model, type = "term"),
- extra = model at extra,
- Terms = Terms.zz,
+ Xm2 = NULL, Ym2 = NULL,
+ etastart = etastart, # coefstart = NULL,
+ offset = offset,
+ family = model at family,
+ control = new.control,
+ criterion = new.control$criterion, # "coefficients",
+ qr.arg = FALSE,
+ constraints = constraints(model, type = "term"),
+ extra = model at extra,
+ Terms = Terms.zz,
function.name = "vglm")
dfbeta[ii, ] <- coef.model - fit$coeff
@@ -840,8 +840,8 @@ setMethod("dfbeta", "rcim", function(model, ...)
-hatvaluesbasic <- function(X.vlm,
- diagWm,
+hatvaluesbasic <- function(X.vlm,
+ diagWm,
M = 1) {
diff --git a/R/mux.q b/R/mux.q
index 30ab1ed..0cf28f7 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -19,7 +19,7 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
stop("'cc' is not a matrix")
c( .C("VGAM_C_mux34", as.double(xmat), as.double(cc),
as.integer(nnn), as.integer(RRR),
- as.integer(symmetric), ans = as.double(rep(0.0, nnn)),
+ as.integer(symmetric), ans = as.double(rep_len(0.0, nnn)),
NAOK = TRUE)$ans)
}
@@ -40,7 +40,7 @@ mux34 <- function(xmat, cc, symmetric = FALSE) {
stop("'cc' is not a matrix")
c( .Fortran("vgamf90mux34", as.double(xmat), as.double(cc),
as.integer(n), as.integer(R),
- as.integer(symmetric), ans = as.double(rep(0.0, n)),
+ as.integer(symmetric), ans = as.double(rep_len(0.0, n)),
NAOK = TRUE)$ans)
}
@@ -62,7 +62,7 @@ mux2 <- function(cc, xmat) {
M <- d[1]
if (d[2] != p || d[3] != n)
stop("dimension size inconformable")
- ans <- rep(NA_real_, n*M)
+ ans <- rep_len(NA_real_, n*M)
fred <- .C("mux2", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(p), as.integer(n),
as.integer(M), NAOK = TRUE)
@@ -81,7 +81,7 @@ mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE) {
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc) # Usually M or M(M+1)/2
- ans <- rep(NA_real_, n*M)
+ ans <- rep_len(NA_real_, n*M)
fred <- .C("mux22", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(dimm.value),
as.integer(index$row), as.integer(index$col),
@@ -280,7 +280,7 @@ mux15 <- function(cc, xmat) {
if (max(abs(t(cc)-cc))>0.000001)
stop("argument 'cc' is not symmetric")
- ans <- rep(NA_real_, n*M*M)
+ ans <- rep_len(NA_real_, n*M*M)
fred <- .C("mux15", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(M),
as.integer(n), NAOK = TRUE)
@@ -375,7 +375,7 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
} else {
- ans[, index] <- tmp777 # restored 16/10/03
+ ans[, index] <- tmp777 # restored 20031016
}
}
dim(ans) <- c(MM, n) # Make sure
diff --git a/R/nobs.R b/R/nobs.R
index 36e2d84..c685677 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/penvps.R b/R/penvps.R
new file mode 100644
index 0000000..26d9de0
--- /dev/null
+++ b/R/penvps.R
@@ -0,0 +1,139 @@
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+Pen.psv <-
+ function(constraints = constraints, ps.list = ps.list) {
+ assignx <- ps.list$assignx
+ nassignx <- names(assignx)
+ indexterms <- ps.list$indexterms
+
+
+ which.X.ps <- ps.list$which.X.ps
+
+ S.arg <- ps.list$S.arg
+
+ lambdalist <- ps.list$lambdalist
+
+ ridge.adj <- ps.list$ridge.adj
+
+ term.labels <- ps.list$term.labels
+
+
+
+ index <- numeric()
+ lambda.new <- list()
+ pen.new.list <- list()
+ ncol.X.ps <- sapply(which.X.ps, length)
+ ncolHlist.model <- unlist(lapply(constraints, ncol))
+
+
+ ncolHlist.new <- ncolHlist.model
+ if (names(constraints)[[1]] == "(Intercept)") {
+ ncolHlist.new <- ncolHlist.new[-1]
+ nassignx <- nassignx[-1]
+ }
+
+
+
+ ncol.H.ps <- ncolHlist.new[indexterms]
+ nps <- nassignx[indexterms]
+
+
+
+ lambdalen <- sapply(lambdalist, length)
+
+
+
+
+
+ for (ii in seq_along(ncol.H.ps)) {
+ nlambda <- lambdalen[ii] # lambdalen[[ii]]
+ if (nlambda == ncol.H.ps[ii]) {
+ lambda.new[[ii]] <- lambdalist[[ii]]
+ } else {
+ if (nlambda > ncol.H.ps[ii])
+ warning("too many lambdas; using the first few")
+ lambda.new[[ii]] <- rep_len(lambdalist[[ii]], ncol.H.ps[ii])
+ }
+
+ names(lambda.new)[[ii]] <- nps[ii] # nps[[ii]]
+
+
+
+
+
+
+ if (ridge.adj[[ii]] == 0) {
+ lambda.diag <- diag(sqrt(lambda.new[[ii]]))
+ pen.noridge <- kronecker(lambda.diag, S.arg[[ii]])
+ ooo <- matrix(1:(ncol.H.ps[ii] * ncol.X.ps[ii]),
+ ncol = ncol.X.ps[ii], byrow = TRUE)
+ pen.new.list[[ii]] <- pen.noridge[, ooo]
+ names(pen.new.list)[[ii]] <- nps[ii] # nps[[ii]]
+ } else {
+ ioffset <- 0
+ joffset <- 0
+ Dmat1 <- matrix(0,
+ ncol.H.ps[ii] * (ncol(S.arg[[ii]]) + nrow(S.arg[[ii]])),
+ ncol.H.ps[ii] * ncol(S.arg[[ii]]))
+ for (jay in 1:ncol.H.ps[ii]) {
+ pen.set <- sqrt(lambda.new[[ii]][jay]) * S.arg[[ii]]
+ pen.ridge <- rbind(pen.set,
+ sqrt(ridge.adj[[ii]]) * diag(ncol(S.arg[[ii]])))
+ Dmat1[ioffset + 1:nrow(pen.ridge),
+ joffset + 1:ncol(pen.ridge)] <- pen.ridge
+ ioffset <- ioffset + nrow(pen.ridge)
+ joffset <- joffset + ncol(pen.ridge)
+ } # for jay
+ ooo <- matrix(1:(ncol.H.ps[ii] * ncol.X.ps[ii]),
+ ncol = ncol.X.ps[ii], byrow = TRUE)
+ pen.new.list[[ii]] <- Dmat1[, ooo]
+ names(pen.new.list)[[ii]] <- nps[ii] # nps[[ii]]
+ ioffset <- 0
+ joffset <- 0
+ } # if-else ridge.adj
+ } # for
+
+
+
+
+ ncol.allterms <- sapply(assignx, length)
+
+ ncol.model <- if (names(constraints)[[1]] == "(Intercept)")
+ ncol.allterms[-1] else ncol.allterms
+ nrowpen.new.list <- sapply(pen.new.list, nrow)
+ nrowPen <- sum(nrowpen.new.list)
+ ncolPen <- sum(ncol.allterms * ncolHlist.model)
+ iioffset <- 0
+ Dmat2 <- matrix(0, nrowPen, ncolPen)
+ jay <- 0
+
+
+ jjoffset <- if (names(constraints)[[1]] == "(Intercept)")
+ ncolHlist.model[1] else 0
+
+ for (ii in seq_along(term.labels)) {
+ if (indexterms[ii]) {
+ jay <- jay + 1
+ ind.x <- iioffset + 1:nrow(pen.new.list[[jay]])
+ ind.y <- jjoffset + 1:ncol(pen.new.list[[jay]])
+ Dmat2[ind.x, ind.y] <- pen.new.list[[jay]]
+ iioffset <- iioffset + nrow(pen.new.list[[jay]])
+ jjoffset <- jjoffset + ncol(pen.new.list[[jay]])
+ } else {
+ jjoffset <- jjoffset + ncolHlist.new[ii] * ncol.model[ii]
+ }
+ } # ii
+
+ Xvlm.aug <- Dmat2
+
+ attr(Xvlm.aug, "lambda.vlm") <- lambda.new
+ Xvlm.aug
+}
+
+
+
+
diff --git a/R/plot.vglm.q b/R/plot.vgam.R
similarity index 91%
rename from R/plot.vglm.q
rename to R/plot.vgam.R
index 2181ccf..184aaea 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -119,13 +119,14 @@ ylim.scale <- function(ylim, scale = 0) {
+
getallresponses <- function(xij) {
if (!is.list(xij))
return("")
allterms <- lapply(xij, terms)
allres <- NULL
- for (ii in 1:length(xij))
+ for (ii in seq_along(xij))
allres <- c(allres,
as.character(attr(allterms[[ii]], "variables"))[2])
allres
@@ -138,7 +139,7 @@ getallresponses <- function(xij) {
terms = attr((object at terms)$terms, "term.labels"),
raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
varxij = 1) {
- Terms <- terms(object) # 11/8/03; object at terms$terms
+ Terms <- terms(object) # 20030811; object at terms$terms
aa <- attributes(Terms)
all.terms <- labels(Terms)
xvars <- parse(text = all.terms)
@@ -168,7 +169,8 @@ getallresponses <- function(xij) {
evars <- evars[[1]]
} else
if (length(evars) > 1 &&
- any(getallresponses(object at control$xij) == names(xnames)) ) {
+ length(intersect(getallresponses(object at control$xij), names(xnames)))
+ ) {
@@ -292,61 +294,6 @@ preplotvgam <-
-plotvlm <- function(object, residuals = NULL, rugplot= FALSE, ...) {
- stop("sorry, this function hasn't been written yet")
-}
-
-
-
-
-plotvglm <-
- function(x,
- type = c("vglm", "vgam"),
- newdata = NULL, y = NULL,
- residuals = NULL, rugplot = TRUE,
- se = FALSE, scale = 0,
- raw = TRUE, offset.arg = 0,
- deriv.arg = 0, overlay = FALSE,
- type.residuals = c("deviance", "working", "pearson", "response"),
- plot.arg = TRUE,
- which.term = NULL, which.cf = NULL,
- control = plotvgam.control(...),
- varxij = 1, ...) {
-
- ptype <- match.arg(type, c("vglm", "vgam"))[1]
-
- if (ptype == "vglm") {
- stop("this function has not been written yet!")
- }
-
-
-
- if (length(newdata))
- newdata <- newdata else newdata <- NULL
-
-
-
- invisible(
- plot.vgam(x = x, newdata = newdata, y = y,
- residuals = residuals, rugplot = rugplot,
- se = se, scale = scale,
- raw = raw, offset.arg = offset.arg,
- deriv.arg = deriv.arg, overlay = overlay,
- type.residuals = type.residuals,
- plot.arg = plot.arg,
- which.term = which.term, which.cf = which.cf,
- control = control,
- varxij = varxij, ...)
- )
-}
-
-
-
-
-
-
-
-
@@ -532,11 +479,11 @@ vplot.numeric <-
stop("length of 'x' and 'y' do not seem to match")
y <- as.matrix(y)
if (!length(which.cf))
- which.cf <- 1:ncol(y) # Added 7/8/04
+ which.cf <- 1:ncol(y) # Added 20040807
if (!is.null(se.y))
se.y <- as.matrix(se.y)
- if (!is.null(se.y) && any(is.na(se.y)))
+ if (!is.null(se.y) && anyNA(se.y))
se.y <- NULL
if (!is.null(residuals)) {
@@ -638,16 +585,16 @@ vplot.numeric <-
} else {
YLAB <- ylab
- pcex <- rep(pcex, len = ncol(uy))
- pch <- rep(pch , len = ncol(uy))
- pcol <- rep(pcol, len = ncol(uy))
- lcol <- rep(lcol, len = ncol(uy))
- llty <- rep(llty, len = ncol(uy))
- llwd <- rep(llwd, len = ncol(uy))
- slty <- rep(slty, len = ncol(uy))
- rcol <- rep(rcol, len = ncol(uy))
- scol <- rep(scol, len = ncol(uy))
- slwd <- rep(slwd, len = ncol(uy))
+ pcex <- rep_len(pcex, ncol(uy))
+ pch <- rep_len(pch , ncol(uy))
+ pcol <- rep_len(pcol, ncol(uy))
+ lcol <- rep_len(lcol, ncol(uy))
+ llty <- rep_len(llty, ncol(uy))
+ llwd <- rep_len(llwd, ncol(uy))
+ slty <- rep_len(slty, ncol(uy))
+ rcol <- rep_len(rcol, ncol(uy))
+ scol <- rep_len(scol, ncol(uy))
+ slwd <- rep_len(slwd, ncol(uy))
for (ii in 1:ncol(uy)) {
if (!length(which.cf) ||
@@ -742,7 +689,7 @@ vplot.factor <-
if (!is.null(se.y))
se.y <- as.matrix(se.y)
- if (!is.null(se.y) && any(is.na(se.y)))
+ if (!is.null(se.y) && anyNA(se.y))
se.y <- NULL
if (!is.null(residuals)) {
@@ -762,7 +709,7 @@ vplot.factor <-
se = se, xlim = xlim, ylim = ylim, ...)
} else {
for (ii in 1:ncol(y)) {
- ylab <- rep(ylab, len = ncol(y))
+ ylab <- rep_len(ylab, ncol(y))
if (ncol(y) > 1)
ylab <- dimnames(y)[[2]]
vvplot.factor(x, y[, ii,drop = FALSE],
@@ -899,16 +846,7 @@ setMethod("vplot", "numeric", function(x, ...)
-setMethod("plot", "vlm",
- function(x, y, ...) {
- if (!missing(y))
- stop("cannot process the 'y' argument")
- invisible(plotvlm(x, y, ...))})
-setMethod("plot", "vglm",
- function(x, y, ...) {
- if (!missing(y))
- stop("cannot process the 'y' argument")
- invisible(plotvglm(x = x, y = y, ...))})
+
setMethod("plot", "vgam",
function(x, y, ...) {
if (!missing(y))
@@ -936,7 +874,7 @@ plotqrrvglm <- function(object,
res <- resid(object, type = rtype)
my.ylab <- if (length(object at misc$ynames)) object at misc$ynames else
- rep(" ", len = M)
+ rep_len(" ", M)
Rtype <- switch(rtype, pearson = "Pearson", response = "Response",
deviance = "Deviance", working = "Working")
@@ -982,3 +920,17 @@ put.caption <- function(text.arg = "(a)",
+
+
+setMethod("plot", "psvgam",
+ function(x, y, ...) {
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
+ invisible(plot.vgam(x = x, y = y, ...))})
+
+
+
+
+
+
+
diff --git a/R/plot.vglm.R b/R/plot.vglm.R
new file mode 100644
index 0000000..13c038f
--- /dev/null
+++ b/R/plot.vglm.R
@@ -0,0 +1,106 @@
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+plotvlm <- function(object, residuals = NULL, rugplot= FALSE, ...) {
+ stop("sorry, this function hasn't been written yet")
+}
+
+
+
+
+
+
+plotvglm <-
+ function(x,
+ which = "(All)",
+
+
+
+ ...) {
+
+
+ show <- rep(FALSE, 10000)
+ if (is.character(which) && which == "(All)") {
+ show[TRUE] <- TRUE
+ } else {
+ show[which] <- TRUE
+ }
+
+
+
+ presid <- resid(x, type = "pearson")
+ lapred <- predict(x)
+ M <- ncol(lapred)
+ for (jay in 1:M) {
+ if (show[jay]) {
+ use.x <- lapred[, jay]
+ if (one.x <- diff(range(use.x)) < 1e-10)
+ use.x[TRUE] <- jitter(mean(use.x))
+ plot(use.x, presid[, jay],
+ ylab = "Pearson residuals",
+ xlab = paste(if (one.x) "Jittered l" else
+ "L", "inear predictor ", jay, sep = ""),
+ ...)
+ }
+ }
+
+
+
+ hvmat <- hatvalues(x)
+ for (jay in 1:M) {
+ if (show[M + jay]) {
+ use.x <- hvmat[, jay]
+ if (one.x <- diff(range(use.x)) < 1e-10)
+ use.x[TRUE] <- jitter(mean(use.x))
+ plot(use.x, presid[, jay],
+ ylab = "Pearson residuals",
+ xlab = paste(if (one.x) "Jittered h" else
+ "H", "at values for linear predictor ", jay,
+ sep = ""),
+ ...)
+ }
+ }
+
+
+
+
+
+
+ invisible(x)
+}
+
+
+
+
+
+
+
+
+
+
+
+
+setMethod("plot", "vlm",
+ function(x, y, ...) {
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
+ invisible(plotvlm(x, y, ...))})
+setMethod("plot", "vglm",
+ function(x, y, ...) {
+ if (!missing(y))
+ stop("cannot process the 'y' argument")
+ invisible(plotvglm(x = x, ...))})
+
+
+
+
+
+
+
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 7bbe8a7..9b63b6e 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,11 +1,12 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
+
predict.vgam <-
function(object, newdata = NULL,
type = c("link", "response", "terms"),
@@ -162,7 +163,7 @@ predict.vgam <-
cs <- if (raw) cumsum(c(1, ncolHlist)) else
cumsum(c(1, M + 0 * ncolHlist))
tmp6 <- vector("list", length(ncolHlist))
- for (ii in 1:length(tmp6))
+ for (ii in seq_along(tmp6))
tmp6[[ii]] <- cs[ii]:(cs[ii+1]-1)
names(tmp6) <- names(ncolHlist)
}
@@ -213,7 +214,7 @@ predict.vgam <-
TT <- ncol(object at var)
predictor$se.fit <- sqrt(predictor$se.fit^2 +
- TS * object at var %*% rep(1, TT))
+ TS * object at var %*% rep_len(1, TT))
} else {
predictor <- predictor + eta.mat
}
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index cc946b3..baa24cb 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -356,11 +356,11 @@ setMethod("predictvglmS4VGAM", signature(VGAMff = "binom2.or"),
if (se.fit) {
- predn$junk.component <- rep(coef(object), length = n.ahead)
- predn$se.fit.junk.component <- rep(diag(vcov(object)), length = n.ahead)
+ predn$junk.component <- rep_len(coef(object), n.ahead)
+ predn$se.fit.junk.component <- rep_len(diag(vcov(object)), n.ahead)
} else {
could.return.this.instead.of.predn <-
- predn2 <- rep(coef(object), length = n.ahead)
+ predn2 <- rep_len(coef(object), n.ahead)
}
predn
})
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 218beb2..e3b73d3 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -72,7 +72,7 @@ predict.vlm <- function(object,
if (object at misc$intercept.only &&
nrow(X) != nrow(newdata)) {
as.save <- attr(X, "assign")
- X <- X[rep(1, nrow(newdata)), , drop = FALSE]
+ X <- X[rep_len(1, nrow(newdata)), , drop = FALSE]
dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") <- as.save # Restored
}
@@ -153,13 +153,13 @@ predict.vlm <- function(object,
index <- charmatch(terms.arg, nv)
if (all(index == 0)) {
warning("no match found; returning all terms")
- index <- 1:length(nv)
+ index <- seq_along(nv)
}
vasgn <- vasgn[nv[index]]
}
- if (any(is.na(object at coefficients)))
- stop("cannot handle NAs in 'object at coefficients'")
+ if (anyNA(object at coefficients))
+ stop("cannot handle NAs in 'object at coefficients'")
dname2 <- object at misc$predictors.names
if (se.fit) {
@@ -250,7 +250,7 @@ predict.vlm <- function(object,
}
}
- temp <- if (raw) ncolHlist else rep(M, length(ncolHlist))
+ temp <- if (raw) ncolHlist else rep_len(M, length(ncolHlist))
dd <- vlabel(names(ncolHlist), temp, M)
if (se.fit) {
dimnames(pred$fitted.values) <-
@@ -273,7 +273,7 @@ predict.vlm <- function(object,
if (!raw)
cs <- cumsum(c(1, M + 0 * ncolHlist))
fred <- vector("list", length(ncolHlist))
- for (ii in 1:length(fred))
+ for (ii in seq_along(fred))
fred[[ii]] <- cs[ii]:(cs[ii+1]-1)
names(fred) <- names(ncolHlist)
if (se.fit) {
@@ -362,7 +362,7 @@ subconstraints <- function(assign, constraints) {
ans <- vector("list", length(assign))
if (!length(assign) || !length(constraints))
stop("assign and/or constraints is empty")
- for (ii in 1:length(assign))
+ for (ii in seq_along(assign))
ans[[ii]] <- constraints[[assign[[ii]][1]]]
names(ans) <- names(assign)
ans
@@ -372,7 +372,7 @@ subconstraints <- function(assign, constraints) {
is.linear.term <- function(ch) {
lchar <- length(ch)
- ans <- rep(FALSE, len = lchar)
+ ans <- rep_len(FALSE, lchar)
for (ii in 1:lchar) {
nc <- nchar(ch[ii])
x <- substring(ch[ii], 1:nc, 1:nc)
@@ -386,7 +386,7 @@ is.linear.term <- function(ch) {
canonical.Hlist <- function(Hlist) {
- for (ii in 1:length(Hlist)) {
+ for (ii in seq_along(Hlist)) {
temp <- Hlist[[ii]] * 0
temp[cbind(1:ncol(temp), 1:ncol(temp))] <- 1
Hlist[[ii]] <- temp
diff --git a/R/print.vglm.q b/R/print.vglm.q
index 09d6de1..d3381c7 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,7 +18,7 @@ show.vglm <- function(object) {
coef <- object at coefficients
if (any(nas <- is.na(coef))) {
if (is.null(names(coef)))
- names(coef) <- paste("b", 1:length(coef), sep = "")
+ names(coef) <- paste("b", seq_along(coef), sep = "")
cat("\nCoefficients: (", sum(nas),
" not defined because of singularities)\n", sep = "")
} else {
@@ -155,7 +155,7 @@ print.vglm <- function(x, ...) {
coef <- x at coefficients
if (any(nas <- is.na(coef))) {
if (is.null(names(coef)))
- names(coef) <- paste("b", 1:length(coef), sep = "")
+ names(coef) <- paste("b", seq_along(coef), sep = "")
cat("\nCoefficients: (", sum(nas),
" not defined because of singularities)\n", sep = "")
} else {
@@ -223,7 +223,7 @@ print.vgam <- function(x, digits = 2, ...) {
if (length(llx))
cat("Log-likelihood:", format(llx), "\n")
- criterion <- attr(terms(x), "criterion") # 11/8/03; x at terms$terms,
+ criterion <- attr(terms(x), "criterion") # 20030811; x at terms$terms,
if (!is.null(criterion) &&
criterion != "coefficients")
cat(paste(criterion, ":", sep = ""), format(x[[criterion]]), "\n")
diff --git a/R/print.vlm.q b/R/print.vlm.q
index c9c9a49..66c8f1b 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/psfun.R b/R/psfun.R
new file mode 100644
index 0000000..198435c
--- /dev/null
+++ b/R/psfun.R
@@ -0,0 +1,183 @@
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+ps <-
+ function(x,
+ ...,
+ ps.intervals = NULL,
+ lambda = 0, degree = 2, order = 2,
+ ridge.adj = 1e-5, ridge.inv = 0.0001,
+ spillover = 0.01, maxlambda = 1e4) {
+
+
+ xs <- substitute(x)
+ ans <- as.character(xs)
+ x.index <- as.vector(x)
+
+
+
+
+ x.orig <- x.index
+ xdots <- list(...)
+ uses.xij <- length(xdots) > 0
+ if (uses.xij)
+ x.index <- as.vector(c(x.index, unlist(xdots)))
+ if (is.null(ps.intervals))
+ ps.intervals <- ceiling(1.5 * log(length(unique(x.index))))
+
+
+
+ number.knots <- ps.intervals + 2 * degree + 1
+ xl <- min(x.index)
+ xr <- max(x.index)
+
+
+
+ if (smart.mode.is("read")) {
+ smartlist <- get.smart()
+ xl <- smartlist$xl # Overwrite its value
+ xr <- smartlist$xr # Overwrite its value
+ ps.intervals <- smartlist$ps.intervals # Ditto
+ number.knots <- ps.intervals + 2 * degree + 1 # Redo
+ lambda <- smartlist$lambda
+ degree <- smartlist$degree
+ order <- smartlist$order
+ ridge.adj <- smartlist$ridge.adj
+ ridge.inv <- smartlist$ridge.inv
+ spillover <- smartlist$spillover
+ maxlambda <- smartlist$maxlambda
+ maXX <- smartlist$maXX
+ Cmat <- smartlist$Cmat
+ } else {
+ maXX <- NULL
+ Cmat <- NULL
+ }
+
+ xmax <- xr + spillover * (xr - xl)
+ xmin <- xl - spillover * (xr - xl)
+ dx <- (xmax - xmin) / ps.intervals
+ nx <- names(x.index)
+ nax <- is.na(x.index)
+ if (nas <- any(nax))
+ x.index <- x[!nax]
+ sorder <- degree + 1
+ if (length(ps.intervals)) {
+ nAknots <- ps.intervals - 1
+ if (nAknots < 1) {
+ nAknots <- 1
+ warning("ps.intervals was too small; have used 2")
+ }
+
+
+
+
+
+
+ if (nAknots > 0) {
+ Aknots <- seq(from = xmin - degree * dx,
+ to = xmax + degree * dx, by = dx)
+ } else {
+ knots <- NULL
+ }
+ }
+ basis <- splineDesign(Aknots, x.index, sorder, 0 * x.index)
+ n.col <- ncol(basis)
+ if (nas) {
+ nmat <- matrix(NA_real_, length(nax), n.col)
+ nmat[!nax, ] <- basis
+ basis <- nmat
+ }
+ dimnames(basis) <- list(1:nrow(basis), 1:n.col)
+ if ((order - n.col + 1) > 0) {
+ order <- n.col - 1
+ warning("order was too large; have used ", n.col - 1)
+ }
+ if (any(lambda < 0)) {
+ lambda[lambda < 0] <- 0
+ warning("some lambda values are negative : have used lambda = ",
+ paste(lambda, collapse = ", "))
+ }
+
+ if (any(lambda > maxlambda)) {
+ lambda[lambda > maxlambda] <- maxlambda
+ warning("some lambda values are > ", maxlambda, ": ",
+ "for stability have used lambda = ",
+ paste(lambda, collapse = ", "))
+ }
+ aug <- if (order > 0) diff(diag(n.col), diff = order) else diag(n.col)
+
+
+ Pen <- t(aug) %*% aug
+ pen.aug <- (Pen + t(Pen))/2
+
+
+
+ if (is.null(maXX))
+ maXX <- mean(abs(t(basis) %*% basis))
+ maS <- mean(abs(pen.aug))/maXX
+
+
+
+ pen.aug <- pen.aug / maS
+ kk <- ncol(basis)
+ if (is.null(Cmat))
+ Cmat <- matrix(colSums(basis), 1, kk)
+
+
+ qrCt <- qr(t(Cmat))
+ jay <- nrow(Cmat) # 1
+ XZ <- t(qr.qty(qrCt, t(basis))[(jay+1):kk, ])
+
+
+ ZtSZ <- qr.qty(qrCt, t(qr.qty(qrCt, t(pen.aug))))[(jay+1):kk,
+ (jay+1):kk]
+
+
+ basis <- XZ
+
+
+ if (smart.mode.is("write"))
+ put.smart(list(xl = xl,
+ xr = xr,
+ ps.intervals = ps.intervals,
+ lambda = lambda,
+ degree = degree,
+ order = order,
+ ridge.adj = ridge.adj,
+ ridge.inv = ridge.inv,
+ spillover = spillover,
+ maxlambda = maxlambda,
+ maXX = maXX,
+ Cmat = Cmat))
+
+
+
+
+ basis <- basis[seq_along(x.orig), , drop = FALSE]
+
+
+
+
+ attr(basis, "S.arg") <- ZtSZ
+
+ attr(basis, "degree") <- degree
+ attr(basis, "knots") <- Aknots
+ attr(basis, "lambda") <- lambda # Vector
+ attr(basis, "order") <- order
+ attr(basis, "ps.intervals") <- ps.intervals
+ attr(basis, "ps.xargument") <- ans
+ attr(basis, "ridge.adj") <- ridge.adj
+ attr(basis, "ridge.inv") <- ridge.inv
+ basis
+}
+
+
+
+
diff --git a/R/psv2magic.R b/R/psv2magic.R
new file mode 100644
index 0000000..67acbaf
--- /dev/null
+++ b/R/psv2magic.R
@@ -0,0 +1,97 @@
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+ psv2magic <-
+ function(x.VLM, constraints, lambda.vlm, ps.list) {
+
+
+
+
+ colperm <- function(x, from, to) {
+ ncx <- ncol(x)
+ if (length(from) != length(to) ||
+ any(from != round(from)) ||
+ any(from < 1 | from > ncx) ||
+ any(duplicated(from)) ||
+ any(sort(from) != sort(to)))
+ stop("invalid column permutation indices")
+ perm <- seq(length = ncx)
+ perm[to] <- perm[from]
+ x[, perm]
+ }
+
+
+
+ assignx <- ps.list$assignx
+ nassignx <- names(assignx)
+ indexterms <- ps.list$indexterms
+ which.X.ps <- ps.list$which.X.ps
+ term.labels <- ps.list$term.labels
+ ncol.X.ps <- sapply(which.X.ps, length)
+ ncolHlist.model <- unlist(lapply(constraints, ncol))
+
+
+ ncolHlist.new <- ncolHlist.model
+ if (names(constraints)[[1]] == "(Intercept)") {
+ ncolHlist.new <- ncolHlist.new[-1]
+ nassignx <- nassignx[-1]
+ }
+
+
+ ncol.H.ps <- ncolHlist.new[indexterms]
+ num.ps.terms <- length(which.X.ps)
+
+
+ allterms <- length(term.labels)
+ ncol.allterms <- sapply(assignx, length)
+
+ ncol.model <- if (names(constraints)[[1]] == "(Intercept)")
+ ncol.allterms[-1] else ncol.allterms
+ jay <- 0
+ jjoffset <- if (names(constraints)[[1]] == "(Intercept)")
+ ncolHlist.model[1] else 0
+ perm.list <- list()
+ for (ii in seq_along(term.labels)) {
+ if (indexterms[ii]) {
+ jay <- jay + 1
+ perm.list[[jay]] <-
+ matrix(jjoffset + 1:(ncol.X.ps[jay] * ncol.H.ps[jay]),
+ ncol = ncol.H.ps[jay], byrow = TRUE)
+ jjoffset <- jjoffset + ncol.H.ps[[jay]] * ncol.X.ps[[jay]]
+ } else {
+ jjoffset <- jjoffset + ncolHlist.new[ii] * ncol.model[ii]
+ }
+ }
+ vindex.min <- sapply(perm.list, min) # function(x) min(x)
+ vindex.max <- sapply(perm.list, max) # function(x) max(x)
+ o1 <- vector("list", length(ncol.H.ps)) # list()
+ for (ii in seq_along(ncol.H.ps)) {
+ o1[[ii]] <- vindex.min[ii]:vindex.max[ii]
+ }
+ ooo <- unlist(o1) # do.call("c", o1)
+ ppp <- unlist(perm.list) # do.call("c", perm.list)
+
+
+ off.list <- vector("list", num.ps.terms) # list()
+ for (ii in 1:num.ps.terms) {
+ index <- 0
+ off.list[[ii]] <- numeric()
+ for (jay in 1: ncol.H.ps[ii]) {
+ off.list[[ii]][jay] <- vindex.min[ii] + index
+ index <- ncol.X.ps[ii] * jay
+ }
+ }
+
+ rl <-
+ list(x.VLM.new = colperm(x.VLM, ppp, ooo),
+ sp = unlist(lambda.vlm),
+ S.arg = rep(ps.list$S.arg, ncol.H.ps), # Argument 'S' of magic()
+ off = unlist(off.list))
+ rl
+}
+
+
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 14e86cd..4b81d1a 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,8 +18,7 @@ qrrvglm.control <- function(Rank = 1,
FastAlgorithm = TRUE,
GradientFunction = TRUE,
Hstep = 0.001,
- isd.latvar = rep(c(2, 1, rep(0.5, length = Rank)),
- length = Rank),
+ isd.latvar = rep_len(c(2, 1, rep_len(0.5, Rank)), Rank),
iKvector = 0.1,
iShape = 0.1,
ITolerances = NULL,
@@ -27,7 +26,7 @@ qrrvglm.control <- function(Rank = 1,
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
- MUXfactor = rep(7, length = Rank),
+ MUXfactor = rep_len(7, Rank),
noRRR = ~ 1,
Norrr = NA,
optim.maxit = 20,
@@ -153,7 +152,7 @@ qrrvglm.control <- function(Rank = 1,
Bestof = Bestof,
checkwz = checkwz,
Cinit = Cinit,
- Crow1positive=as.logical(rep(Crow1positive, len = Rank)),
+ Crow1positive=as.logical(rep_len(Crow1positive, Rank)),
ConstrainedQO = TRUE, # A constant, not a control parameter
Corner = FALSE, # Needed for valt.1iter()
Dzero = NULL,
@@ -163,7 +162,7 @@ qrrvglm.control <- function(Rank = 1,
FastAlgorithm = FastAlgorithm,
GradientFunction = GradientFunction,
Hstep = Hstep,
- isd.latvar = rep(isd.latvar, len = Rank),
+ isd.latvar = rep_len(isd.latvar, Rank),
iKvector = as.numeric(iKvector),
iShape = as.numeric(iShape),
I.tolerances = I.tolerances,
@@ -171,7 +170,7 @@ qrrvglm.control <- function(Rank = 1,
imethod = imethod,
Maxit.optim = Maxit.optim,
min.criterion = TRUE, # needed for calibrate
- MUXfactor = rep(MUXfactor, length = Rank),
+ MUXfactor = rep_len(MUXfactor, Rank),
noRRR = noRRR,
optim.maxit = optim.maxit,
OptimizeWrtC = TRUE,
diff --git a/R/qtplot.q b/R/qtplot.q
index 3c5e38c..43929e7 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -45,7 +45,7 @@ qtplot.lms.bcg <- function(percentiles = c(25,50,75),
sigma <- eta[, 3]
shape <- 1 / (lambda * sigma)^2
for (ii in 1:lp) {
- ccc <- rep(cc[ii]/100, len=nrow(eta))
+ ccc <- rep_len(cc[ii]/100, nrow(eta))
ccc <- ifelse(lambda > 0, ccc, 1-ccc)
answer[, ii] <- eta[, 2] *
(qgamma(ccc, shape = shape)/shape)^(1/lambda)
@@ -241,10 +241,10 @@ plotqtplot.lmscreg <-
}
- tcol.arg <- rep(tcol.arg, length = lp)
- lcol.arg <- rep(lcol.arg, length = lp)
- llwd.arg <- rep(llwd.arg, length = lp)
- llty.arg <- rep(llty.arg, length = lp)
+ tcol.arg <- rep_len(tcol.arg, lp)
+ lcol.arg <- rep_len(lcol.arg, lp)
+ llwd.arg <- rep_len(llwd.arg, lp)
+ llty.arg <- rep_len(llty.arg, lp)
for (ii in 1:lp) {
temp <- cbind(xx, fitted.values[, ii])
temp <- temp[sort.list(temp[, 1]), ]
@@ -298,7 +298,7 @@ if (TRUE) {
}
-qtplot.egumbel <-
+qtplot.gumbelff <-
qtplot.gumbel <-
function(object, show.plot = TRUE, y.arg = TRUE,
spline.fit = FALSE, label = TRUE,
@@ -324,7 +324,7 @@ qtplot.gumbel <-
if (is.Numeric(R))
- R <- rep(R, length = nrow(eta))
+ R <- rep_len(R, nrow(eta))
if (!is.Numeric(percentiles))
stop("the 'percentiles' argument needs to be assigned a value")
@@ -345,10 +345,10 @@ qtplot.gumbel <-
lp <- length(percentiles) # Does not include mpv
- tcol.arg <- rep(tcol.arg, length = lp+mpv)
- lcol.arg <- rep(lcol.arg, length = lp+mpv)
- llwd.arg <- rep(llwd.arg, length = lp+mpv)
- llty.arg <- rep(llty.arg, length = lp+mpv)
+ tcol.arg <- rep_len(tcol.arg, lp+mpv)
+ lcol.arg <- rep_len(lcol.arg, lp+mpv)
+ llwd.arg <- rep_len(llwd.arg, lp+mpv)
+ llty.arg <- rep_len(llty.arg, lp+mpv)
X <- model.matrixvlm(object, type = "lm")
if (is.matrix(X) && length(object at y) && ncol(X)==2 &&
@@ -690,6 +690,7 @@ vgety <- function(object, newdata = NULL) {
"rlplot.vextremes" <- function(object, ...) {
+
newcall <- paste("rlplot.", object at family@vfamily[1],
"(object = object, ... )", sep = "")
newcall <- parse(text = newcall)[[1]]
@@ -698,7 +699,7 @@ vgety <- function(object, newdata = NULL) {
-rlplot.egev <-
+rlplot.gevff <-
rlplot.gev <-
function(object, show.plot = TRUE,
probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 72f1c13..a99a79f 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -36,7 +36,7 @@ residualsvlm <-
M <- object at misc$M
wz <- weights(object, type = "work") # $weights
if (!length(wz))
- wz <- if (M == 1) rep(1, n) else matrix(1, n, M)
+ wz <- if (M == 1) rep_len(1, n) else matrix(1, n, M)
if (M == 1) {
if (any(wz < 0))
@@ -127,7 +127,7 @@ residualsvglm <-
w <- object at prior.weights
if (!length(w))
- w <- rep(1, n)
+ w <- rep_len(1, n)
eta <- object at predictors
dev.fn <- object at family@deviance # May not 'exist' for that model
@@ -156,7 +156,7 @@ residualsvglm <-
mu <- object at fitted
w <- object at prior.weights
if (is.null(w))
- w <- rep(1, n)
+ w <- rep_len(1, n)
eta <- object at predictors
if (!is.null(ll.fn <- object at family@loglikelihood)) {
extra <- object at extra
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 70d2991..897010c 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -61,11 +61,11 @@ rrvglm <- function(formula,
offset <- model.offset(mf)
if (is.null(offset))
- offset <- 0 # yyy ???
+ offset <- 0 # yyy ???
w <- model.weights(mf)
- if (!length(w))
- w <- rep(1, nrow(mf))
- else if (ncol(as.matrix(w))==1 && any(w < 0))
+ if (!length(w)) {
+ w <- rep_len(1, nrow(mf))
+ } else if (ncol(as.matrix(w)) == 1 && any(w < 0))
stop("negative weights not allowed")
if (is.character(family))
@@ -81,7 +81,6 @@ rrvglm <- function(formula,
if (!is.null(family at first))
eval(family at first)
- # 10/12/04: testing for an empty (function) slot not elegant:
if (control$Quadratic && control$FastAlgorithm &&
length(as.list(family at deviance)) <= 1)
stop("The fast algorithm requires the family ",
@@ -102,7 +101,7 @@ rrvglm <- function(formula,
Terms = mt, function.name = function.name, ...)
if (control$Bestof > 1) {
- deviance.Bestof <- rep(fit$crit.list$deviance, len= control$Bestof)
+ deviance.Bestof <- rep_len(fit$crit.list$deviance, control$Bestof)
for (tries in 2:control$Bestof) {
if (control$trace && (control$Bestof>1))
cat(paste("\n========================= Fitting model", tries,
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index 76c4be3..f073cbd 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 643f2cc..1b915c6 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,8 +8,9 @@
+
rrvglm.fit <-
- function(x, y, w = rep(1, length(x[, 1])),
+ function(x, y, w = rep_len(1, nrow(x)),
etastart = NULL, mustart = NULL, coefstart = NULL,
offset = 0, family,
control = rrvglm.control(...),
@@ -95,7 +96,7 @@ rrvglm.fit <-
if (is.character(rrcontrol$Dzero)) {
index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
- if (any(is.na(index)))
+ if (anyNA(index))
stop("Dzero argument didn't fully match y-names")
if (length(index) == M)
stop("all linear predictors are linear in the ",
@@ -259,7 +260,7 @@ rrvglm.fit <-
c.list <- list(z = as.double(z), fit = as.double(t(eta)),
one.more = TRUE,
- coeff = as.double(rep(1,ncol(X.vlm.save))),
+ coeff = as.double(rep_len(1, ncol(X.vlm.save))),
U = as.double(U),
copy.X.vlm = copy.X.vlm,
X.vlm = if (copy.X.vlm) as.double(X.vlm.save) else
@@ -471,7 +472,7 @@ rrvglm.fit <-
}
c.list$one.more <- one.more
- c.list$coeff <- runif(length(new.coeffs)) # 12/3/03; twist needed!
+ c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
old.coeffs <- new.coeffs
} # End of while()
@@ -496,7 +497,7 @@ rrvglm.fit <-
asgn <- attr(X.vlm.save, "assign")
if (nice31) {
- coefs <- rep(0, len = length(xnrow.X.vlm))
+ coefs <- rep_len(0, length(xnrow.X.vlm))
rank <- ncol.X.vlm
} else {
coefs <- tfit$coefficients
@@ -519,10 +520,10 @@ rrvglm.fit <-
}
if (nice31) {
- effects <- rep(0, len = 77)
+ effects <- rep_len(0, 77)
} else {
effects <- tfit$effects
- neff <- rep("", nrow.X.vlm)
+ neff <- rep_len("", nrow.X.vlm)
neff[seq(ncol.X.vlm)] <- cnames
names(effects) <- neff
diff --git a/R/s.q b/R/s.q
index 403f2d3..2b13cd8 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/s.vam.q b/R/s.vam.q
index b2e9db0..0fc9f2a 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -36,7 +36,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
odfvec <- lapply(data, attr, "df")
s.xargument <- lapply(data, attr, "s.xargument")
- for (kk in 1:length(nwhich)) {
+ for (kk in seq_along(nwhich)) {
ii <- nwhich[kk]
temp <- osparv[[ii]]
@@ -47,7 +47,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
warning("only the first ", ncolHlist[ii], " values of ",
"'spar' are used for variable '", s.xargument, "'")
}
- osparv[[ii]] <- rep(temp, length = ncolHlist[ii]) # Recycle
+ osparv[[ii]] <- rep_len(temp, ncolHlist[ii]) # Recycle
temp <- odfvec[[ii]]
if (!is.numeric(temp) || any(temp < 1)) {
@@ -57,7 +57,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
warning("only the first ", ncolHlist[ii], " value(s) of 'df' ",
"are used for variable '", s.xargument, "'")
}
- odfvec[[ii]] <- rep(temp, length = ncolHlist[ii]) # Recycle
+ odfvec[[ii]] <- rep_len(temp, ncolHlist[ii]) # Recycle
if (max(temp) > smooth.frame$neffec[kk]-1) {
stop("'df' value too high for variable '", s.xargument, "'")
}
@@ -90,7 +90,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$s.xargument <- s.xargument # Stored here
smooth.frame$smap <-
- as.vector(cumsum(c(1, ncolHlist[nwhich]))[1:length(nwhich)])
+ as.vector(cumsum(c(1, ncolHlist[nwhich]))[seq_along(nwhich)])
smooth.frame$try.sparv <- osparv
@@ -230,7 +230,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
Bspline <- vector("list", length(nwhich))
names(Bspline) <- nwhich
- for (ii in 1:length(nwhich)) {
+ for (ii in seq_along(nwhich)) {
b.coefs <- fit$bcoeff[(smooth.frame$bindex[ii]):
(smooth.frame$bindex[ii + 1] - 1)]
b.coefs <- matrix(b.coefs, ncol = ncolHlist[nwhich[ii]])
@@ -247,7 +247,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
Leverages <- vector("list", length(nwhich))
names(Leverages) <- nwhich
- for (ii in 1:length(nwhich)) {
+ for (ii in seq_along(nwhich)) {
levvec <- fit$levmat[(smooth.frame$lindex[ii]):
(smooth.frame$lindex[ii+1]-1)]
levmat <- matrix(levvec,
diff --git a/R/simulate.vglm.R b/R/simulate.vglm.R
index 07e6a6a..58d178b 100644
--- a/R/simulate.vglm.R
+++ b/R/simulate.vglm.R
@@ -1,12 +1,10 @@
-# These functions are Copyright (C) 1998-2013 T. W. Yee All rights reserved.
+# These functions are
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# All rights reserved.
+
-# Trying to get simulate() to work for some VGAM family functions.
-# 20131228
-# Last modified:
-# 20131228: adapting simulate.vglm() from stats:::simulate.lm
-# It comes from R 3.0.2.
@@ -26,23 +24,16 @@ simulate.vlm <- function (object, nsim = 1, seed = NULL, ...) {
n <- length(ftd)
ntot <- n * nsim
Fam <- if (inherits(object, "vlm")) {
-# object at family$family
object at family
} else {
-# "gaussian"
stop("cannot get at the 'family' slot")
}
-# if (!is.null(Fam at simslot)) {
-#print("Hi1")
val <-
if (length(Fam at simslot) > 0) {
Fam at simslot(object, nsim)
} else {
stop(gettextf("family '%s' not implemented", Fam), domain = NA)
}
-#print("val")
-#print( val )
-#stop("hello")
if (!is.list(val)) {
dim(val) <- c(n, nsim)
val <- as.data.frame(val)
diff --git a/R/smart.R b/R/smart.R
index 91249e0..ba9b4d4 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -238,7 +238,7 @@ is.smart <- function(object) {
basis <- basis[, -1L, drop = FALSE]
n.col <- ncol(basis)
if (nas) {
- nmat <- matrix(NA, length(nax), n.col)
+ nmat <- matrix(NA_real_, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
@@ -325,7 +325,7 @@ attr( sm.bs, "smart") <- TRUE
drop = FALSE])
n.col <- ncol(basis)
if (nas) {
- nmat <- matrix(NA, length(nax), n.col)
+ nmat <- matrix(NA_real_, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
@@ -388,7 +388,7 @@ attr( sm.ns, "smart") <- TRUE
- if (any(is.na(x)))
+ if (anyNA(x))
stop("missing values are not allowed in 'poly'")
n <- degree + 1
if (raw) {
diff --git a/R/step.vglm.q b/R/step.vglm.q
deleted file mode 100644
index f5f3bea..0000000
--- a/R/step.vglm.q
+++ /dev/null
@@ -1,17 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-step.vglm <- function(fit, ...) {
- cat("Sorry, this function has not been written yet. Returning a NULL.\n")
- NULL
-}
-
-
-
-
-
-
-
-
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 87ca084..065d35f 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -214,7 +214,7 @@ show.vanova <- function(x, digits = .Options$digits, ...) {
if (!is.null(heading))
cat(heading, sep = "\n")
attr(x, "heading") <- NULL
- for (ii in 1:length(x)) {
+ for (ii in seq_along(x)) {
xx <- x[[ii]]
xna <- is.na(xx)
xx <- format(zapsmall(xx, digits))
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 6d78325..c405f00 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 00945d8..ba06f3c 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -40,7 +40,7 @@ summaryvlm <-
NULL
}
- if (any(is.na(Coefs))) {
+ if (anyNA(Coefs)) {
warning(paste("Some NAs in the coefficients---no summary",
" provided; returning object\n"))
return(object)
diff --git a/R/vgam.R b/R/vgam.R
index 353a6d6..70c82a5 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,6 +8,8 @@
+
+
vgam <- function(formula,
family, data = list(),
weights = NULL, subset = NULL, na.action = na.fail,
@@ -32,7 +34,9 @@ vgam <- function(formula,
if (missing(data))
data <- environment(formula)
- mtsave <- terms(formula, "s", data = data)
+ mtsave <- terms(formula, specials = c("s", "ps"), data = data)
+
+
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "na.action",
@@ -109,7 +113,7 @@ vgam <- function(formula,
spars2 <- lapply(mf2, attr, "spar")
dfs2 <- lapply(mf2, attr, "df")
sx2 <- lapply(mf2, attr, "s.xargument")
- for (ii in 1:length(mf)) {
+ for (ii in seq_along(mf)) {
if (length(sx2[[ii]])) {
attr(mf[[ii]], "spar") <- spars2[[ii]]
attr(mf[[ii]], "dfs2") <- dfs2[[ii]]
@@ -123,7 +127,7 @@ vgam <- function(formula,
w <- model.weights(mf)
if (!length(w)) {
- w <- rep(1, nrow(mf))
+ w <- rep_len(1, nrow(mf))
} else if (ncol(as.matrix(w)) == 1 && any(w < 0))
stop("negative weights not allowed")
@@ -142,26 +146,22 @@ vgam <- function(formula,
n <- dim(x)[1]
- if (FALSE && is.R()) {
- family at linkinv <- eval(family at linkinv)
- family at link <- eval(family at link)
-
- for (ii in names(.min.criterion.VGAM))
- if (length(family[[ii]]))
- family[[ii]] <- eval(family[[ii]])
- }
if (length(slot(family, "first")))
eval(slot(family, "first"))
- if (method != "vgam.fit")
- stop("method must be \"model.frame\" or \"vgam.fit\"")
-
- # --------------------------------------------------------------
aa <- attributes(mtsave)
smoothers <- aa$specials
+ mgcv.ps <- length(smoothers$ps) > 0
+ mgcv.PS <- length(smoothers$PS) > 0
+ any.ps.terms <- mgcv.ps || mgcv.PS
+ mgcv.s <- length(smoothers$s) > 0
+ if (any.ps.terms && mgcv.s)
+ stop("cannot include both s() and ps() (or PS()) terms in the formula")
+
+
nonparametric <- length(smoothers$s) > 0
@@ -175,7 +175,53 @@ vgam <- function(formula,
} else {
function.name <- "vglm" # This is effectively so
}
+
+
+
+
+
+
+
+ are.ps.terms <- (length(smoothers$ps) + length(smoothers$PS)) > 0
+ if (are.ps.terms) {
+ control$criterion <- "coefficients" # Overwrite if necessary
+
+ if (length(smoothers$ps) > 0) {
+ ff.ps <- apply(aa$factors[smoothers[["ps"]],,drop = FALSE], 2, any)
+ smoothers[["ps"]] <-
+ if (any(ff.ps)) seq(along = ff.ps)[aa$order == 1 & ff.ps] else NULL
+ smooth.labels <- aa$term.labels[unlist(smoothers)]
+ }
+
+
+
+
+
+
+ assignx <- attr(x, "assign")
+ which.X.ps <- assignx[smooth.labels]
+ data <- mf[, names(which.X.ps), drop = FALSE]
+ attr(data, "class") <- NULL
+ S.arg <- lapply(data, attr, "S.arg")
+ lambdalist <- lapply(data, attr, "lambda")
+ ridge.adj <- lapply(data, attr, "ridge.adj")
+ term.labels <- aa$term.labels
+
+
+ }
+
+
+ ps.list <- if (any.ps.terms)
+ list(indexterms = ff.ps,
+ intercept = aa$intercept,
+ which.X.ps = which.X.ps,
+ S.arg = S.arg,
+ lambdalist = lambdalist,
+ ridge.adj = ridge.adj,
+ term.labels = term.labels,
+ assignx = assignx) else
+ NULL
fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
@@ -186,7 +232,9 @@ vgam <- function(formula,
constraints = constraints, extra = extra, qr.arg = qr.arg,
Terms = mtsave,
nonparametric = nonparametric, smooth.labels = smooth.labels,
- function.name = function.name, ...)
+ function.name = function.name,
+ ps.list = ps.list,
+ ...)
if (is.Numeric(fit$nl.df) && any(fit$nl.df < 0)) {
@@ -194,12 +242,14 @@ vgam <- function(formula,
}
+
+
if (!is.null(fit[["smooth.frame"]])) {
- fit <- fit[-1] # Strip off smooth.frame
+ fit <- fit[-1] # Strip off smooth.frame
} else {
}
- fit$smomat <- NULL # Not needed
+ fit$smomat <- NULL # Not needed
fit$call <- ocall
if (model)
@@ -220,8 +270,14 @@ vgam <- function(formula,
fit$smart.prediction <- get.smart.prediction()
+
+
+
+
answer <-
- new("vgam",
+ new(
+ if (any.ps.terms) "psvgam" else "vgam",
+
"assign" = attr(x, "assign"),
"call" = fit$call,
"coefficients" = fit$coefficients,
@@ -263,6 +319,18 @@ vgam <- function(formula,
+ if (length(fit$misc$Xvlm.aug)) {
+ slot(answer, "psslot") <-
+ list(Xvlm.aug = fit$misc$Xvlm.aug,
+ ps.list = fit$misc$ps.list,
+ magicfit = fit$misc$magicfit)
+ fit$misc$Xvlm.aug <- NULL
+ fit$misc$ps.list <- NULL
+ fit$misc$magicfit <- NULL
+ }
+
+
+
if (x.arg && length(Xm2))
slot(answer, "Xm2") <- Xm2 # The second (lm) design matrix
if (y.arg && length(Ym2))
@@ -319,14 +387,23 @@ vgam <- function(formula,
slot(answer, "effects") <- fit$effects
- answer
-}
-attr(vgam, "smart") <- TRUE
+ if (nonparametric && is.buggy.vlm(answer)) {
+ warning("some s() terms have constraint matrices that have columns",
+ " which are not orthogonal;",
+ " try using ps() instead of s().")
+ } else {
+ }
+
+
+ answer
+}
+attr(vgam, "smart") <- TRUE
+
@@ -381,14 +458,13 @@ shadowvgam <-
-
is.buggy.vlm <- function(object, each.term = FALSE, ...) {
Hk.list <- constraints(object)
ncl <- names(Hk.list)
- TFvec <- rep(FALSE, length = length(ncl))
+ TFvec <- rep_len(FALSE, length(ncl))
names(TFvec) <- ncl
@@ -400,7 +476,7 @@ is.buggy.vlm <- function(object, each.term = FALSE, ...) {
return(if (each.term) TFvec else any(TFvec))
}
- for (kay in 1:length(ncl)) {
+ for (kay in seq_along(ncl)) {
cmat <- Hk.list[[kay]]
if (ncol(cmat) > 1 && substring(ncl[kay], 1, 2) == "s(") {
CMat <- crossprod(cmat) # t(cmat) %*% cmat
@@ -426,8 +502,3 @@ setMethod("is.buggy", signature(object = "vlm"),
-
-
-
-
-
diff --git a/R/vgam.control.q b/R/vgam.control.q
index a5e8954..0f5db47 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,6 +20,7 @@ vgam.control <- function(all.knots = FALSE,
se.fit = TRUE,
trace = FALSE,
wzepsilon = .Machine$double.eps^0.75,
+ xij = NULL,
...) {
@@ -79,11 +80,12 @@ vgam.control <- function(all.knots = FALSE,
criterion = criterion,
epsilon = epsilon,
maxit = maxit,
- nk=nk,
+ nk = nk,
min.criterion = .min.criterion.VGAM,
save.weights = as.logical(save.weights)[1],
se.fit = as.logical(se.fit)[1],
trace = as.logical(trace)[1],
+ xij = if (is(xij, "formula")) list(xij) else xij,
wzepsilon = wzepsilon)
}
@@ -102,12 +104,12 @@ vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
trivc <- trivial.constraints(constraints)
- ans <- rep(NA_real_, length = ncol(smomat))
+ ans <- rep_len(NA_real_, ncol(smomat))
Uderiv <- vbacksub(U, t(deriv), M = M, n = n) # \bU_i^{-1} \biu_i
ptr <- 0
- for (ii in 1:length(smooth.labels)) {
+ for (ii in seq_along(smooth.labels)) {
cmat <- constraints[[ smooth.labels[ii] ]]
index <- (ptr + 1):(ptr + ncol(cmat))
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index dae6af9..9e138f3 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,27 +1,38 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
+
vgam.fit <-
- function(x, y, w, mf,
+ function(x, y, w = rep_len(1, nrow(x)), mf,
Xm2 = NULL, Ym2 = NULL, # Added 20130730
- etastart, mustart, coefstart,
- offset, family, control, criterion = "coefficients",
- constraints = NULL, extra, qr.arg,
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family, control = vgam.control(),
+ criterion = "coefficients",
+ constraints = NULL, extra= NULL, qr.arg = FALSE,
Terms,
nonparametric, smooth.labels,
- function.name = "vgam", ...) {
+ function.name = "vgam",
+ ps.list = NULL, # mf,
+ ...) {
+
+
+ mgcvvgam <- length(ps.list) > 0 # iff \exists ps() or PS() term
+
eff.n <- nrow(x) # + sum(abs(w[1:nrow(x)]))
specialCM <- NULL
post <- list()
- check.Rank <- TRUE # Set this to false for family functions vppr() etc.
+ check.rank <- TRUE
+ check.rank <- control$Check.rank
epsilon <- control$epsilon
maxit <- control$maxit
save.weights <- control$save.weights
@@ -121,7 +132,21 @@ vgam.fit <-
bf <- "vlm.wfit"
}
- X.vlm.save <- lm2vlm.model.matrix(x, Hlist, xij = control$xij)
+
+ X.vlm.save <- lm2vlm.model.matrix(x, Hlist, xij = control$xij,
+ Xm2 = Xm2) # 20160420
+
+
+
+
+
+
+
+ if (mgcvvgam) {
+ Xvlm.aug <- Pen.psv(constraints = constraints, ps.list = ps.list)
+ first.ps <- TRUE
+ }
+
if (length(coefstart)) {
@@ -157,7 +182,7 @@ vgam.fit <-
c.list <- list(wz = as.double(wz), z = as.double(z),
fit = as.double(t(eta)),
one.more = TRUE, U = as.double(U),
- coeff = as.double(rep(1, ncol(X.vlm.save))))
+ coeff = as.double(rep_len(1, ncol(X.vlm.save))))
dX.vlm <- as.integer(dim(X.vlm.save))
@@ -167,9 +192,37 @@ vgam.fit <-
stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
+
+
+
+ if (mgcvvgam) {
+ bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
+ Hlist = Hlist, U = U, matrix.out = FALSE, is.vlmX = TRUE,
+ qr = qr.arg, xij = NULL,
+ Xvlm.aug = Xvlm.aug,
+ ps.list = ps.list, constraints = constraints,
+ first.ps = first.ps,
+ trace = trace))
+ }
+
+
while (c.list$one.more) {
tfit <- eval(bf.call) # fit$smooth.frame is new
+
+ if (mgcvvgam) {
+ first.ps <- tfit$first.ps
+ Xvlm.aug <- tfit$Xvlm.aug
+ ps.list <- tfit$ps.list
+ magicfit <- tfit$magicfit
+ }
+
+
+
+
+
+
+
c.list$coeff <- tfit$coefficients
tfit$predictors <- tfit$fitted.values + offset
@@ -182,7 +235,15 @@ vgam.fit <-
+
+
fv <- c.list$fit
+
+ if (mgcvvgam) {
+ fv <- head(fv, n*M)
+ }
+
+
new.coeffs <- c.list$coeff
if (length(family at middle))
@@ -224,6 +285,9 @@ vgam.fit <-
if (!is.finite(one.more) || !is.logical(one.more))
one.more <- FALSE
+
+
+
if (one.more) {
iter <- iter + 1
deriv.mu <- eval(family at deriv)
@@ -259,7 +323,7 @@ vgam.fit <-
eval(family at fini)
coefs <- tfit$coefficients
- asgn <- attr(X.vlm.save, "assign") # 20011129 was x
+ asgn <- attr(X.vlm.save, "assign") # 20011129 was x
names(coefs) <- xnrow.X.vlm
cnames <- xnrow.X.vlm
@@ -268,7 +332,9 @@ vgam.fit <-
rank <- tfit$rank
if (rank < ncol(x))
stop("rank < ncol(x) is bad")
- } else rank <- ncol(x)
+ } else {
+ rank <- ncol(x)
+ }
R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
R[lower.tri(R)] <- 0
@@ -294,7 +360,7 @@ vgam.fit <-
names(mu) <- names(fv)
}
- tfit$fitted.values <- NULL # Have to kill it off 3/12/01
+ tfit$fitted.values <- NULL # Have to kill it off 20011203
fit <- structure(c(tfit,
list(assign = asgn,
constraints = Hlist,
@@ -309,7 +375,7 @@ vgam.fit <-
df.residual <- nrow.X.vlm - rank
- if (!se.fit) {
+ if (!mgcvvgam && !se.fit) {
fit$varmat <- NULL
}
@@ -355,7 +421,7 @@ vgam.fit <-
- if (se.fit && length(fit$s.xargument)) {
+ if (!mgcvvgam && se.fit && length(fit$s.xargument)) {
misc$varassign <- varassign(Hlist, names(fit$s.xargument))
}
@@ -366,6 +432,28 @@ vgam.fit <-
}
+
+
+
+
+
+
+
+
+
+
+
+
+
+ if (mgcvvgam) {
+ misc$Xvlm.aug <- Xvlm.aug
+ misc$ps.list <- ps.list
+ misc$magicfit <- magicfit
+ }
+
+
+
+
crit.list <- list()
if (criterion != "coefficients")
crit.list[[criterion]] <- fit[[criterion]] <- new.crit
@@ -395,7 +483,15 @@ vgam.fit <-
-
+
+
+
+
+
+
+
+
+
if (w[1] != 1 || any(w != w[1]))
fit$prior.weights <- w
@@ -419,7 +515,8 @@ vgam.fit <-
- fit$misc <- NULL # 20020608; It is necessary to kill it as it
+ fit$misc <- NULL
+
structure(c(fit, list(
contrasts = attr(x, "contrasts"),
control = control,
@@ -457,7 +554,7 @@ new.assign <- function(X, Hlist) {
kk <- 0
low <- 1
- for (ii in 1:length(asgn)) {
+ for (ii in seq_along(asgn)) {
len <- low:(low + ncolHlist[ii] * lasgn[ii] -1)
temp <- matrix(len, ncolHlist[ii], lasgn[ii])
for (mm in 1:ncolHlist[ii])
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 9fc6ea7..ae1c189 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,7 +10,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
if (is.list(x)) {
nvar <- length(x)
if (length(nk))
- nk <- rep(nk, length = nvar)
+ nk <- rep_len(nk, nvar)
temp <- vgam.match(x[[1]], all.knots = all.knots, nk = nk[1])
ooo <- matrix(temp$matcho, length(temp$matcho), nvar)
@@ -39,7 +39,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
xmin = xmin, xmax = xmax))
}
- if (!is.null(attributes(x)$NAs) || any(is.na(x)))
+ if (!is.null(attributes(x)$NAs) || anyNA(x))
stop("cannot smooth on variables with NAs")
sx <- unique(sort(as.vector(x))) # "as.vector()" strips off attributes
@@ -53,7 +53,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
xmax <- sx[neffec]
xbar <- (sx - xmin) / (xmax - xmin)
- noround <- TRUE # Improvement 3/8/02
+ noround <- TRUE # Improvement 20020803
if (all.knots) {
knot <- if (noround) {
valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[neffec], 3)))
diff --git a/R/vglm.R b/R/vglm.R
index a080dfb..4b554fe 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,10 +1,11 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
+
vglm <- function(formula,
family, data = list(),
weights = NULL, subset = NULL, na.action = na.fail,
@@ -88,9 +89,8 @@ vglm <- function(formula,
offset <- 0 # yyy ???
w <- model.weights(mf)
if (!length(w)) {
- w <- rep(1, nrow(mf))
- } else
- if (ncol(as.matrix(w)) == 1 && any(w < 0))
+ w <- rep_len(1, nrow(mf))
+ } else if (ncol(as.matrix(w)) == 1 && any(w < 0))
stop("negative weights not allowed")
if (is.character(family))
@@ -163,7 +163,7 @@ vglm <- function(formula,
slot(answer, "offset") <- as.matrix(offset)
if (length(fit$weights))
- slot(answer, "weights") <- as.matrix(fit$weights)
+ slot(answer, "weights") <- as.matrix(fit$weights)
if (x.arg)
slot(answer, "x") <- fit$x # The 'small' (lm) design matrix
@@ -262,10 +262,3 @@ shadowvglm <-
-
-
-
-
-
-
-
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 1f61120..d426584 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 817c67f..455d575 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,7 +8,7 @@
vglm.fit <-
- function(x, y, w = rep(1, length(x[, 1])),
+ function(x, y, w = rep_len(1, nrow(x)),
X.vlm.arg = NULL,
Xm2 = NULL, Ym2 = NULL,
etastart = NULL, mustart = NULL, coefstart = NULL,
@@ -26,7 +26,7 @@ vglm.fit <-
specialCM <- NULL
post <- list()
- check.rank <- TRUE # Set this to false for family functions vppr() etc.
+ check.rank <- TRUE # Set this to FALSE for family functions vppr() etc.
check.rank <- control$Check.rank
nonparametric <- FALSE
epsilon <- control$epsilon
@@ -42,19 +42,16 @@ vglm.fit <-
- n <- dim(x)[1]
+ n <- nrow(x)
-
- copy.X.vlm <- FALSE # May be overwritten in @initialize
stepsize <- orig.stepsize
- old.coeffs <- coefstart
+ old.coeffs <- coefstart # May be a NULL
- intercept.only <- ncol(x) == 1 &&
- dimnames(x)[[2]] == "(Intercept)"
+ intercept.only <- ncol(x) == 1 && colnames(x) == "(Intercept)"
y.names <- predictors.names <- NULL # May be overwritten in @initialize
n.save <- n
@@ -72,10 +69,7 @@ vglm.fit <-
if (length(etastart)) {
eta <- etastart
mu <- if (length(mustart)) mustart else
- if (length(body(slot(family, "linkinv"))))
- slot(family, "linkinv")(eta, extra) else
- warning("argument 'etastart' assigned a value ",
- "but there is no 'linkinv' slot to use it")
+ slot(family, "linkinv")(eta, extra = extra)
}
@@ -83,7 +77,7 @@ vglm.fit <-
if (length(mustart)) {
mu <- mustart
if (length(body(slot(family, "linkfun")))) {
- eta <- slot(family, "linkfun")(mu, extra)
+ eta <- slot(family, "linkfun")(mu, extra = extra)
} else {
warning("argument 'mustart' assigned a value ",
"but there is no 'linkfun' slot to use it")
@@ -91,16 +85,11 @@ vglm.fit <-
}
- validparams <- if (length(body(slot(family, "validparams")))) {
- slot(family, "validparams")(eta, extra = extra)
- } else {
- TRUE
- }
- validfitted <- if (length(body(slot(family, "validfitted")))) {
- slot(family, "validfitted")(mu, extra = extra)
- } else {
- TRUE
- }
+ validparams <- validfitted <- TRUE
+ if (length(body(slot(family, "validparams"))))
+ validparams <- slot(family, "validparams")(eta, y = y, extra = extra)
+ if (length(body(slot(family, "validfitted"))))
+ validfitted <- slot(family, "validfitted")(mu, y = y, extra = extra)
if (!(validparams && validfitted))
stop("could not obtain valid initial values. ",
"Try using 'etastart', 'coefstart' or 'mustart', else ",
@@ -117,24 +106,23 @@ vglm.fit <-
eval(slot(family, "constraints"))
- Hlist <- process.constraints(constraints, x, M,
+ Hlist <- process.constraints(constraints, x = x, M = M,
specialCM = specialCM,
Check.cm.rank = control$Check.cm.rank)
ncolHlist <- unlist(lapply(Hlist, ncol))
- dimB <- sum(ncolHlist)
- X.vlm.save <- if (length(X.vlm.arg)) {
- X.vlm.arg
- } else {
- lm2vlm.model.matrix(x, Hlist, xij = control$xij,
- Xm2 = Xm2)
- }
+ X.vlm.save <-
+ if (length(X.vlm.arg)) {
+ X.vlm.arg
+ } else {
+ lm2vlm.model.matrix(x, Hlist, xij = control$xij, Xm2 = Xm2)
+ }
@@ -142,11 +130,11 @@ vglm.fit <-
eta <- if (ncol(X.vlm.save) > 1) {
matrix(X.vlm.save %*% coefstart, n, M, byrow = TRUE) + offset
} else {
- matrix(X.vlm.save * coefstart, n, M, byrow = TRUE) + offset
+ matrix(X.vlm.save * coefstart, n, M, byrow = TRUE) + offset
}
if (M == 1)
eta <- c(eta)
- mu <- slot(family, "linkinv")(eta, extra)
+ mu <- slot(family, "linkinv")(eta, extra = extra)
}
@@ -159,74 +147,56 @@ vglm.fit <-
coefficients = 1,
tfun(mu = mu, y = y, w = w,
res = FALSE, eta = eta, extra))
- old.crit <- ifelse(minimize.criterion, 10 * new.crit + 10,
- -10 * new.crit - 10)
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
if (control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wz <- checkwz(wz, M = M, trace = trace, wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent = !trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
- c.list <- list(z = as.double(z),
- fit = as.double(t(eta)),
- one.more = TRUE,
- coeff = as.double(rep(1, ncol(X.vlm.save))),
- U = as.double(U),
- copy.X.vlm = copy.X.vlm,
- X.vlm = if (copy.X.vlm) as.double(X.vlm.save) else
- double(3))
+ one.more <- TRUE
- dX.vlm <- as.integer(dim(X.vlm.save))
- nrow.X.vlm <- dX.vlm[[1]]
- ncol.X.vlm <- dX.vlm[[2]]
-
+ nrow.X.vlm <- nrow(X.vlm.save)
+ ncol.X.vlm <- ncol(X.vlm.save)
if (nrow.X.vlm < ncol.X.vlm)
- stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
+ stop("There are ", ncol.X.vlm, " parameters but only ",
+ nrow.X.vlm, " observations")
- while (c.list$one.more) {
- tfit <- vlm.wfit(xmat = X.vlm.save, z,
+ while (one.more) {
+ tfit <- vlm.wfit(xmat = X.vlm.save, zmat = z,
Hlist = NULL, U = U,
- matrix.out = FALSE,
- is.vlmX = TRUE,
- qr = qr.arg, xij = NULL) # fit$smooth.frame is new
-
- c.list$coeff <- tfit$coefficients
+ matrix.out = FALSE, is.vlmX = TRUE,
+ qr = qr.arg, xij = NULL)
- tfit$predictors <- tfit$fitted.values
- c.list$fit <- tfit$fitted.values
+
- if (!c.list$one.more) {
- break
- }
- fv <- c.list$fit
- new.coeffs <- c.list$coeff
+ fv <- tfit$fitted.values
+ new.coeffs <- tfit$coefficients # c.list$coeff
if (length(slot(family, "middle")))
eval(slot(family, "middle"))
eta <- fv + offset
- mu <- slot(family, "linkinv")(eta, extra)
+ mu <- slot(family, "linkinv")(eta, extra = extra)
if (length(slot(family, "middle2")))
eval(slot(family, "middle2"))
old.crit <- new.crit
- new.crit <-
+ new.crit <-
switch(criterion,
coefficients = new.coeffs,
tfun(mu = mu, y = y, w = w,
@@ -247,12 +217,13 @@ vglm.fit <-
coefficients = {if (length(new.crit) > 2) cat("\n");
cat(UUUU, fill = TRUE, sep = ", ")},
cat(UUUU, fill = TRUE, sep = ", "))
- }
+ } # if (trace && orig.stepsize == 1)
take.half.step <- (control$half.stepsizing &&
length(old.coeffs)) &&
((orig.stepsize != 1) ||
+ (!is.finite(new.crit)) || # 20160321
(criterion != "coefficients" &&
(if (minimize.criterion) new.crit > old.crit else
new.crit < old.crit)))
@@ -261,20 +232,15 @@ vglm.fit <-
if (!take.half.step && length(old.coeffs)) {
- validparams <- if (length(body(slot(family, "validparams")))) {
- slot(family, "validparams")(eta, extra = extra)
- } else {
- TRUE
- }
- validfitted <- if (length(body(slot(family, "validfitted")))) {
- slot(family, "validfitted")(mu, extra = extra)
- } else {
- TRUE
- }
+ validparams <- validfitted <- TRUE
+ if (length(body(slot(family, "validparams"))))
+ validparams <- slot(family, "validparams")(eta, y = y, extra = extra)
+ if (length(body(slot(family, "validfitted"))))
+ validfitted <- slot(family, "validfitted")(mu, y = y, extra = extra)
take.half.step <- !(validparams && validfitted)
- if (take.half.step) {
+ if (FALSE && take.half.step) {
stepsize <- orig.stepsize / 4
}
}
@@ -282,62 +248,58 @@ vglm.fit <-
if (take.half.step) {
- stepsize <- 2 * min(orig.stepsize, 2*stepsize)
+ stepsize <- (1 + (orig.stepsize != 1)) * orig.stepsize
new.coeffs.save <- new.coeffs
- if (trace)
+ if (trace)
cat("Taking a modified step")
repeat {
- if (trace) {
- cat(".")
- flush.console()
- }
- stepsize <- stepsize / 2
- if (too.small <- stepsize < 1e-6)
- break
- new.coeffs <- (1-stepsize) * old.coeffs +
- stepsize * new.coeffs.save
-
- if (length(slot(family, "middle")))
- eval(slot(family, "middle"))
-
- fv <- X.vlm.save %*% new.coeffs
- if (M > 1)
- fv <- matrix(fv, n, M, byrow = TRUE)
-
- eta <- fv + offset
- mu <- slot(family, "linkinv")(eta, extra)
-
- if (length(slot(family, "middle2")))
- eval(slot(family, "middle2"))
-
- new.crit <-
- switch(criterion,
- coefficients = new.coeffs,
- tfun(mu = mu, y = y, w = w,
- res = FALSE, eta = eta, extra))
-
-
- validparams <- if (length(body(slot(family, "validparams")))) {
- slot(family, "validparams")(eta, extra = extra)
- } else {
- TRUE
- }
- validfitted <- if (length(body(slot(family, "validfitted")))) {
- slot(family, "validfitted")(mu, extra = extra)
- } else {
- TRUE
- }
-
-
- if (validparams && validfitted &&
- (criterion == "coefficients" ||
- (( minimize.criterion && new.crit < old.crit) ||
- (!minimize.criterion && new.crit > old.crit))))
- break
+ if (trace) {
+ cat(".")
+ flush.console()
+ }
+ stepsize <- stepsize / 2
+ if (too.small <- stepsize < 1e-6)
+ break
+ new.coeffs <- (1-stepsize) * old.coeffs +
+ stepsize * new.coeffs.save
+
+ if (length(slot(family, "middle")))
+ eval(slot(family, "middle"))
+
+ fv <- X.vlm.save %*% new.coeffs
+ if (M > 1)
+ fv <- matrix(fv, n, M, byrow = TRUE)
+
+ eta <- fv + offset
+ mu <- slot(family, "linkinv")(eta, extra = extra)
+
+ if (length(slot(family, "middle2")))
+ eval(slot(family, "middle2"))
+
+ new.crit <-
+ switch(criterion,
+ coefficients = new.coeffs,
+ tfun(mu = mu, y = y, w = w,
+ res = FALSE, eta = eta, extra))
+
+
+ validparams <- validfitted <- TRUE
+ if (length(body(slot(family, "validparams"))))
+ validparams <- slot(family, "validparams")(eta, y, extra = extra)
+ if (length(body(slot(family, "validfitted"))))
+ validfitted <- slot(family, "validfitted")(mu, y, extra = extra)
+
+ if (validparams && validfitted &&
+ (is.finite(new.crit)) && # 20160321
+ (criterion == "coefficients" ||
+ (( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))))
+ break
} # of repeat
if (trace)
cat("\n")
+
if (too.small) {
warning("iterations terminated because ",
"half-step sizes are very small")
@@ -361,10 +323,10 @@ vglm.fit <-
if (length(new.crit) > 2) cat("\n");
cat(UUUU, fill = TRUE, sep = ", ")},
cat(UUUU, fill = TRUE, sep = ", "))
- }
+ } # if (trace)
one.more <- eval(control$convergence)
- }
+ } # Not too.small
} else {
one.more <- eval(control$convergence)
}
@@ -372,26 +334,28 @@ vglm.fit <-
if (!is.logical(one.more))
one.more <- FALSE
+
if (one.more) {
iter <- iter + 1
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
if (control$checkwz)
- wz <- checkwz(wz, M = M, trace = trace,
- wzepsilon = control$wzepsilon)
+ wz <- checkwz(wz, M = M, trace = trace, wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent = !trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
- c.list$z <- z
- c.list$U <- U
- if (copy.X.vlm)
- c.list$X.vlm <- X.vlm.save
- }
+ } # if (one.more)
+
+
+
+ if (!one.more && take.half.step && orig.stepsize == 1)
+ warning("some quantities such as z, residuals, SEs may ",
+ "be inaccurate due to convergence at a half-step")
+
+
- c.list$one.more <- one.more
- c.list$coeff <- runif(length(new.coeffs)) # 20030312; twist needed!
old.coeffs <- new.coeffs
} # End of while()
@@ -410,12 +374,12 @@ vglm.fit <-
eval(slot(family, "fini"))
if (M > 1)
- tfit$predictors <- matrix(tfit$predictors, n, M)
+ fv <- matrix(fv, n, M)
- coefs <- tfit$coefficients
+ final.coefs <- new.coeffs # Was tfit$coefficients prior to 20160317
asgn <- attr(X.vlm.save, "assign")
- names(coefs) <- xnrow.X.vlm
+ names(final.coefs) <- xnrow.X.vlm
rank <- tfit$rank
cnames <- xnrow.X.vlm
@@ -423,30 +387,31 @@ vglm.fit <-
if (check.rank && rank < ncol.X.vlm)
stop("vglm only handles full-rank models (currently)")
+
R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
R[lower.tri(R)] <- 0
attributes(R) <- list(dim = c(ncol.X.vlm, ncol.X.vlm),
dimnames = list(cnames, cnames), rank = rank)
effects <- tfit$effects
- neff <- rep("", nrow.X.vlm)
+ neff <- rep_len("", nrow.X.vlm)
neff[seq(ncol.X.vlm)] <- cnames
names(effects) <- neff
- dim(tfit$predictors) <- c(n, M)
+ dim(fv) <- c(n, M)
dn <- labels(x)
yn <- dn[[1]]
xn <- dn[[2]]
- residuals <- z - tfit$predictors
+ wresiduals <- z - fv # Replaced by fv 20160408
if (M == 1) {
- tfit$predictors <- as.vector(tfit$predictors)
- residuals <- as.vector(residuals)
- names(residuals) <- names(tfit$predictors) <- yn
+ fv <- as.vector(fv)
+ wresiduals <- as.vector(wresiduals)
+ names(wresiduals) <- names(fv) <- yn
} else {
- dimnames(residuals) <- dimnames(tfit$predictors) <-
- list(yn, predictors.names)
+ dimnames(wresiduals) <-
+ dimnames(fv) <- list(yn, predictors.names)
}
if (is.matrix(mu)) {
@@ -464,7 +429,7 @@ vglm.fit <-
df.residual <- nrow.X.vlm - rank
fit <- list(assign = asgn,
- coefficients = coefs,
+ coefficients = final.coefs,
constraints = Hlist,
df.residual = df.residual,
df.total = n * M,
@@ -472,7 +437,7 @@ vglm.fit <-
fitted.values = mu,
offset = offset,
rank = rank,
- residuals = residuals,
+ residuals = wresiduals,
R = R,
terms = Terms) # terms: This used to be done in vglm()
@@ -501,7 +466,7 @@ vglm.fit <-
orig.assign = attr(x, "assign"),
p = ncol(x),
ncol.X.vlm = ncol.X.vlm,
- ynames = dimnames(y)[[2]])
+ ynames = colnames(y))
crit.list <- list()
@@ -527,7 +492,7 @@ vglm.fit <-
eval(slot(family, "last"))
structure(c(fit,
- list(predictors = tfit$predictors,
+ list(predictors = fv, # tfit$predictors,
contrasts = attr(x, "contrasts"),
control = control,
crit.list = crit.list,
diff --git a/R/vlm.R b/R/vlm.R
index ab6be50..55354ed 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -73,7 +73,7 @@ vlm <- function(formula,
predictors.names <- dy2
if (!length(prior.weights)) {
- prior.weights <- rep(1, len = n)
+ prior.weights <- rep_len(1, n)
names(prior.weights) <- dy1
}
if (any(prior.weights <= 0))
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 839b51e..a6f7004 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -10,6 +10,8 @@
+
+
vlm.wfit <-
function(xmat, zmat, Hlist, wz = NULL, U = NULL,
matrix.out = FALSE, is.vlmX = FALSE, ResSS = TRUE, qr = FALSE,
@@ -22,8 +24,15 @@ vlm.wfit <-
ncol(xmat)
},
xij = NULL,
- lp.names = NULL, Eta.range = NULL, Xm2 = NULL, ...) {
+ lp.names = NULL, Eta.range = NULL, Xm2 = NULL,
+ Xvlm.aug = NULL,
+ ps.list = NULL,
+ constraints = NULL, first.ps = FALSE,
+ trace = FALSE,
+
+ ...) {
+ mgcvvgam <- length(ps.list)
missing.Hlist <- missing(Hlist)
zmat <- as.matrix(zmat)
@@ -71,8 +80,82 @@ vlm.wfit <-
+
+
+
+
+ if (mgcvvgam) {
+ m.objects <- psv2magic(x.VLM = X.vlm,
+ constraints = constraints,
+ lambda.vlm = attr(Xvlm.aug, "lambda.vlm"),
+ ps.list = ps.list)
+ if (FALSE && trace) {
+ cat("m.objects$sp \n")
+ print( m.objects$sp )
+ cat("m.objects$off \n")
+ print( m.objects$off )
+ }
+
+ if (first.ps) {
+ m.objects$sp <- rep_len(-1, length(m.objects$sp))
+ }
+
+
+
+ magicfit <- mgcv::magic(y = z.vlm,
+ X = m.objects$x.VLM.new,
+ sp = m.objects$sp,
+ S = m.objects$S.arg,
+ off = m.objects$off,
+ gcv = FALSE)
+ SP <- magicfit$sp
+ if (FALSE && trace) {
+ cat("SP \n")
+ print( SP )
+ }
+
+ length.lambda.vlm <- sapply(attr(Xvlm.aug, "lambda.vlm"), length) # lambda.new
+ sp.opt <- vector("list", length(length.lambda.vlm)) # list()
+ iioffset <- 0
+ for (ii in seq_along(length.lambda.vlm)) {
+ sp.opt[[ii]] <- SP[iioffset + 1:length.lambda.vlm[ii]]
+ iioffset <- iioffset + length.lambda.vlm[ii]
+ }
+ names(sp.opt) <- names(ps.list$which.X.ps)
+ if (FALSE && trace) {
+ cat("sp.opt \n")
+ print( sp.opt )
+ }
+
+ ps.list$lambdalist <- sp.opt
+ Xvlm.aug <- Pen.psv(constraints = constraints, ps.list = ps.list)
+
+
+ first.ps <- FALSE # May have been TRUE on entry but is FALSE on exit
+
+
+ X.vlm <- rbind(X.vlm, Xvlm.aug)
+ z.vlm <- c(z.vlm, rep(0, nrow(Xvlm.aug)))
+ }
+
+
+
+
ans <- lm.fit(X.vlm, y = z.vlm, ...)
+
+
+ if (mgcvvgam) {
+ ans$residuals <- head(ans$residuals, n*M)
+ ans$effects <- head(ans$effects, n*M)
+ ans$fitted.values <- head(ans$fitted.values, n*M)
+ ans$qr$qr <- head(ans$qr$qr, n*M)
+ }
+
+
+
+
+
if (ResSS) {
ans$ResSS <- sum(ans$resid^2)
if (only.ResSS)
@@ -110,6 +193,23 @@ vlm.wfit <-
ans$constraints <- Hlist
ans$contrasts <- contrast.save
+
+
+
+
+
+
+
+
+ if (mgcvvgam) {
+ ans$first.ps <- first.ps # Updated.
+ ans$ps.list <- ps.list # Updated wrt "lambdalist" component.
+ ans$Xvlm.aug <- Xvlm.aug # Updated matrix.
+ ans$magicfit <- magicfit # Updated.
+ }
+
+
+
if (x.ret) {
ans$X.vlm <- X.vlm.save
}
@@ -141,40 +241,8 @@ vlm.wfit <-
}
ans$mat.coefficients <- t(B)
ans
-}
-
-
-
-
-if (FALSE)
-print.vlm.wfit <- function(x, ...) {
- if (!is.null(cl <- x$call)) {
- cat("Call:\n")
- dput(cl)
- }
-
- coef <- x$coefficients
- cat("\nCoefficients:\n")
- print(coef, ...)
-
- rank <- x$rank
- if (is.null(rank)) {
- rank <- sum(!is.na(coef))
- }
- n <- x$misc$n
- M <- x$misc$M
- rdf <- x$df.resid
- if (is.null(rdf)) {
- rdf <- (n - rank) * M
- }
- cat("\nDegrees of Freedom:", n*M, "Total;", rdf, "Residual\n")
-
- if (!is.null(x$ResSS)) {
- cat("Residual Sum of Squares:", format(x$ResSS), "\n")
- }
+} # vlm.wfit
- invisible(x)
-}
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index dfc0f24..ea06895 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -150,7 +150,7 @@ vsmooth.spline <-
my.call <- match.call()
if (missing(y)) {
if (is.list(x)) {
- if (any(is.na(match(c("x", "y"), names(x)))))
+ if (anyNA(match(c("x", "y"), names(x))))
stop("cannot find 'x' and 'y' in list")
y <- x$y
x <- x$x
@@ -175,14 +175,14 @@ vsmooth.spline <-
stop("lengths of arguments 'x' and 'y' must match")
}
- if (any(is.na(xvector)) || any(is.na(ymat))) {
+ if (anyNA(xvector) || anyNA(ymat)) {
stop("NAs not allowed in arguments 'x' or 'y'")
}
if (is.null(w)) {
wzmat <- matrix(1, n_lm, M)
} else {
- if (any(is.na(w))) {
+ if (anyNA(w)) {
stop("NAs not allowed in argument 'w'")
}
wzmat <- as.matrix(w)
@@ -226,7 +226,7 @@ vsmooth.spline <-
stop("not enough unique 'x' values (need 7 or more)")
}
- dim1U <- dim2wz # 10/1/00; was M * (M+1) / 2
+ dim1U <- dim2wz # 20000110; was M * (M+1) / 2
collaps <- .C("vsuff9",
as.integer(n_lm), as.integer(neff), as.integer(ooo),
@@ -261,8 +261,8 @@ vsmooth.spline <-
}
ncb0 <- ncol(constraints[[2]]) # Of xxx and not of the intercept
- spar <- rep(if (length(spar)) spar else 0, length = ncb0)
- dfvec <- rep(df, length = ncb0)
+ spar <- rep_len(if (length(spar)) spar else 0, ncb0)
+ dfvec <- rep_len(df, ncb0)
if (!missing.spar) {
ispar <- 1
@@ -286,7 +286,7 @@ vsmooth.spline <-
"Bcoefficients" = matrix(NA_real_, 1, 1),
"knots" = numeric(0),
"xmin" = numeric(0),
- "xmax" = numeric(0)) # 8/11/03
+ "xmax" = numeric(0)) # 20031108
dratio <- NA_real_
@@ -294,12 +294,12 @@ vsmooth.spline <-
new("vsmooth.spline",
"call" = my.call,
"constraints" = constraints,
- "df" = if (ispar == 0) dfvec else rep(2, length(spar)),
+ "df" = if (ispar == 0) dfvec else rep_len(2, length(spar)),
"lfit" = lfit,
"nlfit" = junk.fill,
- "spar" = if (ispar == 1) spar else rep(Inf, length(dfvec)),
+ "spar" = if (ispar == 1) spar else rep_len(Inf, length(dfvec)),
"lambda" = if (ispar == 1) dratio * 16.0^(spar * 6.0 - 2.0) else
- rep(Inf, length(dfvec)),
+ rep_len(Inf, length(dfvec)),
"w" = matrix(collaps$wzbar, neff, dim2wz),
"x" = usortx,
"y" = lfit at fitted.values,
@@ -311,13 +311,13 @@ vsmooth.spline <-
xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
- noround <- TRUE # Improvement 3/8/02
+ noround <- TRUE # Improvement 20020803
nknots <- nk
if (all.knots) {
knot <- if (noround) {
- valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3)))
+ valid.vknotl2(c(rep_len(xbar[1], 3), xbar, rep_len(xbar[neff], 3)))
} else {
- c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3))
+ c(rep_len(xbar[1], 3), xbar, rep_len(xbar[neff], 3))
}
if (length(nknots)) {
warning("overriding 'nk' by 'all.knots = TRUE'")
@@ -560,12 +560,12 @@ plotvsmooth.spline <- function(x, xlab = "x", ylab = "", points = TRUE,
add = FALSE, ...) {
points.arg <- points; rm(points)
M <- ncol(x at y)
- pcol <- rep(pcol, length = M)
- pcex <- rep(pcex, length = M)
- pch <- rep(pch, length = M)
- lcol <- rep(lcol, length = M)
- lwd <- rep(lwd, length = M)
- lty <- rep(lty, length = M)
+ pcol <- rep_len(pcol, M)
+ pcex <- rep_len(pcex, M)
+ pch <- rep_len(pch, M)
+ lcol <- rep_len(lcol, M)
+ lwd <- rep_len(lwd, M)
+ lty <- rep_len(lty, M)
if (!add)
matplot(x at x, x at yin, type = "n", xlab = xlab, ylab = ylab, ...)
for (ii in 1:ncol(x at y)) {
diff --git a/build/vignette.rds b/build/vignette.rds
index e76231e..88bcc61 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index fc819bc..d0d3f98 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index 9aa7bc5..d56d6dd 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 77f6197..6018317 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index e67e446..b78f322 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index bd45d80..ef07801 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index 6ce3225..d193471 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
index 646f791..c5c483a 100644
Binary files a/data/beggs.rda and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 7647963..0e7cbd8 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
index 6c833f2..3a2968c 100644
Binary files a/data/cfibrosis.rda and b/data/cfibrosis.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
index 88ceb38..393c32a 100644
Binary files a/data/corbet.rda and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index 0e040a3..9ea59d9 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 8f11ae9..17cbc5a 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index b8f8947..7cfcbfa 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 8a20da6..750fd44 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 723d7be..ebb43dc 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index b1ab63c..028fb69 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
index 40e314b..541ff0d 100644
Binary files a/data/deermice.rda and b/data/deermice.rda differ
diff --git a/data/ducklings.rda b/data/ducklings.rda
index 491263f..e859522 100644
Binary files a/data/ducklings.rda and b/data/ducklings.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 21ae1b2..b335fab 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/flourbeetle.rda b/data/flourbeetle.rda
index ead5249..0b78e46 100644
Binary files a/data/flourbeetle.rda and b/data/flourbeetle.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 82ad0dc..16ad1ef 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
index 794caa5..67ce7ab 100644
Binary files a/data/lakeO.rda and b/data/lakeO.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 6634af0..96e579a 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index b883ec3..fd555a3 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/melbmaxtemp.rda b/data/melbmaxtemp.rda
index 7a6f3f3..dc45051 100644
Binary files a/data/melbmaxtemp.rda and b/data/melbmaxtemp.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index 02da66d..cfba086 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index 4d28a68..993474a 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index c4c2033..2c0a632 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index 8081ef2..8720537 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index e21d790..2a970cf 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 92d185a..aa8fbc2 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wine.rda b/data/wine.rda
index 877f503..89d3408 100644
Binary files a/data/wine.rda and b/data/wine.rda differ
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 498bcfa..3da21d4 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/inst/doc/crVGAM.pdf b/inst/doc/crVGAM.pdf
index f81f86a..0f9b60d 100644
Binary files a/inst/doc/crVGAM.pdf and b/inst/doc/crVGAM.pdf differ
diff --git a/man/AR1.Rd b/man/AR1.Rd
index ee9a1f1..b680a26 100644
--- a/man/AR1.Rd
+++ b/man/AR1.Rd
@@ -1,6 +1,6 @@
\name{AR1}
\alias{AR1}
-\alias{AR1.control}
+% \alias{AR1.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Autoregressive Process with Order-1 Family Function }
\description{
@@ -9,15 +9,15 @@
}
\usage{
-AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
- lrho = "rhobit", idrift = NULL,
- isd = NULL, ivar = NULL, irho = NULL, imethod = 1,
- ishrinkage = 1, type.likelihood = c("exact", "conditional"),
- var.arg = FALSE, nodrift = FALSE, almost1 = 0.99,
- zero = c(if (var.arg) "var" else "sd", "rho"))
- AR1.control(half.stepsizing = FALSE, ...)
+AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge", lrho = "rhobit",
+ idrift = NULL, isd = NULL, ivar = NULL, irho = NULL, imethod = 1,
+ ishrinkage = 0.95, type.likelihood = c("exact", "conditional"),
+ type.EIM = c("exact", "approximate"), var.arg = FALSE, nodrift = FALSE,
+ print.EIM = FALSE, zero = c(if (var.arg) "var" else "sd", "rho"))
+
}
% zero = c(-2, -3)
+% AR1.control(epsilon = 1e-6, maxit = 30, stepsize = 1, ...)
% deviance.arg = FALSE,
@@ -26,9 +26,9 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
\item{ldrift, lsd, lvar, lrho}{
Link functions applied to the scaled mean, standard deviation
or variance, and correlation parameters.
- The parameter \code{drift} is known as the \emph{drift}, and
- it is a scaled mean.
- See \code{\link{Links}} for more choices.
+ The parameter \code{drift} is known as the \emph{drift}, and
+ it is a scaled mean.
+ See \code{\link{Links}} for more choices.
}
@@ -66,6 +66,28 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
+ \item{type.EIM}{
+ What type of expected information matrix (EIM) is used in
+ Fisher scoring. By default, this family function calls
+ \code{\link[VGAM:AR1EIM]{AR1EIM}}, which recursively
+ computes the exact EIM for the AR process with Gaussian
+ white noise. See Porat and Friedlander (1986) for further
+ details on the exact EIM.
+
+ If \code{type.EIM = "approximate"} then
+ an approximate expression for the EIM of autoregressive processes
+ is used; this approach holds when the number of observations
+ is large enough. Succinct details about the approximate EIM
+ are delineated in Porat and Friedlander (1987).
+
+
+ }
+ \item{print.EIM}{
+ Logical. If \code{TRUE}, then the first few EIMs are printed.
+ Here, the result shown is the sum of each EIM.
+
+
+ }
\item{type.likelihood}{
What type of likelihood function is maximized.
The first choice (default) is the sum of the marginal likelihood
@@ -78,33 +100,14 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
-
-
-\item{almost1}{
- A value close to 1 but slightly smaller. One of the off-diagonal
- EIM elements is multiplied by this, to ensure that the EIM is
- positive-definite.
-
+% \item{epsilon, maxit, stepsize,...}{
+% Same as \code{\link[VGAM:vglm.control]{vglm.control}}.
+%
+%
+% }
}
-
-
-\item{half.stepsizing, \ldots}{
- A logical value, overwriting that of \code{\link{vglm.control}}.
- Currently this setting is potentially dangerous, and is used
- for aesthetics at the solution---no jittering occurs.
- This can often be seen by setting \code{trace = TRUE} when the
- value is set to \code{TRUE}.
- The jittering is due to some heuristics applied to handle the
- first observation---either by setting its prior weight to a value
- very close to 0, else adjust for its EIM which is not of full rank.
-
-
-
-}
-
-}
\details{
The AR-1 model implemented here has
\deqn{Y_1 \sim N(\mu, \sigma^2 / (1-\rho^2)), }{%
@@ -117,27 +120,55 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
Here are a few notes:
- 1. A test for stationarity might be to test
- whether \eqn{\mu^*}{mu^*} is intercept-only.
- 2. The mean of all the \eqn{Y_i}{Y(i)}
+ (1). A test for weak stationarity might be to verify whether
+ \eqn{1/\rho}{1/rho} lies outside the unit circle.
+ (2). The mean of all the \eqn{Y_i}{Y(i)}
is \eqn{\mu^* /(1-\rho)}{mu^* / (1-rho)} and
these are returned as the fitted values.
- 3. The correlation of all the \eqn{Y_i}{Y(i)} with \eqn{Y_{i-1}}{Y(i-1)}
+ (3). The correlation of all the \eqn{Y_i}{Y(i)} with \eqn{Y_{i-1}}{Y(i-1)}
is \eqn{\rho}{rho}.
- 4. The default link function ensures that \eqn{-1 < \rho < 1}{-1 < rho < 1}.
+ (4). The default link function ensures that
+ \eqn{-1 < \rho < 1}{-1 < rho < 1}.
+
+
+
+% (1). ... whether \eqn{\mu^*}{mu^*} is intercept-only.
}
\section{Warning}{
- Monitoring convergence is urged: set \code{trace = TRUE}.
-
+ Monitoring convergence is urged, i.e., set \code{trace = TRUE}.
+ Moreover, if the exact EIMs are used, set \code{print.EIM = TRUE}
+ to compare the computed exact to the approximate EIM.
+
+ Under the VGLM/VGAM approach, parameters can be modelled in terms
+ of covariates. Particularly, if the standard deviation of
+ the white noise is modelled in this way, then
+ \code{type.EIM = "exact"} may certainly lead to unstable
+ results. The reason is that white noise is a stationary
+ process, and consequently, its variance must remain as a constant.
+ Consequently, the use of variates to model
+ this parameter contradicts the assumption of
+ stationary random components to compute the exact EIMs proposed
+ by Porat and Friedlander (1987).
+
+ To prevent convergence issues in such cases, this family function
+ internally verifies whether the variance of the white noise remains
+ as a constant at each Fisher scoring iteration.
+ If this assumption is violated and \code{type.EIM = "exact"} is set,
+ then \code{AR1} automatically shifts to
+ \code{type.EIM = "approximate"}.
+ Also, a warning is accordingly displayed.
- Yet to do: add an argument that allows the scaled mean parameter
- to be deleted, i.e, a 2-parameter model is fitted.
- Yet to do: \code{ARff(p.lag = 1)} should hopefully be written soon.
-
+
+ %Thus, if modelling the standard deviation of the white noise
+ %is required, the use of \code{type.EIM = "approximate"} is
+ %highly recommended.
+ %Yet to do: add an argument that allows the scaled mean parameter
+ %to be deleted, i.e, a 2-parameter model is fitted.
+ %Yet to do: \code{ARff(p.lag = 1)} should hopefully be written soon.
}
\value{
@@ -147,79 +178,121 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
-%\references{
+\references{
+ Porat, B. and Friedlander, B. (1987)
+ The Exact Cramer-Rao Bound for Gaussian Autoregressive Processes.
+ \emph{IEEE Transactions on Aerospace and Electronic Systems},
+ \bold{AES-23(4)}, 537--542.
+
+
+}
+\author{ Victor Miranda (exact method) and
+ Thomas W. Yee (approximate method).}
-%}
-\author{ Thomas W. Yee and Victor Miranda }
\note{
- For \code{type.likelihood = "conditional"},
- the prior weight for the first observation is set to
- some small positive number, which has the effect of deleting
- that observation.
- However, \eqn{n} is still the original \eqn{n} so that
- statistics such as the residual degrees of freedom are
- unchanged (uncorrected possibly).
+ %For \code{type.likelihood = "conditional"},
+ %the prior weight for the first observation is set to
+ %some small positive number, which has the effect of deleting
+ %that observation.
+ %However, \eqn{n} is still the original \eqn{n} so that
+ %statistics such as the residual degrees of freedom are
+ %unchanged (uncorrected possibly).
Multiple responses are handled.
The mean is returned as the fitted values.
+
+% Argument \code{zero} can be either a numeric or a character
+% vector. It must specify the position(s) or name(s) of the
+% parameters to be modeled as intercept-only. If names are used,
+% notice that parameter names in this family function are
- Practical experience has shown that half-stepping is a very
- good idea. The default options use step sizes that are
- about one third the usual step size. Consequently,
- \code{maxit} is increased to about 100, by default.
-
+
+% \deqn{c("drift", "var" or "sd", "rho").}
-}
+ %Practical experience has shown that half-stepping is a very
+ %good idea. The default options use step sizes that are
+ %about one third the usual step size. Consequently,
+ %\code{maxit} is increased to about 100, by default.
+
+}
+
\seealso{
+ \code{\link{AR1EIM}},
\code{\link{vglm.control}},
\code{\link{dAR1}},
- \code{\link{uninormal}},
- \code{\link[stats]{arima.sim}},
+ \code{\link[stats]{arima.sim}}.
}
\examples{
-# Example 1: using arima.sim() to generate a stationary time series
-nn <- 1000; set.seed(1)
+### Example 1: using arima.sim() to generate a 0-mean stationary time series.
+nn <- 500
tsdata <- data.frame(x2 = runif(nn))
-ar.coef.1 <- rhobit(-2, inverse = TRUE) # Approx -0.8
-ar.coef.2 <- rhobit( 1, inverse = TRUE) # Approx 0.5
+ar.coef.1 <- rhobit(-1.55, inverse = TRUE) # Approx -0.65
+ar.coef.2 <- rhobit( 1.0, inverse = TRUE) # Approx 0.50
+set.seed(1)
tsdata <- transform(tsdata,
index = 1:nn,
TS1 = arima.sim(nn, model = list(ar = ar.coef.1),
- sd = exp(1.0)),
+ sd = exp(1.5)),
TS2 = arima.sim(nn, model = list(ar = ar.coef.2),
- sd = exp(1.0 + 2 * x2)))
-fit1a <- vglm(cbind(TS1, TS2) ~ x2, AR1(zero = "rho", nodrift = TRUE),
- data = tsdata, trace = TRUE)
-coef(fit1a, matrix = TRUE)
-summary(fit1a) # SEs are useful to know
-
-# Example 2: another stationary time series
-nn <- 1000
-my.rho <- rhobit(-1.0, inverse = TRUE)
-my.mu <- 2.5
+ sd = exp(1.0 + 1.5 * x2)))
+
+### An autoregressive intercept--only model. ###
+### Using the exact EIM, and "nodrift = TRUE" ###
+fit1a <- vglm(TS1 ~ 1, data = tsdata, trace = TRUE,
+ AR1(var.arg = FALSE, nodrift = TRUE,
+ type.EIM = "exact",
+ print.EIM = FALSE),
+ crit = "coefficients")
+Coef(fit1a)
+summary(fit1a)
+
+### Two responses. Here, the white noise standard deviation of TS2 ###
+### is modelled in terms of 'x2'. Also, 'type.EIM = exact'. ###
+fit1b <- vglm(cbind(TS1, TS2) ~ x2,
+ AR1(zero = NULL, nodrift = TRUE,
+ var.arg = FALSE,
+ type.EIM = "exact"),
+ constraints = list("(Intercept)" = diag(4),
+ "x2" = rbind(0, 0, 1, 0)),
+ data = tsdata, trace = TRUE, crit = "coefficients")
+coef(fit1b, matrix = TRUE)
+summary(fit1b)
+
+\dontrun{
+### Example 2: another stationary time series
+nn <- 500
+my.rho <- rhobit(1.0, inverse = TRUE)
+my.mu <- 1.0
my.sd <- exp(1)
tsdata <- data.frame(index = 1:nn, TS3 = runif(nn))
+
+set.seed(2)
for (ii in 2:nn)
- tsdata$TS3[ii] <- my.mu + my.rho * tsdata$TS3[ii-1] + rnorm(1, sd = my.sd)
+ tsdata$TS3[ii] <- my.mu/(1 - my.rho) +
+ my.rho * tsdata$TS3[ii-1] + rnorm(1, sd = my.sd)
tsdata <- tsdata[-(1:ceiling(nn/5)), ] # Remove the burn-in data:
-fit2a <- vglm(TS3 ~ 1, AR1(type.likelihood = "conditional"),
- data = tsdata, trace = TRUE)
-coef(fit2a, matrix = TRUE)
-summary(fit2a) # SEs are useful to know
-Coef(fit2a)["rho"] # Estimate of rho for intercept-only models
-my.rho
-coef(fit2a)[1] # drift
-my.mu # Should be the same
-head(weights(fit2a, type= "prior")) # First one is effectively deleted
-head(weights(fit2a, type= "working")) # Ditto
+
+### Fitting an AR(1). The exact EIMs are used.
+fit2a <- vglm(TS3 ~ 1, AR1(type.likelihood = "exact", # "conditional",
+ type.EIM = "exact"),
+ data = tsdata, trace = TRUE, crit = "coefficients")
+
+Coef(fit2a)
+summary(fit2a) # SEs are useful to know
+
+Coef(fit2a)["rho"] # Estimate of rho, for intercept-only models
+my.rho # The 'truth' (rho)
+Coef(fit2a)["drift"] # Estimate of drift, for intercept-only models
+my.mu /(1 - my.rho) # The 'truth' (drift)
+}
}
\keyword{models}
\keyword{regression}
@@ -230,6 +303,8 @@ head(weights(fit2a, type= "working")) # Ditto
% data = tsdata, trace = TRUE)
+%head(weights(fit2a, type = "prior")) # First one is effectively deleted
+%head(weights(fit2a, type = "working")) # Ditto
diff --git a/man/AR1EIM.Rd b/man/AR1EIM.Rd
new file mode 100644
index 0000000..3ea23c3
--- /dev/null
+++ b/man/AR1EIM.Rd
@@ -0,0 +1,303 @@
+\name{AR1EIM}
+\alias{AR1EIM}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{Computation of the Exact EIM of an Order-1 Autoregressive Process
+
+
+}
+\description{Computation of the exact Expected Information Matrix of
+ the Autoregressive process of order-\eqn{1} (AR(\eqn{1}))
+ with Gaussian white noise and stationary
+ random components.
+
+}
+
+\usage{
+AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
+ WNsd = NULL, ARcoeff1 = NULL, eps.porat = 1e-2)
+}
+
+\arguments{
+ \item{x}{
+ A vector of quantiles. The Gaussian time series for which the EIMs
+ are computed.
+
+
+ If multiple time series are being analyzed, then \code{x} must be
+ a matrix where each column allocates a response.
+ That is, the number of columns (denoted as \eqn{NOS}) must match
+ the number of responses.
+
+
+
+ }
+ \item{var.arg}{
+ Logical. Same as with \code{\link[VGAM:AR1]{AR1}}.
+
+
+
+ }
+ \item{p.drift}{
+ A numeric vector with the \emph{scaled mean(s)} (commonly referred as
+ \emph{drift}) of the AR process(es) in turn.
+ Its length matches the number of responses.
+
+
+
+ }
+ \item{WNsd, ARcoeff1}{
+ Matrices.
+ The standard deviation of the white noise, and the
+ correlation (coefficient) of the AR(\eqn{1}) model,
+ for \bold{each} observation.
+
+
+ That is, the dimension for each matrix is \eqn{N \times NOS}{N x NOS},
+ where \eqn{N} is the number of observations and \eqn{NOS} is the
+ number of responses. Else, these arguments are recycled.
+
+
+
+ }
+ \item{eps.porat}{
+ A very small positive number to test whether the standard deviation
+ (\code{WNsd}) is close enough to its value estimated in this function.
+
+ See below for further details.
+
+
+
+ }
+
+}
+\details{
+ This function implements the algorithm of Porat and Friedlander
+ (1986) to \emph{recursively} compute the exact expected
+ information matrix (EIM) of Gaussian time series with stationary
+ random components.
+
+
+ By default, when the VGLM/VGAM family function
+ \code{\link[VGAM:AR1]{AR1}} is used to fit an AR(\eqn{1}) model
+ via \code{\link[VGAM:vglm]{vglm}}, Fisher scoring is executed using
+ the \bold{approximate} EIM for the AR process. However, this model
+ can also be fitted using the \bold{exact} EIMs computed by
+ \code{AR1EIM}.
+
+
+ Given \eqn{N} consecutive data points,
+ \eqn{ {y_{0}, y_{1}, \ldots, y_{N - 1} } }{ {y[0], y[1], \ldots,
+ y[N - 1]} } with probability density \eqn{f(\boldsymbol{y})}{f(y)},
+ the Porat and Friedlander algorithm
+ calculates the EIMs
+ \eqn{ [J_{n-1}(\boldsymbol{\theta})] }{J(n-1)[\theta]},
+ for all \eqn{1 \leq n \leq N}{1 \le n \le N}. This is done based on the
+ Levinson-Durbin algorithm for computing the orthogonal polynomials of
+ a Toeplitz matrix.
+ In particular, for the AR(\eqn{1}) model, the vector of parameters
+ to be estimated under the VGAM/VGLM approach is
+
+ \deqn{ \boldsymbol{\eta} = (\mu^{*}, loge(\sigma^2), rhobit(\rho)),}{
+ \eta = ( mu^*, loge(sigma^2), rhobit(rho)),
+ }
+ where \eqn{\sigma^2}{sigma^2} is the variance of the white noise
+ and \eqn{mu^{*}}{mu^*} is the drift parameter
+ (See \code{\link[VGAM:AR1]{AR1}} for further details on this).
+
+ Consequently, for each observation \eqn{n = 1, \ldots, N}, the EIM,
+ \eqn{J_{n}(\boldsymbol{\theta})}{Jn[\theta]}, has dimension
+ \eqn{3 \times 3}{3 x 3}, where the diagonal elements are:
+ %Notice, however, that the Porat and Friedlander algorithm considers
+ %\eqn{ { y_t } }{ {y[t]}} as a zero-mean process.
+ %Then, for each \eqn{n = 1, \ldots, N},
+ %\eqn{ [J_{n}(\boldsymbol{\theta})] }{Jn[\theta]} is a
+ %\eqn{2 \times 2}{2 x 2} matrix, with elements
+ \deqn{ J_{[n, 1, 1]} =
+ E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \mu^{*} )^2 ], }{
+ J[n, 1, 1] = E[ -\delta^2 log f(y) / \delta (mu^*)^2 ], }
+
+ \deqn{ J_{[n, 2, 2]} =
+ E[ -\partial^2 \log f(\boldsymbol{y}) / \partial (\sigma^2)^2 ], }{
+ J[n, 2, 2] = E[ - \delta^2 log f(y) / \delta (\sigma^2)^2 ],}
+
+ and
+
+ \deqn{ J_{[n, 3, 3]} =
+ E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \rho )^2 ]. }{
+ J[n, 3, 3] = E[ -\delta^2 log f(y) / \delta (rho)^2]. }
+
+ As for the off-diagonal elements, one has the usual entries, i.e.,
+ \deqn{ J_{[n, 1, 2]} = J_{[n, 2, 1]} =
+ E[ -\partial^2 \log f(\boldsymbol{y}) / \partial \sigma^2
+ \partial \rho], }{
+ J[n, 1, 2] = J[n, 2, 1] =
+ E[ -\delta^2 log f(y) / \delta \sigma^2 \delta rho ],}
+ etc.
+
+ If \code{var.arg = FALSE}, then \eqn{\sigma} instead of \eqn{\sigma^2}
+ is estimated. Therefore, \eqn{J_{[n, 2, 2]}}{J[n, 2, 2]},
+ \eqn{J_{[n, 1, 2]}}{J[n, 1, 2]}, etc., are correspondingly replaced.
+
+
+ Once these expected values are internally computed, they are returned
+ in an array of dimension \eqn{N \times 1 \times 6}{N x 1 x 6},
+ of the form
+
+ \deqn{J[, 1, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]}, J_{[ , 3, 3]},
+ J_{[ , 1, 2]}, J_{[, 2, 3]}, J_{[ , 1, 3]} ]. }{
+ J[, 1, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3],
+ J[ , 1, 2], J[ , 2, 3], J[ , 1, 3] ].
+ }
+
+ \code{AR1EIM} handles multiple time series, say \eqn{NOS}.
+ If this happens, then it accordingly returns an array of
+ dimension \eqn{N \times NOS \times 6 }{N x NOS x 6}. Here,
+ \eqn{J[, k, ]}, for \eqn{k = 1, \ldots, NOS}, is a matrix
+ of dimension \eqn{N \times 6}{N x 6}, which
+ stores the EIMs for the \eqn{k^{th}}{k}th response, as
+ above, i.e.,
+
+
+ \deqn{J[, k, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]},
+ J_{[ , 3, 3]}, \ldots ], }{
+ J[, k, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3], \ldots ],
+ }
+
+
+ the \emph{bandwidth} form, as required by
+ \code{\link[VGAM:AR1]{AR1}}.
+
+
+}
+\value{
+ An array of dimension \eqn{N \times NOS \times 6}{N x NOS x 6},
+ as above.
+
+
+ This array stores the EIMs calculated from the joint density as
+ a function of
+ \deqn{\boldsymbol{\theta} = (\mu^*, \sigma^2, \rho). }{
+ \theta = (mu^*, sigma^2, rho).
+ }
+
+
+ Nevertheless, note that, under the VGAM/VGLM approach, the EIMs
+ must be correspondingly calculated in terms of the linear
+ predictors, \eqn{\boldsymbol{\eta}}{\eta}.
+
+
+}
+\note{
+ For simplicity, one can assume that the time series analyzed has
+ a 0-mean. Consequently, when the family function
+ \code{\link[VGAM:AR1]{AR1}} calls \code{AR1EIM} to compute
+ the EIMs, the argument \code{p.drift} is internally set
+ to a zero vector, whereas \code{x} is \emph{centered} by
+ subtracting its mean value.
+
+
+}
+\section{Asymptotic behaviour of the algorithm}{
+ For large enough \eqn{n}, the EIMs,
+ \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)},
+ become approximately linear in \eqn{n}. That is, for some
+ \eqn{n_0}{n0},
+
+ \deqn{ J_n(\boldsymbol{\theta}) \equiv
+ J_{n_0}(\boldsymbol{\theta}) + (n - n_0)
+ \bar{J}(\boldsymbol{\theta}),~~~~~~(*) }{
+ Jn(\theta) -> Jn0(\theta) + (n - n0) * Jbar(\theta), (*)
+ }
+ where \eqn{ \bar{J}(\boldsymbol{\theta}) }{ Jbar(\theta)} is
+ a constant matrix.
+
+
+ This relationship is internally considered if a proper value
+ of \eqn{n_0}{n0} is determined. Different ways can be adopted to
+ find \eqn{n_0}{n0}. In \code{AR1EIM}, this is done by checking
+ the difference between the internally estimated variances and the
+ entered ones at \code{WNsd}.
+ If this difference is less than
+ \code{eps.porat} at some iteration, say at iteration \eqn{n_0}{n0},
+ then \code{AR1EIM} takes
+ \eqn{ \bar{J}(\boldsymbol{\theta})}{Jbar(\theta)}
+ as the last computed increment of
+ \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)}, and extrapolates
+ \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)}, for all
+ \eqn{k \geq n_0 }{k \ge n0} using \eqn{(*)}.
+ Else, the algorithm will complete the iterations for
+ \eqn{1 \leq n \leq N}{1 \le n \le N}.
+
+
+ Finally, note that the rate of convergence reasonably decreases if
+ the asymptotic relationship \eqn{(*)} is used to compute
+ \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)},
+ \eqn{k \geq n_0 }{k \ge n0}. Normally, the number
+ of operations involved on this algorithm is proportional to
+ \eqn{N^2}.
+
+
+ See Porat and Friedlander (1986) for full details on the asymptotic
+ behaviour of the algorithm.
+
+
+
+}
+\section{Warning}{
+ Arguments \code{WNsd}, and \code{ARcoeff1} are matrices of dimension
+ \eqn{N \times NOS}{N x NOS}. Else, these arguments are accordingly
+ recycled.
+
+
+}
+\references{
+ Porat, B. and Friedlander, B. (1986)
+ Computation of the Exact Information Matrix of Gaussian Time Series
+ with Stationary Random Components.
+ \emph{IEEE Transactions on Acoustics, Speech, and Signal Processing},
+ \bold{34(1)}, 118--130.
+
+
+}
+\author{
+ V. Miranda and T. W. Yee.
+
+
+}
+\seealso{
+ \code{\link[VGAM:AR1]{AR1}}.
+
+
+}
+\examples{
+ set.seed(1)
+ nn <- 500
+ ARcoeff1 <- c(0.3, 0.25) # Will be recycled.
+ WNsd <- c(exp(1), exp(1.5)) # Will be recycled.
+ p.drift <- c(0, 0) # Zero-mean Gaussian time series.
+
+ ### Generate two (zero-mean) AR(1) processes ###
+ ts1 <- p.drift[1]/(1 - ARcoeff1[1]) +
+ arima.sim(model = list(ar = ARcoeff1[1]), n = nn,
+ sd = WNsd[1])
+ ts2 <- p.drift[2]/(1 - ARcoeff1[2]) +
+ arima.sim(model = list(ar = ARcoeff1[2]), n = nn,
+ sd = WNsd[2])
+
+ ARdata <- matrix(cbind(ts1, ts2), ncol = 2)
+
+
+ ### Compute the exact EIMs: TWO responses. ###
+ ExactEIM <- AR1EIM(x = ARdata, var.arg = FALSE, p.drift = p.drift,
+ WNsd = WNsd, ARcoeff1 = ARcoeff1)
+
+ ### For response 1:
+ head(ExactEIM[, 1 ,]) # NOTICE THAT THIS IS A (nn x 6) MATRIX!
+
+ ### For response 2:
+ head(ExactEIM[, 2 ,]) # NOTICE THAT THIS IS A (nn x 6) MATRIX!
+}
+
+
+
+
diff --git a/man/AR1UC.Rd b/man/AR1UC.Rd
index e6117f7..2a936b3 100644
--- a/man/AR1UC.Rd
+++ b/man/AR1UC.Rd
@@ -1,9 +1,9 @@
\name{dAR1}
\alias{dAR1}
\alias{dAR1}
-%\alias{pbisa}
-%\alias{qbisa}
-%\alias{rbisa}
+%\alias{pAR1}
+%\alias{qAR1}
+%\alias{rAR1}
\title{The AR-1 Autoregressive Process}
\description{
Density for the AR-1 model.
@@ -50,13 +50,15 @@ dAR1(x, drift = 0, var.error = 1, ARcoef1 = 0.0,
-% \code{pbisa} gives the distribution function, and
-% \code{qbisa} gives the quantile function, and
-% \code{rbisa} generates random deviates.
+% \code{pAR1} gives the distribution function, and
+% \code{qAR1} gives the quantile function, and
+% \code{rAR1} generates random deviates.
}
+
\author{ T. W. Yee and Victor Miranda }
+
\details{
Most of the background to this function is given
in \code{\link{AR1}}.
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 78a6b43..3cd89e4 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -1,7 +1,7 @@
\name{CommonVGAMffArguments}
\alias{CommonVGAMffArguments}
\alias{TypicalVGAMfamilyFunction}
-\title{Common VGAM family function Arguments }
+\title{Common VGAM Family Function Arguments }
\description{
Here is a description of some common and typical arguments found
in many \pkg{VGAM} family functions, e.g.,
@@ -101,7 +101,25 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
% The actual search values will be \code{unique(sort(c(gshape)))}, etc.
If the grid search is 2-dimensional then it is advisable not to
make the vectors too long as a nested \code{for} loop may be used.
- Ditto for 3-dimensions.
+ Ditto for 3-dimensions etc.
+ Sometimes a \code{".mux"} is added as a suffix, e.g., \code{gshape.mux};
+ this means that the grid is created relatively and not absolutely,
+ e.g., its values are multiplied by some single initial estimate of the
+ parameter in order to create the grid on an absolute scale.
+
+
+
+ Some family functions have an argument called \code{gprobs.y}.
+ This is fed into the \code{probs}
+ argument of \code{\link[stats:quantile]{quantile}} in order to
+ obtain some values of central tendency of the response,
+ i.e., some spread of values in the middle,
+ when \code{imethod = 1}, to obtain an initial value for the mean.
+ Some family functions have an argument called \code{iprobs.y}, and
+ if so, then these values can overwrite \code{gprobs.y}.
+
+
+
% Then the actual search values will be \code{unique(sort(c(gshape, 1/gshape)))}, etc.
diff --git a/man/Select.Rd b/man/Select.Rd
index e8054c8..98b2dfd 100644
--- a/man/Select.Rd
+++ b/man/Select.Rd
@@ -3,8 +3,7 @@
\alias{subsetcol}
% \alias{subsetc}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{
- Select variables for a formula response or the RHS of a formula
+\title{ Select Variables for a Formula Response or the RHS of a Formula
%% ~~function to do ... ~~
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index d464b2b..dbb2ba3 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -89,17 +89,20 @@ Maintainer: Thomas Yee \email{t.yee at auckland.ac.nz}.
}
+
\section{Warning}{
- This package is undergoing continual development and improvement.
- Until my monograph comes out and this package is released as version 1.0-0
- the user should treat everything subject to change.
+ This package is undergoing continual development and improvement,
+ therefore users should treat everything as subject to change.
This includes the
family function names,
argument names,
many of the internals,
the use of link functions,
and slot names.
- Some future pain can be minimized by using good programming
+ For example, all link functions may be renamed so that they
+ end in \code{"link"},
+ e.g., \code{loglink()} instead of \code{loge()}.
+ Some future pain can be avoided by using good programming
techniques, e.g., using extractor/accessor functions such as
\code{coef()}, \code{weights()}, \code{vcov()},
\code{predict()}.
@@ -267,7 +270,7 @@ aa at post$deplot # Contains density function values
# Example 5; GEV distribution for extremes
-(fit5 <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
+(fit5 <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE))
head(fitted(fit5))
coef(fit5, matrix = TRUE)
Coef(fit5)
@@ -278,3 +281,9 @@ sqrt(diag(vcov(fit5))) # Approximate standard errors
}
+% Until my monograph comes out and this package is released as version 1.0-0
+% the user should treat everything subject to change.
+
+
+
+
diff --git a/man/betaII.Rd b/man/betaII.Rd
index 9c72543..877acfe 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -9,7 +9,8 @@
\usage{
betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
iscale = NULL, ishape2.p = NULL, ishape3.q = NULL, imethod = 1,
- gscale = exp(-5:5), gshape2.p = exp(-5:5), gshape3.q = exp(-5:5),
+ gscale = exp(-5:5), gshape2.p = exp(-5:5),
+ gshape3.q = seq(0.75, 4, by = 0.25),
probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/betaR.Rd b/man/betaR.Rd
index 334c69a..5148f31 100644
--- a/man/betaR.Rd
+++ b/man/betaR.Rd
@@ -120,6 +120,7 @@ betaR(lshape1 = "loge", lshape2 = "loge",
\seealso{
\code{\link{betaff}},
+% \code{\link{zoibetaR}},
\code{\link[stats:Beta]{Beta}},
\code{\link{genbetaII}},
\code{\link{betaII}},
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index ebf2cf6..d0e4196 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -8,15 +8,15 @@
\alias{pbetabinom.ab}
%\alias{qbetabinom.ab}
\alias{rbetabinom.ab}
-%\alias{Ozibetabinom}
-\alias{dozibetabinom}
-\alias{pozibetabinom}
-%\alias{qozibetabinom}
-\alias{rozibetabinom}
-\alias{dozibetabinom.ab}
-\alias{pozibetabinom.ab}
-%\alias{qozibetabinom.ab}
-\alias{rozibetabinom.ab}
+%\alias{Zoibetabinom}
+\alias{dzoibetabinom}
+\alias{pzoibetabinom}
+%\alias{qzoibetabinom}
+\alias{rzoibetabinom}
+\alias{dzoibetabinom.ab}
+\alias{pzoibetabinom.ab}
+%\alias{qzoibetabinom.ab}
+\alias{rzoibetabinom.ab}
\title{The Beta-Binomial Distribution}
\description{
Density, distribution function, and random
@@ -32,14 +32,14 @@ rbetabinom(n, size, prob, rho = 0)
dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = 1e6)
pbetabinom.ab(q, size, shape1, shape2, log.p = FALSE)
rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
-dozibetabinom(x, size, prob, rho = 0, pstr0 = 0, pstrsize = 0, log = FALSE)
-pozibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0,
+dzoibetabinom(x, size, prob, rho = 0, pstr0 = 0, pstrsize = 0, log = FALSE)
+pzoibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0,
lower.tail = TRUE, log.p = FALSE)
-rozibetabinom(n, size, prob, rho = 0, pstr0 = 0, pstrsize = 0)
-dozibetabinom.ab(x, size, shape1, shape2, pstr0 = 0, pstrsize = 0, log = FALSE)
-pozibetabinom.ab(q, size, shape1, shape2, pstr0 = 0, pstrsize = 0,
+rzoibetabinom(n, size, prob, rho = 0, pstr0 = 0, pstrsize = 0)
+dzoibetabinom.ab(x, size, shape1, shape2, pstr0 = 0, pstrsize = 0, log = FALSE)
+pzoibetabinom.ab(q, size, shape1, shape2, pstr0 = 0, pstrsize = 0,
lower.tail = TRUE, log.p = FALSE)
-rozibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
+rzoibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
}
@@ -125,9 +125,10 @@ rozibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
% \code{qbetabinom} and \code{qbetabinom.ab} gives the quantile function, and
- \code{dozibetabinom} and \code{dozibetabinom.ab} give the inflated density,
- \code{pozibetabinom} and \code{pozibetabinom.ab} give the inflated distribution function, and
- \code{rozibetabinom} and \code{rozibetabinom.ab} generate random inflated deviates.
+
+ \code{dzoibetabinom} and \code{dzoibetabinom.ab} give the inflated density,
+ \code{pzoibetabinom} and \code{pzoibetabinom.ab} give the inflated distribution function, and
+ \code{rzoibetabinom} and \code{rzoibetabinom.ab} generate random inflated deviates.
}
@@ -165,7 +166,7 @@ rozibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
}
\note{
- \code{pozibetabinom}, \code{pozibetabinom.ab},
+ \code{pzoibetabinom}, \code{pzoibetabinom.ab},
\code{pbetabinom} and \code{pbetabinom.ab} can be particularly slow.
The functions here ending in \code{.ab} are called from those
functions which don't.
@@ -180,7 +181,7 @@ rozibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
\seealso{
\code{\link{betabinomial}},
\code{\link{betabinomialff}},
- \code{\link{Ozibeta}}.
+ \code{\link{Zoabeta}}.
}
@@ -212,7 +213,7 @@ barplot(rbind(dy, ty / sum(ty)),
set.seed(208); N <- 1000000; size = 20;
pstr0 <- 0.2; pstrsize <- 0.2
-k <- rozibetabinom.ab(N, size, s1, s2, pstr0, pstrsize)
+k <- rzoibetabinom.ab(N, size, s1, s2, pstr0, pstrsize)
hist(k, probability = TRUE, border = "blue",
main = "Blue = inflated; orange = ordinary beta-binomial",
breaks = -0.5 : (size + 0.5))
@@ -221,7 +222,7 @@ sum(k == size) / N # Proportion of size
lines(0 : size,
dbetabinom.ab(0 : size, size, s1, s2), col = "orange")
lines(0 : size, col = "blue",
- dozibetabinom.ab(0 : size, size, s1, s2, pstr0, pstrsize))
+ dzoibetabinom.ab(0 : size, size, s1, s2, pstr0, pstrsize))
}
}
\keyword{distribution}
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 35caaab..483ed19 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -118,8 +118,9 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
\seealso{
\code{\link{betaR}},
+% \code{\link{zoibetaR}},
\code{\link[stats:Beta]{Beta}},
- \code{\link{dozibeta}},
+ \code{\link{dzoabeta}},
\code{\link{genbetaII}},
\code{\link{betaII}},
\code{\link{betabinomialff}},
diff --git a/man/binom2.orUC.Rd b/man/binom2.orUC.Rd
index 4bd7c51..5226bd3 100644
--- a/man/binom2.orUC.Rd
+++ b/man/binom2.orUC.Rd
@@ -15,8 +15,7 @@ rbinom2.or(n, mu1,
oratio = 1, exchangeable = FALSE, tol = 0.001, twoCols = TRUE,
colnames = if (twoCols) c("y1","y2") else c("00", "01", "10", "11"),
ErrorCheck = TRUE)
-dbinom2.or(mu1,
- mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
+dbinom2.or(mu1, mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
oratio = 1, exchangeable = FALSE, tol = 0.001,
colnames = c("00", "01", "10", "11"), ErrorCheck = TRUE)
@@ -86,11 +85,13 @@ dbinom2.or(mu1,
\code{\link{binom2.or}}.
+
The function \code{dbinom2.or} does not really compute the density
(because that does not make sense here) but rather returns the
four joint probabilities.
+
}
\value{
The function \code{rbinom2.or} returns
@@ -110,26 +111,25 @@ dbinom2.or(mu1,
}
\examples{
-nn <- 2000 # Example 1
-ymat <- rbinom2.or(n = nn, mu1 = 0.8, oratio = exp(2), exch = TRUE)
+nn <- 1000 # Example 1
+ymat <- rbinom2.or(nn, mu1 = logit(1, inv = TRUE), oratio = exp(2), exch = TRUE)
(mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
(myor <- mytab["0","0"] * mytab["1","1"] / (mytab["1","0"] * mytab["0","1"]))
fit <- vglm(ymat ~ 1, binom2.or(exch = TRUE))
coef(fit, matrix = TRUE)
-
bdata <- data.frame(x2 = sort(runif(nn))) # Example 2
-bdata <- transform(bdata, mu1 = logit(-2 + 4*x2, inverse = TRUE),
- mu2 = logit(-1 + 3*x2, inverse = TRUE))
+bdata <- transform(bdata, mu1 = logit(-2 + 4 * x2, inverse = TRUE),
+ mu2 = logit(-1 + 3 * x2, inverse = TRUE))
dmat <- with(bdata, dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = exp(2)))
ymat <- with(bdata, rbinom2.or(n = nn, mu1 = mu1, mu2 = mu2, oratio = exp(2)))
fit2 <- vglm(ymat ~ x2, binom2.or, data = bdata)
coef(fit2, matrix = TRUE)
\dontrun{
matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4, type = "l",
- main = "Joint probabilities", ylim = 0:1, lwd = 2,
+ main = "Joint probabilities", ylim = 0:1,
ylab = "Probabilities", xlab = "x2", las = 1)
-legend(x = 0, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
+legend("top", lty = 1:4, col = 1:4,
legend = c("1 = (y1=0, y2=0)", "2 = (y1=0, y2=1)",
"3 = (y1=1, y2=0)", "4 = (y1=1, y2=1)"))
}
diff --git a/man/binormal.Rd b/man/binormal.Rd
index 4f12763..861a8cc 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -1,7 +1,7 @@
\name{binormal}
\alias{binormal}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Bivariate normal distribution family function }
+\title{ Bivariate Normal Distribution Family Function }
\description{
Maximum likelihood estimation of the five parameters of a bivariate
normal distribution.
diff --git a/man/binormalUC.Rd b/man/binormalUC.Rd
index 8ce2d68..a990b78 100644
--- a/man/binormalUC.Rd
+++ b/man/binormalUC.Rd
@@ -4,7 +4,7 @@
\alias{dbinorm}
\alias{pbinorm}
\alias{rbinorm}
-\title{Bivariate normal distribution cumulative distribution function}
+\title{Bivariate Normal Distribution Cumulative Distribution Function}
\description{
Density,
cumulative distribution function
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index 6310779..e46b1da 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -1,7 +1,7 @@
\name{calibrate.qrrvglm.control}
\alias{calibrate.qrrvglm.control}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Control function for CQO/CAO calibration }
+\title{ Control Function for CQO/CAO Calibration }
\description{
Algorithmic constants and parameters for running
\code{\link{calibrate.qrrvglm}} are set using this function.
diff --git a/man/cens.gumbel.Rd b/man/cens.gumbel.Rd
index 00df388..79666f0 100644
--- a/man/cens.gumbel.Rd
+++ b/man/cens.gumbel.Rd
@@ -102,7 +102,7 @@ London: Springer-Verlag.
\seealso{
\code{\link{gumbel}},
- \code{\link{egumbel}},
+ \code{\link{gumbelff}},
\code{\link{rgumbel}},
\code{\link{guplot}},
\code{\link{gev}},
diff --git a/man/clo.Rd b/man/clo.Rd
index 9bb514c..66c093f 100644
--- a/man/clo.Rd
+++ b/man/clo.Rd
@@ -1,7 +1,7 @@
\name{clo}
\alias{clo}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Redirects the user to rrvglm }
+\title{ Redirects the User to rrvglm() }
\description{
Redirects the user to the function \code{\link{rrvglm}}.
}
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 93cbf28..4a25961 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -81,10 +81,11 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
Multiple responses? If \code{TRUE} then the input should be
a matrix with values \eqn{1,2,\dots,L}, where \eqn{L=J+1} is the
number of levels.
- Each column of the matrix is a response, i.e., multivariate response.
+ Each column of the matrix is a response, i.e., multiple responses.
A suitable matrix can be obtained from \code{Cut}.
+
}
% \item{apply.parint}{
% Logical.
diff --git a/man/dagum.Rd b/man/dagum.Rd
index e81df31..a1da353 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -9,8 +9,8 @@
\usage{
dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, imethod = 1,
- lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5), gshape2.p = exp(-5:5),
- probs.y = c(0.25, 0.5, 0.75), zero = "shape")
+ lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25),
+ gshape2.p = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
% zero = ifelse(lss, -(2:3), -c(1, 3))
diff --git a/man/depvar.Rd b/man/depvar.Rd
index 85e8835..08ef307 100644
--- a/man/depvar.Rd
+++ b/man/depvar.Rd
@@ -1,7 +1,7 @@
\name{depvar}
\alias{depvar}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Response variable extracted }
+\title{ Response Variable Extracted }
\description{
A generic function that extracts the response/dependent
variable from objects.
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index 86d128d..3842567 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -7,8 +7,7 @@
}
\usage{
-dirichlet(link = "loge", parallel = FALSE, zero = NULL,
- imethod = 1)
+dirichlet(link = "loge", parallel = FALSE, zero = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -126,10 +125,10 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
\examples{
-ydata <- data.frame(rdiric(n = 1000,
+ddata <- data.frame(rdiric(n = 1000,
shape = exp(c(y1 = -1, y2 = 1, y3 = 0))))
fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet,
- data = ydata, trace = TRUE, crit = "coef")
+ data = ddata, trace = TRUE, crit = "coef")
Coef(fit)
coef(fit, matrix = TRUE)
head(fitted(fit))
@@ -137,7 +136,7 @@ head(fitted(fit))
\keyword{models}
\keyword{regression}
-% colnames(ydata) <- paste("y", 1:3, sep = "")
+% colnames(ddata) <- paste("y", 1:3, sep = "")
diff --git a/man/double.expbinomial.Rd b/man/double.expbinomial.Rd
index 7e5b1cb..3799030 100644
--- a/man/double.expbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -104,7 +104,7 @@ double.expbinomial(lmean = "logit", ldispersion = "logit",
\author{ T. W. Yee }
\note{
This function processes the input in the same way
- as \code{\link{binomialff}}, however multivariate responses are
+ as \code{\link{binomialff}}, however multiple responses are
not allowed (\code{binomialff(multiple.responses = FALSE)}).
diff --git a/man/ducklings.Rd b/man/ducklings.Rd
index 127a56a..c948264 100644
--- a/man/ducklings.Rd
+++ b/man/ducklings.Rd
@@ -2,7 +2,7 @@
\alias{ducklings}
\docType{data}
\title{
-Relative Frequencies of Serum Proteins in white Pekin ducklings
+Relative Frequencies of Serum Proteins in White Pekin Ducklings
%% ~~ data name/kind ... ~~
}
diff --git a/man/fisk.Rd b/man/fisk.Rd
index f90ebba..2ca33f7 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -10,7 +10,8 @@
\usage{
fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
- gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
+ gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
+ zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index d186653..2c6f6ae 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -2,7 +2,7 @@
%\alias{gaussian}
\alias{gaussianff}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Gaussian (normal) Family Function }
+\title{ Gaussian (Normal) Family Function }
\description{
Fits a generalized linear model to a response with Gaussian (normal)
errors. The dispersion parameter may be known or unknown.
diff --git a/man/gev.Rd b/man/gev.Rd
index c4ba363..cbc6292 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -1,6 +1,6 @@
\name{gev}
\alias{gev}
-\alias{egev}
+\alias{gevff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Generalized Extreme Value Distribution Family Function }
\description{
@@ -9,16 +9,21 @@
}
\usage{
-gev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
- percentiles = c(95, 99), iscale=NULL, ishape = NULL,
- imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
+gev(llocation = "identitylink", lscale = "loge",
+ lshape = logoff(offset = 0.5), percentiles = c(95, 99),
+ ilocation = NULL, iscale = NULL, ishape = NULL, imethod = 1,
+ gprobs.y = (1:9)/10, gscale.mux = exp((-5:5)/6),
+ gshape = (-5:5) / 11 + 0.01,
+ iprobs.y = NULL, tolshape0 = 0.001,
+ type.fitted = c("percentiles", "mean"),
zero = c("scale", "shape"))
-egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
- percentiles = c(95, 99), iscale=NULL, ishape = NULL,
- imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
- zero = c("scale", "shape"))
+gevff(llocation = "identitylink", lscale = "loge",
+ lshape = logoff(offset = 0.5), percentiles = c(95, 99),
+ ilocation = NULL, iscale = NULL, ishape = NULL, imethod = 1,
+ gprobs.y = (1:9)/10, gscale.mux = exp((-5:5)/6),
+ gshape = (-5:5) / 11 + 0.01,
+ iprobs.y = NULL, tolshape0 = 0.001,
+ type.fitted = c("percentiles", "mean"), zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -79,18 +84,21 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
}
- \item{iscale, ishape}{
- Numeric. Initial value for \eqn{\sigma}{sigma} and
+ \item{ilocation, iscale, ishape}{
+ Numeric. Initial value for the location parameter, \eqn{\sigma}{sigma} and
\eqn{\xi}{xi}. A \code{NULL} means a value is computed internally.
- The argument \code{ishape} is more important than the other two because
- they are initialized from the initial \eqn{\xi}{xi}.
+ The argument \code{ishape} is more important than the other two.
If a failure to converge occurs, or even to obtain initial values occurs,
try assigning \code{ishape} some value
(positive or negative; the sign can be very important).
- Also, in general, a larger value of \code{iscale} is better than a
+ Also, in general, a larger value of \code{iscale} tends to be better than a
smaller value.
+% because they are initialized from the initial \eqn{\xi}{xi}.
+
+
+
}
% \item{rshape}{
% Numeric, of length 2.
@@ -109,22 +117,36 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
\item{imethod}{
Initialization method. Either the value 1 or 2.
- Method 1 involves choosing the best \eqn{\xi}{xi} on a course grid with
- endpoints \code{gshape}.
- Method 2 is similar to the method of moments.
- If both methods fail try using \code{ishape}.
+ If both methods fail then try using \code{ishape}.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+% Method 1 involves choosing the best \eqn{\xi}{xi} on the grid values
+% given by \code{gshape}.
+% Method 2 is similar to the method of moments.
}
\item{gshape}{
- Numeric, of length 2.
- Range of \eqn{\xi}{xi} used for a grid search for a good initial value
+ Numeric vector.
+ The values are used for a grid search for an initial value
for \eqn{\xi}{xi}.
- Used only if \code{imethod} equals 1.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+% Used only if \code{imethod} equals 1.
}
- \item{tolshape0, giveWarning}{
+ \item{gprobs.y, gscale.mux, iprobs.y}{
+ Numeric vectors, used for the initial values.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+
+ }
+
+ \item{tolshape0}{
Passed into \code{\link{dgev}} when computing the log-likelihood.
@@ -182,15 +204,15 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
% and when \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5} they exist but are
% non-regular; and when \eqn{\xi < -1}{xi < -1} then the maximum
-% likelihood estimators do not exist. In most environmental data
+% likelihood estimators do not exist. In most environmental data
% sets \eqn{\xi > -1}{xi > -1} so maximum likelihood works fine.
}
\section{Warning }{
Currently, if an estimate of \eqn{\xi}{xi} is too close to 0 then
- an error will occur for \code{gev()} with multivariate responses.
- In general, \code{egev()} is more reliable than \code{gev()}.
+ an error may occur for \code{gev()} with multivariate responses.
+ In general, \code{gevff()} is more reliable than \code{gev()}.
Fitting the GEV by maximum likelihood estimation can be numerically
@@ -204,6 +226,7 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
helps handle the problem of straying outside the parameter space.)
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -239,13 +262,27 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
\note{
The \pkg{VGAM} family function \code{gev} can handle a multivariate
- (matrix) response. If so, each row of the matrix is sorted into
+ (matrix) response, cf. multiple responses.
+ If so, each row of the matrix is sorted into
descending order and \code{NA}s are put last.
With a vector or one-column matrix response using
- \code{egev} will give the same result but be faster and it handles
+ \code{gevff} will give the same result but be faster and it handles
the \eqn{\xi = 0}{xi = 0} case.
The function \code{gev} implements Tawn (1988) while
- \code{egev} implements Prescott and Walden (1980).
+ \code{gevff} implements Prescott and Walden (1980).
+
+
+ Function \code{egev()} has been replaced by the
+ new family function \code{gevff()}. It now
+ conforms to the usual \pkg{VGAM} philosophy of
+ having \code{M1} linear predictors per (independent) response.
+ This is the usual way multiple responses are handled.
+ Hence \code{vglm(cbind(y1, y2)\ldots, gevff, \ldots)} will have
+ 6 linear predictors and it is possible to constrain the
+ linear predictors so that the answer is similar to \code{gev()}.
+ Missing values in the response of \code{gevff()} will be deleted;
+ this behaviour is the same as with almost every other
+ \pkg{VGAM} family function.
The shape parameter \eqn{\xi}{xi} is difficult to estimate
@@ -268,18 +305,19 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
\seealso{
\code{\link{rgev}},
\code{\link{gumbel}},
- \code{\link{egumbel}},
+ \code{\link{gumbelff}},
\code{\link{guplot}},
- \code{\link{rlplot.egev}},
+ \code{\link{rlplot.gevff}},
\code{\link{gpd}},
\code{\link{weibullR}},
\code{\link{frechet}},
\code{\link{extlogit}},
\code{\link{oxtemp}},
- \code{\link{venice}}.
+ \code{\link{venice}},
+ \code{\link{CommonVGAMffArguments}}.
- %\code{\link{egev}},
+ %\code{\link{gevff}},
%\code{\link{ogev}},
@@ -301,7 +339,7 @@ with(venice, lines(year, fitted(fit1)[,1], lty = "dashed", col = "blue"))
legend("topleft", lty = "dashed", col = "blue", "Fitted 95 percentile")
# Univariate example
-(fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
+(fit <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE))
head(fitted(fit))
coef(fit, matrix = TRUE)
Coef(fit)
@@ -315,3 +353,16 @@ rlplot(fit)
\keyword{regression}
+
+
+% type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
+
+% \item{gshape}{
+% Numeric, of length 2.
+% Range of \eqn{\xi}{xi} used for a grid search for a good initial value
+% for \eqn{\xi}{xi}.
+% Used only if \code{imethod} equals 1.
+% }
+
+
+
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index 65532b5..be7a64c 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -15,8 +15,8 @@
}
\usage{
-dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 =
- sqrt(.Machine$double.eps), oobounds.log = -Inf, giveWarning = FALSE)
+dgev(x, location = 0, scale = 1, shape = 0, log = FALSE,
+ tolshape0 = sqrt(.Machine$double.eps))
pgev(q, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE)
qgev(p, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE)
rgev(n, location = 0, scale = 1, shape = 0)
@@ -34,6 +34,7 @@ rgev(n, location = 0, scale = 1, shape = 0)
Logical.
If \code{log = TRUE} then the logarithm of the density is returned.
+
}
\item{lower.tail, log.p}{
Same meaning as in \code{\link[stats:Uniform]{punif}}
@@ -48,21 +49,28 @@ rgev(n, location = 0, scale = 1, shape = 0)
this value then it will be assumed zero and a Gumbel distribution will
be used.
- }
- \item{oobounds.log, giveWarning}{
- Numeric and logical.
- The GEV distribution has support in the region satisfying
- \code{1+shape*(x-location)/scale > 0}. Outside that region, the
- logarithm of the density is assigned \code{oobounds.log}, which
- equates to a zero density.
- It should not be assigned a positive number, and ideally is very negative.
- Since \code{\link{egev}} uses this function it is necessary
- to return a finite value outside this region so as to allow
- for half-stepping. Both arguments are in support of this.
- This argument and others match those of \code{\link{egev}}.
}
+
+
+% 20160412; Deprecated:
+% \item{oobounds.log, giveWarning}{
+% Numeric and logical.
+% The GEV distribution has support in the region satisfying
+% \code{1+shape*(x-location)/scale > 0}. Outside that region, the
+% logarithm of the density is assigned \code{oobounds.log}, which
+% equates to a zero density.
+% It should not be assigned a positive number, and ideally is very negative.
+% Since \code{\link{egev}} uses this function it is necessary
+% to return a finite value outside this region so as to allow
+% for half-stepping. Both arguments are in support of this.
+% This argument and others match those of \code{\link{egev}}.
+% }
+
+
+
+
}
\value{
\code{dgev} gives the density,
@@ -82,7 +90,7 @@ London: Springer-Verlag.
\author{ T. W. Yee }
\details{
See \code{\link{gev}}, the \pkg{VGAM} family function
- for estimating the two parameters by maximum likelihood estimation,
+ for estimating the 3 parameters by maximum likelihood estimation,
for formulae and other details.
Apart from \code{n}, all the above arguments may be vectors and
are recyled to the appropriate length if necessary.
@@ -101,27 +109,33 @@ London: Springer-Verlag.
}
\seealso{
\code{\link{gev}},
- \code{\link{egev}},
+ \code{\link{gevff}},
\code{\link{vglm.control}}.
}
-\examples{
-\dontrun{ loc <- 2; sigma <- 1; xi <- -0.4
-x <- seq(loc - 3, loc + 3, by = 0.01)
+\examples{ loc <- 2; sigma <- 1; xi <- -0.4
+pgev(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi)
+\dontrun{ x <- seq(loc - 3, loc + 3, by = 0.01)
plot(x, dgev(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0,1),
- main = "Blue is density, red is cumulative distribution function",
+ main = "Blue is density, orange is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
abline(h = 0, col = "blue", lty = 2)
lines(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi),
dgev(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi),
col = "purple", lty = 3, type = "h")
-lines(x, pgev(x, loc, sigma, xi), type = "l", col = "red")
-abline(h = 0, lty = 2)
-
-pgev(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi)
+lines(x, pgev(x, loc, sigma, xi), type = "l", col = "orange")
+abline(h = 0, lty = 2, col = "gray50")
}
}
\keyword{distribution}
+%dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 =
+% sqrt(.Machine$double.eps), oobounds.log = -Inf, giveWarning = FALSE)
+
+
+
+
+
+
diff --git a/man/gpd.Rd b/man/gpd.Rd
index 052111e..e2d3ee8 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -11,7 +11,7 @@
gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(90, 95), iscale = NULL, ishape = NULL,
tolshape0 = 0.001, type.fitted = c("percentiles", "mean"),
- giveWarning = TRUE, imethod = 1, zero = "shape")
+ imethod = 1, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -100,7 +100,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
% }
- \item{tolshape0, giveWarning}{
+ \item{tolshape0}{
Passed into \code{\link{dgpd}} when computing the log-likelihood.
@@ -300,3 +300,4 @@ plot(as(fit2, "vgam"), se = TRUE, scol = "blue") }
%
% # gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
%
+% giveWarning = TRUE, imethod = 1, zero = "shape"
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index 616b049..a454914 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -15,8 +15,7 @@
}
\usage{
dgpd(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt(.Machine$double.eps),
- oobounds.log = -Inf, giveWarning = FALSE)
+ tolshape0 = sqrt(.Machine$double.eps))
pgpd(q, location = 0, scale = 1, shape = 0,
lower.tail = TRUE, log.p = FALSE)
qgpd(p, location = 0, scale = 1, shape = 0,
@@ -52,23 +51,23 @@ rgpd(n, location = 0, scale = 1, shape = 0)
}
- \item{oobounds.log, giveWarning}{
- Numeric and logical.
- The GPD distribution has support in the region satisfying
- \code{(x-location)/scale > 0}
- and
- \code{1+shape*(x-location)/scale > 0}.
- Outside that region, the
- logarithm of the density is assigned \code{oobounds.log}, which
- equates to a zero density.
- It should not be assigned a positive number, and ideally is very negative.
- Since \code{\link{gpd}} uses this function it is necessary
- to return a finite value outside this region so as to allow
- for half-stepping. Both arguments are in support of this.
- This argument and others match those of \code{\link{gpd}}.
- }
+% \item{oobounds.log, giveWarning}{
+% Numeric and logical.
+% The GPD distribution has support in the region satisfying
+% \code{(x-location)/scale > 0}
+% and
+% \code{1+shape*(x-location)/scale > 0}.
+% Outside that region, the
+% logarithm of the density is assigned \code{oobounds.log}, which
+% equates to a zero density.
+% It should not be assigned a positive number, and ideally is very negative.
+% Since \code{\link{gpd}} uses this function it is necessary
+% to return a finite value outside this region so as to allow
+% for half-stepping. Both arguments are in support of this.
+% This argument and others match those of \code{\link{gpd}}.
+% }
}
\value{
@@ -131,3 +130,4 @@ pgpd(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi)
\keyword{distribution}
+% oobounds.log = -Inf, giveWarning = FALSE
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index 0bd7746..90e9999 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -1,6 +1,6 @@
\name{gumbel}
\alias{gumbel}
-\alias{egumbel}
+\alias{gumbelff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Gumbel Distribution Family Function }
\description{
@@ -12,9 +12,9 @@
gumbel(llocation = "identitylink", lscale = "loge",
iscale = NULL, R = NA, percentiles = c(95, 99),
mpv = FALSE, zero = NULL)
-egumbel(llocation = "identitylink", lscale = "loge",
- iscale = NULL, R = NA, percentiles = c(95, 99),
- mpv = FALSE, zero = NULL)
+gumbelff(llocation = "identitylink", lscale = "loge",
+ iscale = NULL, R = NA, percentiles = c(95, 99),
+ zero = "scale", mpv = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -147,6 +147,7 @@ egumbel(llocation = "identitylink", lscale = "loge",
}
\author{ T. W. Yee }
+
\section{Warning}{
When \code{R} is not given (the default) the fitted percentiles are
that of the data, and not of the
@@ -159,11 +160,16 @@ egumbel(llocation = "identitylink", lscale = "loge",
}
\note{
- \code{egumbel()} only handles a univariate response, and is
- preferred to \code{gumbel()} because it is faster.
+ Like many other usual \pkg{VGAM} family functions,
+ \code{gumbelff()} handles (independent) multiple responses.
+
+% and is preferred to \code{gumbel()} because it is faster.
- \code{gumbel()} can handle a multivariate response, i.e., a
+
+ \code{gumbel()} can handle
+ more of a
+ multivariate response, i.e., a
matrix with more than one column. Each row of the matrix is
sorted into descending order.
Missing values in the response are allowed but require
@@ -172,6 +178,7 @@ egumbel(llocation = "identitylink", lscale = "loge",
one has a matrix \code{y}, say, where
\code{y[, 2]} contains the second order statistics, etc.
+
% If a random variable \eqn{Y} has a \emph{reverse}
% \eqn{Gumbel(\mu,\sigma)}{Gumbel(mu,sigma)} distribution then \eqn{-Y}
@@ -189,7 +196,7 @@ egumbel(llocation = "identitylink", lscale = "loge",
\code{\link{cens.gumbel}},
\code{\link{guplot}},
\code{\link{gev}},
- \code{\link{egev}},
+ \code{\link{gevff}},
\code{\link{venice}}.
@@ -201,7 +208,7 @@ egumbel(llocation = "identitylink", lscale = "loge",
\examples{
# Example 1: Simulated data
gdata <- data.frame(y1 = rgumbel(n = 1000, loc = 100, scale = exp(1)))
-fit1 <- vglm(y1 ~ 1, egumbel(perc = NULL), data = gdata, trace = TRUE)
+fit1 <- vglm(y1 ~ 1, gumbelff(perc = NULL), data = gdata, trace = TRUE)
coef(fit1, matrix = TRUE)
Coef(fit1)
head(fitted(fit1))
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index 38066f9..4533d78 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -107,7 +107,7 @@ rgumbel(n, location = 0, scale = 1)
\seealso{
\code{\link{gumbel}},
- \code{\link{egumbel}},
+ \code{\link{gumbelff}},
\code{\link{gev}},
\code{\link{dgompertz}}.
diff --git a/man/guplot.Rd b/man/guplot.Rd
index b1314d8..c8ae6ca 100644
--- a/man/guplot.Rd
+++ b/man/guplot.Rd
@@ -85,7 +85,7 @@ guplot.vlm(object, ...)
}
\seealso{
\code{\link{gumbel}},
- \code{\link{egumbel}},
+ \code{\link{gumbelff}},
\code{\link{gev}},
\code{\link{venice}}.
diff --git a/man/huber.Rd b/man/huber.Rd
index e3ba3a3..5ebef79 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -2,7 +2,7 @@
\alias{huber2}
\alias{huber1}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Huber's least favourable distribution family function }
+\title{ Huber's Least Favourable Distribution Family Function }
\description{
M-estimation of the two parameters of
Huber's least favourable distribution.
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index 624c21d..92d0024 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -4,7 +4,7 @@
\alias{rhuber}
\alias{qhuber}
\alias{phuber}
-\title{Huber's least favourable distribution}
+\title{Huber's Least Favourable Distribution}
\description{
Density, distribution function, quantile function and random generation
for Huber's least favourable distribution, see Huber and Ronchetti (2009).
diff --git a/man/identitylink.Rd b/man/identitylink.Rd
index f5e5ec3..4def63e 100644
--- a/man/identitylink.Rd
+++ b/man/identitylink.Rd
@@ -40,7 +40,7 @@ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
The function \code{negidentity} is the negative-identity link function and
corresponds to \eqn{g(\theta)=-\theta}{g(theta)=-theta}.
This is useful for some models, e.g., in the literature supporting the
- \code{\link{egev}} function it seems that half of the authors use
+ \code{\link{gevff}} function it seems that half of the authors use
\eqn{\xi=-k}{xi=-k} for the shape parameter and the other half use \eqn{k}
instead of \eqn{\xi}{xi}.
diff --git a/man/inv.paralogistic.Rd b/man/inv.paralogistic.Rd
index cf14cb1..27673a7 100644
--- a/man/inv.paralogistic.Rd
+++ b/man/inv.paralogistic.Rd
@@ -9,7 +9,7 @@
\usage{
inv.paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
- gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
+ gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/linkfun.Rd b/man/linkfun.Rd
index 8980e94..07d74ff 100644
--- a/man/linkfun.Rd
+++ b/man/linkfun.Rd
@@ -1,7 +1,7 @@
\name{linkfun}
\alias{linkfun}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Link functions }
+\title{ Link Functions }
\description{
Generic function for returning the link functions of a fitted object.
diff --git a/man/logc.Rd b/man/logc.Rd
index ada9b6c..21b720a 100644
--- a/man/logc.Rd
+++ b/man/logc.Rd
@@ -3,8 +3,8 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Complementary-log Link Function }
\description{
- Computes the complentary-log transformation, including its inverse and the
- first two derivatives.
+ Computes the complementary-log transformation, including its inverse and the
+ first two derivatives.
}
\usage{
diff --git a/man/loge.Rd b/man/loge.Rd
index 854d311..0a63cba 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -3,7 +3,7 @@
\alias{negloge}
\alias{logneg}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Log link function, and variants }
+\title{ Log Link Function, and Variants }
\description{
Computes the log transformation, including its inverse and the first
two derivatives.
diff --git a/man/logoff.Rd b/man/logoff.Rd
index 33a220f..1bdeb29 100644
--- a/man/logoff.Rd
+++ b/man/logoff.Rd
@@ -1,7 +1,7 @@
\name{logoff}
\alias{logoff}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Log link function with an offset }
+\title{ Log Link Function with an Offset }
\description{
Computes the log transformation with an offset,
including its inverse and the first two derivatives.
diff --git a/man/lomax.Rd b/man/lomax.Rd
index 51e222e..071e3fd 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -10,7 +10,8 @@
\usage{
lomax(lscale = "loge", lshape3.q = "loge", iscale = NULL,
ishape3.q = NULL, imethod = 1, gscale = exp(-5:5),
- gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
+ gshape3.q = seq(0.75, 4, by = 0.25),
+ probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/marital.nz.Rd b/man/marital.nz.Rd
index 795d679..dc7f6f7 100644
--- a/man/marital.nz.Rd
+++ b/man/marital.nz.Rd
@@ -2,7 +2,7 @@
\alias{marital.nz}
\docType{data}
\title{
- New Zealand Marital Data.
+ New Zealand Marital Data
}
\description{
Some marital data mainly from a large NZ company collected in the
diff --git a/man/melbmaxtemp.Rd b/man/melbmaxtemp.Rd
index f7da7e4..501731c 100644
--- a/man/melbmaxtemp.Rd
+++ b/man/melbmaxtemp.Rd
@@ -1,7 +1,7 @@
\name{melbmaxtemp}
\alias{melbmaxtemp}
\docType{data}
-\title{ melbmaxtemp daily maximum temperatures}
+\title{ Melbourne Daily Maximum Temperatures}
\description{
Melbourne daily maximum temperatures in degrees Celsius
over the ten-year period 1981--1990.
diff --git a/man/multilogit.Rd b/man/multilogit.Rd
index 8021165..e80081b 100644
--- a/man/multilogit.Rd
+++ b/man/multilogit.Rd
@@ -8,7 +8,7 @@
}
\usage{
-multilogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
+multilogit(theta, refLevel = "(Last)", M = NULL, whitespace = FALSE,
bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 14a9802..c49d9da 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -9,7 +9,7 @@
}
\usage{
multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
- refLevel = "last", whitespace = FALSE)
+ refLevel = "(Last)", whitespace = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -36,16 +36,28 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
}
\item{refLevel}{
- Either a single positive integer or a value of the factor.
- If an integer then it specifies which
+ Either (1) a single positive integer, or (2) a value of the factor,
+ or (3) a character string.
+ If inputted as an integer then it specifies which
column of the response matrix is the reference or baseline level.
The default is the last one (the \eqn{(M+1)}th one).
- If used, this argument will be often assigned the value \code{1}.
+ If used, this argument will usually be assigned the value \code{1}.
If inputted as a value of a factor then beware of missing values
of certain levels of the factor
(\code{drop.unused.levels = TRUE} or
\code{drop.unused.levels = FALSE}).
See the example below.
+ If inputted as a character string then this should be
+ equal to (A) one of the levels of the factor response, else (B) one
+ of the column names of the matrix response of counts;
+ e.g.,
+ \code{vglm(cbind(normal, mild, severe) ~ let, multinomial(refLevel = "severe"), data = pneumo)}
+ if it was (incorrectly because the response is ordinal)
+ applied to the \code{\link{pneumo}} data set.
+ Another example is
+ \code{vglm(ethnicity ~ age, multinomial(refLevel = "European"), data = xs.nz)}
+ if it was applied to the \code{\link[VGAMdata]{xs.nz}} data set.
+
}
@@ -233,6 +245,7 @@ Cambridge University Press.
}
% \code{\link[base:Multinom]{rmultinom}}
+% \code{\link{pneumo}},
\examples{
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 862b705..bf9fbae 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -1,7 +1,7 @@
\name{nbcanlink}
\alias{nbcanlink}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Negative binomial canonical link function }
+\title{ Negative Binomial Canonical Link Function }
\description{
Computes the negative binomial canonical link transformation,
including its inverse and the first two derivatives.
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index c25ad10..d6f7973 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -11,27 +11,24 @@
}
\usage{
negbinomial(zero = "size", parallel = FALSE, deviance.arg = FALSE,
- mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
- max.support = 4000, max.chunk.MB = 30,
+ mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
lmu = "loge", lsize = "loge",
- imethod = 1, imu = NULL, probs.y = 0.35,
- ishrinkage = 0.95, isize = NULL, gsize.mux = exp((-12:6)/2))
+ imethod = 1, imu = NULL, iprobs.y = NULL,
+ gprobs.y = (0:9)/10, isize = NULL,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
polya(zero = "size", type.fitted = c("mean", "prob"),
- mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999,
- eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
- lprob = "logit", lsize = "loge",
- imethod = 1, iprob = NULL,
- probs.y = 0.35, ishrinkage = 0.95,
- isize = NULL, gsize.mux = exp((-12:6)/2),
- imunb = NULL)
+ mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lprob = "logit", lsize = "loge", imethod = 1, iprob = NULL,
+ iprobs.y = NULL, gprobs.y = (0:9)/10, isize = NULL,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL)
polyaR(zero = "size", type.fitted = c("mean", "prob"),
- mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999,
- eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
- lsize = "loge", lprob = "logit",
- imethod = 1, isize = NULL,
- iprob = NULL, probs.y = 0.35,
- ishrinkage = 0.95, gsize.mux = exp((-12:6)/2),
- imunb = NULL)
+ mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lsize = "loge", lprob = "logit", imethod = 1, iprob = NULL,
+ iprobs.y = NULL, gprobs.y = (0:9)/10, isize = NULL,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL)
}
% deviance.arg = FALSE,
@@ -62,6 +59,7 @@ polyaR(zero = "size", type.fitted = c("mean", "prob"),
computed internally using a gridsearch based on \code{gsize.mux}.
The last argument is ignored if used within \code{\link{cqo}}; see
the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead.
+ In the future \code{isize} and \code{iprob} might be deprecated.
}
@@ -157,7 +155,8 @@ Minimum value of the NBD mean divided by \code{size} parameter.
The closer this ratio is to 0, the closer the distribution is
to a Poisson.
Iterations will stop when an estimate of \eqn{k} is so large,
-relative to the mean, than it is below this threshold.
+relative to the mean, that it is below this threshold
+(this is treated as a boundary of the parameter space).
@@ -190,7 +189,8 @@ respect to the \code{size} parameter.
estimates of the NBD mean parameter.
That is, it is on a relative scale rather than on an
absolute scale.
-
+ If the counts are very large in value then convergence failure might
+ occur; if so, then try a smaller value such as \code{gsize.mux = exp(-40)}.
}
@@ -230,10 +230,10 @@ respect to the \code{size} parameter.
}
\item{imethod}{
- An integer with value \code{1} or \code{2} or \code{3} which
+ An integer with value \code{1} or \code{2} etc. which
specifies the initialization method for the \eqn{\mu}{mu} parameter.
If failure to converge occurs try another value
- and/or else specify a value for \code{ishrinkage}
+ and/or else specify a value for \code{iprobs.y}
and/or else specify a value for \code{isize}.
@@ -251,26 +251,38 @@ respect to the \code{size} parameter.
}
- \item{probs.y}{
- Passed into the \code{probs} argument
+ \item{gprobs.y}{
+ A vector representing a grid;
+ passed into the \code{probs} argument
of \code{\link[stats:quantile]{quantile}}
- when \code{imethod = 3} to obtain an initial value for the mean.
+ when \code{imethod = 1} to obtain an initial value for the mean
+ of each response. Is overwritten by any value of \code{iprobs.y}.
}
- \item{ishrinkage}{
- How much shrinkage is used when initializing \eqn{\mu}{mu}.
- The value must be between 0 and 1 inclusive, and
- a value of 0 means the individual response values are used,
- and a value of 1 means the median or mean is used.
- This argument is used in conjunction with \code{imethod}.
- If convergence failure occurs try setting this argument to 1.
+ \item{iprobs.y}{
+ Passed into the \code{probs} argument
+ of \code{\link[stats:quantile]{quantile}}
+ when \code{imethod = 1} to obtain an initial value for the mean
+ of each response. Overwrites any value of \code{gprobs.y}.
+ This argument might be deleted in the future.
}
+% \item{ishrinkage}{
+% How much shrinkage is used when initializing \eqn{\mu}{mu}.
+% The value must be between 0 and 1 inclusive, and
+% a value of 0 means the individual response values are used,
+% and a value of 1 means the median or mean is used.
+% This argument is used in conjunction with \code{imethod}.
+% If convergence failure occurs try setting this argument to 1.
+% }
+
+
\item{zero}{
- Can be an integer-valued vector, usually assigned \eqn{-2}
- or \eqn{2} if used at all. Specifies which of the two
+ Can be an integer-valued vector, and if so, then
+ it is usually assigned \eqn{-2}
+ or \eqn{2}. Specifies which of the two
linear/additive predictors are modelled as an intercept
only. By default, the \eqn{k} parameter (after \code{lsize}
is applied) is modelled as a single unknown number that
@@ -375,23 +387,25 @@ respect to the \code{size} parameter.
\section{Warning}{
Poisson regression corresponds to \eqn{k} equalling
infinity. If the data is Poisson or close to Poisson,
- numerical problems will occur.
+ numerical problems may occur.
Some corrective measures are taken, e.g.,
- \eqn{k} is capped during estimation
+ \eqn{k} is effectively capped (relative to the mean) during estimation
to some large value and a warning is issued.
- Note that \code{dnbinom(0, mu, size = Inf)}
- currently
- is a \code{NaN} (a bug),
- therefore if the data has some 0s then
- setting \code{crit = "coef"} will avoid the problem that
- the log-likelihood will be undefined during the last
- stages of estimation.
And setting \code{stepsize = 0.5} for half stepping is
- probably a good idea too.
- Possibly setting \code{crit = "coef"} is a good idea because
- the log-likelihood is often a \code{NaN} when the \code{size}
- value is very large.
+ probably a good idea too when the data is extreme.
+
+
+% Possibly setting \code{crit = "coef"} is a good idea because
+% the log-likelihood is often a \code{NaN} when the \code{size}
+% value is very large.
+
+% Note that \code{dnbinom(0, mu, size = Inf)} currently
+% is a \code{NaN} (a bug),
+% therefore if the data has some 0s then
+% setting \code{crit = "coef"} will avoid the problem that
+% the log-likelihood will be undefined during the last
+% stages of estimation.
% Possibly choosing a log-log link may help in such cases,
@@ -524,7 +538,7 @@ Fitting the negative binomial distribution to biological data.
\code{max.support},
\code{nsimEIM},
\code{cutoff.prob},
- \code{ishrinkage},
+ \code{iprobs.y},
\code{imethod},
\code{isize},
\code{zero},
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index 1b61dfb..e5c549d 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -9,7 +9,7 @@
}
\usage{
negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
- probs.y = 0.35, imethod = 1,
+ iprobs.y = 0.35, imethod = 1,
ishrinkage = 0.95, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -29,7 +29,7 @@ negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
}
- \item{probs.y}{
+ \item{iprobs.y}{
Same as \code{\link{negbinomial}}.
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
index e3e6b5a..18ace78 100644
--- a/man/normal.vcm.Rd
+++ b/man/normal.vcm.Rd
@@ -16,7 +16,8 @@ normal.vcm(link.list = list("(Default)" = "identitylink"),
lsd = "loge", lvar = "loge",
esd = list(), evar = list(),
var.arg = FALSE, imethod = 1,
- icoefficients = NULL, isd = NULL, zero = "sd")
+ icoefficients = NULL, isd = NULL, zero = "sd",
+ sd.inflation.factor = 2.5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -55,7 +56,7 @@ normal.vcm(link.list = list("(Default)" = "identitylink"),
}
\item{var.arg, imethod, isd}{
- Same as \code{\link{uninormal}}.
+ Same as, or similar to, \code{\link{uninormal}}.
@@ -66,6 +67,18 @@ normal.vcm(link.list = list("(Default)" = "identitylink"),
viz. the standard deviation/variance parameter.
+
+ }
+
+ \item{sd.inflation.factor}{
+ Numeric, should be greater than 1.
+ The initial value of the standard deviation is multiplied by this,
+ unless \code{isd} is inputted.
+ Experience has shown that it is safer to start off with a larger value
+ rather than a smaller one.
+
+
+
}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 1da8c9e..e920ca4 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -3,7 +3,31 @@
%
%
%
-% 201602:
+% 201605
+\alias{deflat.limit.oipospois}
+% 20160418 (keyword: mgcvvgam)
+% \alias{ps}
+\alias{Pen.psv}
+\alias{psv2magic}
+% \alias{psvglm.fit}
+% \alias{psvlm.wfit}
+\alias{psvgam-class}
+% \alias{PS}
+%
+\alias{checkwz}
+\alias{vforsub}
+\alias{vbacksub}
+\alias{vchol}
+\alias{process.constraints}
+\alias{mux22}
+\alias{mux111}
+%
+%
+% 201602, 201603, 201604:
+\alias{genbetaII.Loglikfun4}
+\alias{posNBD.Loglikfun2}
+\alias{NBD.Loglikfun2}
+\alias{AR1.gammas}
\alias{Init.mu}
\alias{.min.criterion.VGAM}
\alias{predictvglmS4VGAM}
@@ -55,6 +79,9 @@
\alias{rlevy}
% 201407; expected.betabin.ab is needed for zibetabinomialff() in YBook.
\alias{grid.search}
+\alias{grid.search2}
+\alias{grid.search3}
+\alias{grid.search4}
\alias{expected.betabin.ab}
% 201406;
% \alias{interleave.VGAM} DONE 20151204
@@ -478,10 +505,8 @@
\alias{model.matrix.qrrvglm}
\alias{model.matrixvgam}
% \alias{mux11}
-% \alias{mux111}
% \alias{mux15}
% \alias{mux2}
-% \alias{mux22}
% \alias{mux5}
% \alias{mux55}
% \alias{mux7}
@@ -536,7 +561,6 @@
\alias{negzero.expression.VGAM}
\alias{process.binomial2.data.VGAM}
\alias{process.categorical.data.VGAM}
-% \alias{process.constraints}
% \alias{proj.vgam}
% \alias{proj.vglm}
\alias{put.caption}
@@ -631,8 +655,6 @@
% \alias{valt.2iter}
% \alias{valt.control}
% \alias{varassign}
-% \alias{vbacksub}
-% \alias{vchol}
% \alias{vchol.greenstadt}
\alias{vcontrol.expression}
% \alias{vcovdefault}
@@ -641,7 +663,6 @@
% \alias{vcovvlm}
% \alias{veigen}
% \alias{vellipse}
-% \alias{vforsub}
% \alias{vgam.fit}
% \alias{vgam.match}
% \alias{vgam.nlchisq}
diff --git a/man/oiposbinomUC.Rd b/man/oiposbinomUC.Rd
new file mode 100644
index 0000000..361d858
--- /dev/null
+++ b/man/oiposbinomUC.Rd
@@ -0,0 +1,121 @@
+\name{Oiposbinom}
+\alias{Oiposbinom}
+\alias{doiposbinom}
+\alias{poiposbinom}
+\alias{qoiposbinom}
+\alias{roiposbinom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Positive Binomial Distribution }
+\description{
+ Density,
+ distribution function,
+ quantile function and random generation
+ for the one-inflated positive
+ binomial distribution with parameter \code{pstr1}.
+
+}
+\usage{
+doiposbinom(x, size, prob, pstr1 = 0, log = FALSE)
+poiposbinom(q, size, prob, pstr1 = 0)
+qoiposbinom(p, size, prob, pstr1 = 0)
+roiposbinom(n, size, prob, pstr1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, p, q, n}{Same as \code{\link{Posbinom}}. }
+ \item{size, prob}{Same as \code{\link{Posbinom}}. }
+ \item{pstr1}{
+ Probability of a structural one
+ (i.e., ignoring the positive binomial distribution),
+ called \eqn{\phi}{phi}.
+ The default value of \eqn{\phi = 0}{phi = 0} corresponds
+ to the response having a positive binomial distribution.
+
+
+ }
+ \item{log}{ Logical. Return the logarithm of the answer? }
+}
+\details{
+ The probability function of \eqn{Y} is 1 with probability
+ \eqn{\phi}{phi}, and \eqn{PosBinomial(size, prob)}{PosBinomial(size, prob)} with
+ probability \eqn{1-\phi}{1-phi}. Thus
+ \deqn{P(Y=1) =\phi + (1-\phi) P(W=1)}{%
+ P(Y=1) = phi + (1-phi) * P(W=1)}
+ where \eqn{W} is distributed as a
+ positive \eqn{binomial(size, prob)}{binomial(size, prob)} random variate.
+
+
+}
+\value{
+ \code{doiposbinom} gives the density,
+ \code{poiposbinom} gives the distribution function,
+ \code{qoiposbinom} gives the quantile function, and
+ \code{roiposbinom} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+ The argument \code{pstr1} is recycled to the required length, and
+ usually has values which lie in the interval \eqn{[0,1]}.
+%
+%
+%
+ These functions actually allow for the \emph{zero-deflated
+ binomial} distribution. Here, \code{pstr1} is also permitted
+ to lie in the interval \eqn{[-A, 0]} for some positive
+ quantity \eqn{A}. The
+ resulting probability of a unit value is \emph{less than}
+ the nominal positive binomial value, and the use of \code{pstr1} to
+ stand for the probability of a structural 1 loses its
+ meaning.
+%
+%
+%
+  If \code{pstr1} equals \eqn{-A}
+  then this corresponds to the 0- and 1-truncated binomial distribution.
+
+
+}
+
+\seealso{
+ \code{\link{posbinomial}},
+ \code{\link[stats:binomial]{dbinom}},
+ \code{\link{binomialff}}.
+
+
+% \code{\link{oiposbinomial}},
+
+
+}
+\examples{
+size <- 10; prob <- 0.2; pstr1 <- 0.4; x <- (-1):size
+(ii <- doiposbinom(x, size, prob, pstr1 = pstr1))
+table(roiposbinom(100, size, prob, pstr1 = pstr1))
+round(doiposbinom(x, size, prob, pstr1 = pstr1) * 100) # Should be similar
+
+\dontrun{ x <- 0:size
+par(mfrow = c(2, 1)) # One-Inflated Positive Binomial
+barplot(rbind(doiposbinom(x, size, prob, pstr1 = pstr1),
+ dposbinom(x, size, prob)),
+ beside = TRUE, col = c("blue", "orange"),
+ main = paste("OIPB(", size, ",", prob, ", pstr1 = ", pstr1, ") (blue) vs",
+ " PosBinomial(", size, ",", prob, ") (orange)", sep = ""),
+ names.arg = as.character(x))
+
+# One-deflated Pos Binomial
+deflat.limit <- -dposbinom(1, size, prob) / (1 - dposbinom(1, size, prob))
+deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+newpstr1 <- round(deflat.limit, 3) + 0.001 # A little from the boundary
+barplot(rbind(doiposbinom(x, size, prob, pstr1 = newpstr1),
+ dposbinom(x, size, prob)),
+ beside = TRUE, col = c("blue","orange"),
+ main = paste("ODPB(", size, ",", prob, ", pstr1 = ", newpstr1, ") (blue) vs",
+ " PosBinomial(", size, ",", prob, ") (orange)", sep = ""),
+ names.arg = as.character(x)) }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/oipospoisUC.Rd b/man/oipospoisUC.Rd
new file mode 100644
index 0000000..f3f2d7b
--- /dev/null
+++ b/man/oipospoisUC.Rd
@@ -0,0 +1,116 @@
+\name{Oipospois}
+\alias{Oipospois}
+\alias{doipospois}
+\alias{poipospois}
+\alias{qoipospois}
+\alias{roipospois}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Positive Poisson Distribution }
+\description{
+ Density,
+ distribution function,
+ quantile function and random generation
+ for the one-inflated positive
+ Poisson distribution with parameter \code{pstr1}.
+
+}
+\usage{
+doipospois(x, lambda, pstr1 = 0, log = FALSE)
+poipospois(q, lambda, pstr1 = 0)
+qoipospois(p, lambda, pstr1 = 0)
+roipospois(n, lambda, pstr1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, p, q, n}{Same as \code{\link{Pospois}}.}
+ \item{lambda}{ Vector of positive means. }
+ \item{pstr1}{
+ Probability of a structural one
+ (i.e., ignoring the positive Poisson distribution),
+ called \eqn{\phi}{phi}.
+ The default value of \eqn{\phi = 0}{phi = 0} corresponds
+ to the response having a positive Poisson distribution.
+
+
+ }
+ \item{log}{ Logical. Return the logarithm of the answer? }
+}
+\details{
+ The probability function of \eqn{Y} is 1 with probability
+ \eqn{\phi}{phi}, and \eqn{PosPoisson(\lambda)}{PosPoisson(lambda)} with
+ probability \eqn{1-\phi}{1-phi}. Thus
+ \deqn{P(Y=1) =\phi + (1-\phi) P(W=1)}{%
+ P(Y=1) = phi + (1-phi) * P(W=1)}
+ where \eqn{W} is distributed as a
+ positive \eqn{Poisson(\lambda)}{Poisson(lambda)} random variate.
+
+
+}
+\value{
+ \code{doipospois} gives the density,
+ \code{poipospois} gives the distribution function,
+ \code{qoipospois} gives the quantile function, and
+ \code{roipospois} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+ The argument \code{pstr1} is recycled to the required length, and
+ usually has values which lie in the interval \eqn{[0,1]}.
+
+
+
+  These functions actually allow for the \emph{one-deflated
+  positive Poisson} distribution. Here, \code{pstr1} is also permitted
+ to lie in the interval \code{[-lambda / (expm1(lambda) - lambda), 0]}.
+ The resulting probability of a unit count is \emph{less than}
+ the nominal positive Poisson value, and the use of \code{pstr1} to
+ stand for the probability of a structural 1 loses its
+ meaning.
+%
+%
+%
+ When \code{pstr1} equals \code{-lambda / (expm1(lambda) - lambda)}
+ this corresponds to the 0- and 1-truncated Poisson distribution.
+
+
+}
+
+\seealso{
+ \code{\link{Pospois}},
+ \code{\link{oipospoisson}},
+ \code{\link{pospoisson}},
+ \code{\link[stats:Poisson]{dpois}},
+ \code{\link{poissonff}}.
+
+
+}
+\examples{
+lambda <- 3; pstr1 <- 0.2; x <- (-1):7
+(ii <- doipospois(x, lambda, pstr1 = pstr1))
+table(roipospois(100, lambda, pstr1 = pstr1))
+round(doipospois(1:10, lambda, pstr1 = pstr1) * 100) # Should be similar
+
+\dontrun{ x <- 0:10
+par(mfrow = c(2, 1)) # One-Inflated Positive Poisson
+barplot(rbind(doipospois(x, lambda, pstr1 = pstr1), dpospois(x, lambda)),
+ beside = TRUE, col = c("blue", "orange"),
+ main = paste("OIPP(", lambda, ", pstr1 = ", pstr1, ") (blue) vs",
+ " PosPoisson(", lambda, ") (orange)", sep = ""),
+ names.arg = as.character(x))
+
+deflat.limit <- -lambda / (expm1(lambda) - lambda)  # 1-deflated Pos Poisson
+newpstr1 <- round(deflat.limit, 3) + 0.001 # Inside and near the boundary
+barplot(rbind(doipospois(x, lambda, pstr1 = newpstr1),
+ dpospois(x, lambda)),
+ beside = TRUE, col = c("blue","orange"),
+ main = paste("ODPP(", lambda, ", pstr1 = ", newpstr1, ") (blue) vs",
+ " PosPoisson(", lambda, ") (orange)", sep = ""),
+ names.arg = as.character(x)) }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/oipospoisson.Rd b/man/oipospoisson.Rd
new file mode 100644
index 0000000..32387e3
--- /dev/null
+++ b/man/oipospoisson.Rd
@@ -0,0 +1,91 @@
+\name{oipospoisson}
+\alias{oipospoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-inflated Positive Poisson Distribution Family Function }
+\description{
+ Fits a 1-inflated positive Poisson distribution.
+}
+\usage{
+oipospoisson(lpstr1 = "logit", llambda = "loge",
+ type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"),
+ ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20,
+ imethod = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lpstr1, llambda}{
+ For \code{lpstr1}: the same idea as \code{\link{zipoisson}}
+ except it applies to a structural 1.
+
+
+ }
+ \item{ilambda, gpstr1, gprobs.y, imethod}{
+ For initial values.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+ }
+ \item{type.fitted, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+ }
+
+
+}
+\details{
+ The 1-inflated positive Poisson distribution is a mixture
+ distribution of the
+ positive (0-truncated) Poisson
+ distribution with some probability of obtaining a (structural) 1.
+ Thus there are two sources for obtaining the value 1.
+ It is similar to a zero-inflated Poisson model, except
+ the Poisson is replaced by a positive Poisson and the 0 is replaced
+ by 1.
+ This distribution is written here
+ in a way that retains a similar notation to the
+ zero-inflated Poisson, i.e., the
+ probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}.
+ See \code{\link{zipoisson}}.
+
+
+ This family function can handle multiple responses.
+
+
+}
+\section{Warning }{
+ Under- or over-flow may occur if the data is ill-conditioned.
+
+}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+%\note{
+%}
+\seealso{
+ \code{\link{roipospois}},
+ \code{\link{pospoisson}},
+ \code{\link{zipoisson}},
+ \code{\link{poissonff}},
+ \code{\link{simulate.vlm}}.
+
+
+}
+\examples{ set.seed(1)
+pdata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data
+pdata <- transform(pdata, pstr1 = 0.5, lambda = exp(3 - x2))
+pdata <- transform(pdata, y1 = roipospois(nn, lambda, pstr1 = pstr1))
+with(pdata, table(y1))
+fit1 <- vglm(y1 ~ x2, oipospoisson, data = pdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/oxtemp.Rd b/man/oxtemp.Rd
index eb1c43e..da43942 100644
--- a/man/oxtemp.Rd
+++ b/man/oxtemp.Rd
@@ -27,7 +27,7 @@
% \references{
% }
\examples{
-\dontrun{ fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE) }
+\dontrun{ fit <- vglm(maxtemp ~ 1, gevff, data = oxtemp, trace = TRUE) }
}
\keyword{datasets}
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 3225657..ebf9a1d 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -11,7 +11,8 @@
\usage{
paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
- gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
+ gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
+ zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
% zero = ifelse(lss, -2, -1)
diff --git a/man/plotrcim0.Rd b/man/plotrcim0.Rd
index bf99532..583dd48 100644
--- a/man/plotrcim0.Rd
+++ b/man/plotrcim0.Rd
@@ -1,8 +1,7 @@
\name{plotrcim0}
\alias{plotrcim0}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{
- Main effects plot for a Row-Column Interaction Model (RCIM)
+\title{ Main Effects Plot for a Row-Column Interaction Model (RCIM)
}
\description{
@@ -216,8 +215,9 @@ ii at post # Endowed with additional information
}
# Negative binomial example
+\dontrun{
fit1 <- rcim(alcoff.e, negbinomial, trace = TRUE)
-\dontrun{ plot(fit1, ylim = c(-2, 2)) }
+plot(fit1, ylim = c(-2, 2)) }
# Univariate normal example
fit2 <- rcim(alcoff.e, uninormal, trace = TRUE)
@@ -229,9 +229,10 @@ fit3 <- rcim(alcoff.e, alaplace1(tau = 0.5), maxit = 1000, trace = FALSE)
plot(fit3, ylim = c(-200, 250)) }
# Zero-inflated Poisson example on "crashp" (no 0s in alcoff)
+\dontrun{
cbind(rowSums(crashp)) # Easy to see the data
cbind(colSums(crashp)) # Easy to see the data
fit4 <- rcim(Rcim(crashp, rbaseline = "5", cbaseline = "Sun"),
zipoissonff, trace = TRUE)
-\dontrun{ plot(fit4, ylim = c(-3, 3)) }
+plot(fit4, ylim = c(-3, 3)) }
}
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index e24c92c..bd58952 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -135,7 +135,7 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
Used if \code{xij} of \code{\link{vglm.control}} was used,
this chooses which inner argument the component is plotted against.
This argument is related to \code{raw = TRUE} and terms such as
- \code{NS(dum1,dum2)} and constraint matrices that have more than
+ \code{NS(dum1, dum2)} and constraint matrices that have more than
one column. The default would plot the smooth against \code{dum1}
but setting \code{varxij = 2} could mean plotting the smooth against
\code{dum2}.
diff --git a/man/plotvglm.Rd b/man/plotvglm.Rd
index 60c0a66..5a17ac2 100644
--- a/man/plotvglm.Rd
+++ b/man/plotvglm.Rd
@@ -3,77 +3,65 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Plots for VGLMs }
\description{
- Currently not working, this function can be used to feed
- the object to the VGAM plotting function.
- In the future some diagnostic plots will be plotted.
+ Currently this function plots the Pearson residuals versus
+ the linear predictors (\eqn{M} plots) and
+ plots the Pearson residuals versus
+ the hat values (\eqn{M} plots).
}
\usage{
-plotvglm(x, type = c("vglm", "vgam"),
- newdata = NULL, y = NULL, residuals = NULL,
- rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
- offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- type.residuals = c("deviance", "working", "pearson", "response"),
- plot.arg = TRUE, which.term = NULL, which.cf = NULL,
- control = plotvgam.control(...), varxij = 1, ...)
+plotvglm(x, which = "(All)", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
- Same as \code{\link{plotvgam}}.
-
-
- }
- \item{type}{
- Default is the first choice.
- Currently the first choice gives an error (not written yet).
- If \code{"vgam"} then all the arguments are fed into
- \code{\link{plotvgam}}.
-
-
- }
-
- \item{newdata, y, residuals, rugplot}{
- Same as \code{\link{plotvgam}}.
-
+ An object of class \code{"vglm"} (see \code{\link{vglm-class}})
+ or inherits from that class.
+
+
+% Same as \code{\link{plotvgam}}.
- }
- \item{se, scale, raw, offset.arg}{
- Same as \code{\link{plotvgam}}.
}
- \item{deriv.arg, overlay, type.residuals}{
- Same as \code{\link{plotvgam}}.
+ \item{which}{
+ If a subset of the plots is required, specify a subset of the
+ numbers \code{1:(2*M)}.
+ The default is to plot them all.
- }
- \item{plot.arg, which.term, which.cf, control}{
- Same as \code{\link{plotvgam}}.
}
- \item{\dots, varxij}{
- Same as \code{\link{plotvgam}}.
+ \item{\dots}{
+ Arguments fed into the primitive \code{\link[graphics]{plot}}
+ functions.
}
}
\details{
- Currently this function has not been written.
- When this is done some diagnostic plots based on
- residuals and hatvalues will be done.
- In the meanwhile, this function can be used to
- call the plotting function for \code{\link{vgam}} objects.
+ This function is under development.
+ Currently it plots the Pearson residuals
+ against the predicted
+ values (on the transformed scale) and the hat values.
+ There are \eqn{2M} plots in total, therefore
+ users should call \code{\link[graphics]{par}}
+ to assign, e.g., the \code{mfrow} argument.
+ Note: Section 3.7 of Yee (2015) describes the
+ Pearson residuals and hat values for VGLMs.
}
\value{
- Same as \code{\link{plotvgam}}.
+ Returns the object invisibly.
+
+
+% Same as \code{\link{plotvgam}}.
}
%\references{
%}
-%\author{ Thomas W. Yee }
+\author{ T. W. Yee }
%\note{
% \code{plotvglm()} is quite buggy at the moment.
@@ -94,15 +82,22 @@ plotvglm(x, type = c("vglm", "vgam"),
}
\examples{
-coalminers <- transform(coalminers, Age = (age - 42) / 5)
-fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ sm.bs(Age),
- binom2.or(zero = NULL), data = coalminers)
-\dontrun{ par(mfrow = c(1, 3))
-plot(fit, type = "vgam", se = TRUE, ylim = c(-3, 2), las = 1)
-plot(fit, type = "vgam", se = TRUE, which.cf = 1:2,
- lcol = "blue", scol = "orange", ylim = c(-3, 2))
-plot(fit, type = "vgam", se = TRUE, which.cf = 1:2,
- lcol = "blue", scol = "orange", overlay = TRUE) }
+\dontrun{
+ndata <- data.frame(x2 = runif(nn <- 200))
+ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)))
+fit1 <- vglm(y1 ~ x2, negbinomial, data = ndata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+par(mfrow = c(2, 2))
+plot(fit1)
+
+# Manually produce the four plots
+plot(fit1, which = 1, col = "blue", las = 1, main = "main1")
+abline(h = 0, lty = "dashed", col = "gray50")
+plot(fit1, which = 2, col = "blue", las = 1, main = "main2")
+abline(h = 0, lty = "dashed", col = "gray50")
+plot(fit1, which = 3, col = "blue", las = 1, main = "main3")
+plot(fit1, which = 4, col = "blue", las = 1, main = "main4")
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index f0774fb..fcc9bab 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -136,7 +136,7 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
}
\section{Warning }{
- With a multivariate response, assigning a known dispersion parameter
+ With multiple responses, assigning a known dispersion parameter
for \emph{each} response is not handled well yet. Currently, only
a single known dispersion parameter is handled well.
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index e00d174..9189e7c 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -28,7 +28,9 @@ rposbinom(n, size, prob)
}
\item{size}{number of trials.
It is the \eqn{N} symbol in the formula
- given in \code{\link{posbinomial}}.
+ given in \code{\link{posbinomial}} and
+ should be positive.
+
}
\item{prob}{probability of success on each trial.
@@ -141,7 +143,7 @@ pdata <- transform(pdata, y1 = rposbinom(nn, size = sizev1, prob = prob1),
y2 = rposbinom(nn, size = sizev2, prob = prob2))
with(pdata, table(y1))
with(pdata, table(y2))
-# Multivariate response
+# Multiple responses
fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(multiple.responses = TRUE),
trace = TRUE, data = pdata, weight = cbind(sizev1, sizev2))
coef(fit2, matrix = TRUE)
diff --git a/man/posgeomUC.Rd b/man/posgeomUC.Rd
index b794903..e9031e1 100644
--- a/man/posgeomUC.Rd
+++ b/man/posgeomUC.Rd
@@ -5,7 +5,7 @@
\alias{qposgeom}
\alias{rposgeom}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Positive-geometric Distribution }
+\title{ Positive-Geometric Distribution }
\description{
Density, distribution function, quantile function and random generation
for the positive-geometric distribution.
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 4aadb12..13eed4f 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -31,14 +31,23 @@ rposnegbin(n, size, prob = NULL, munb = NULL)
(see \code{\link[stats:NegBinomial]{dnbinom}}).
Some arguments have been renamed slightly.
+
% This is called \eqn{\theta}{theta} in the \code{\link[MASS]{rnegbin}}
% function in the \code{MASS} library.
+
Short vectors are recycled.
The parameter \code{1/size} is known as a dispersion parameter;
as \code{size} approaches infinity, the negative binomial distribution
approaches a Poisson distribution.
+
+
+ Note that \code{prob} must lie in \eqn{(0,1)}, otherwise a
+ \code{NaN} is returned.
+
+
+
}
}
\details{
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index fd2ff9c..c6e02d7 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -9,12 +9,12 @@
}
\usage{
posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
- nsimEIM = 500, cutoff.prob = 0.999,
- eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
- lmunb = "loge", lsize = "loge",
- imethod = 1, imunb = NULL, probs.y = 0.35,
- ishrinkage = 0.95, isize = NULL,
- gsize.mux = exp((-12:6)/2))
+ mds.min = 0.001, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-07, max.support = 4000, max.chunk.MB = 30,
+ lmunb = "loge", lsize = "loge", imethod = 1,
+ imunb = NULL, iprobs.y = NULL,
+ gprobs.y = (0:9)/10, isize = NULL,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -49,7 +49,7 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
}
- \item{probs.y, cutoff.prob}{
+ \item{mds.min, iprobs.y, cutoff.prob}{
Similar to \code{\link{negbinomial}}.
@@ -64,9 +64,10 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
}
- \item{ishrinkage, imethod}{
+ \item{imethod, gprobs.y}{
See \code{\link{negbinomial}}.
+
}
\item{type.fitted}{
See \code{\link{CommonVGAMffArguments}} for details.
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index 3f91325..cb0d5c3 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -46,7 +46,12 @@ rpospois(n, lambda)
As \eqn{\lambda}{lambda} increases, the positive-Poisson and Poisson
distributions become more similar.
Unlike similar functions for the Poisson distribution, a zero value
- of \code{lambda} is not permitted here.
+ of \code{lambda} returns a \code{NaN}.
+
+
+
+% Unlike similar functions for the Poisson distribution, a zero value
+% of \code{lambda} is not permitted here.
}
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index 644262f..6eceb7f 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -74,12 +74,14 @@ The equilibrium size distribution of freely-forming groups.
}
\author{ Thomas W. Yee }
\note{
- This family function can handle a multivariate response.
+ This family function can handle multiple responses.
+
Yet to be done: a \code{quasi.pospoisson} which estimates a dispersion
parameter.
+
}
\seealso{
\code{\link{Pospois}},
diff --git a/man/ps.Rd b/man/ps.Rd
new file mode 100644
index 0000000..fa74a2b
--- /dev/null
+++ b/man/ps.Rd
@@ -0,0 +1,267 @@
+\name{ps}
+\alias{ps}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Defining Penalized Spline Smooths in VGAM Formulas }
+\description{
+ \code{ps} is used in the definition of P-spline smooth terms within
+ \code{vgam} formulas.
+
+
+}
+\usage{
+ps(x, ..., ps.intervals = NULL, lambda = 0, degree = 2,
+ order = 2, ridge.adj = 1e-5, ridge.inv = 0.0001,
+ spillover = 0.01, maxlambda = 1e4)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+
+ \item{x}{
+ covariate (abscissae) to be smoothed.
+ Also called the regressor.
+ If the \code{xij} facility is used then more
+ covariates are needed in the \code{\dots} argument.
+
+
+
+% Currently at least 7 unique \code{x} values are needed.
+
+
+ }
+ \item{\dots}{
+ Used to accommodate the other \eqn{M-1} covariates
+ when the \code{xij} facility is used.
+ See Section 3.4.4 of Yee (2015) for something very similar.
+    Because this argument occupies the second position, any
+    later arguments must be specified by their full names if used,
+    e.g., \code{ps.intervals} and not \code{ps.int}.
+ See the example below.
+ In the example below,
+ the term in the main formula is
+ \code{ps(gcost.air, gcost.trn, gcost.bus)}
+ and one might be tempted to use something like
+ \code{ps(gcost)} to represent that \code{xij} term.
+ However, this is not recommended because
+ \code{ps(gcost)} might not have the same number of columns
+ as \code{ps(gcost.air, gcost.trn, gcost.bus)} etc.
+ That is, it is best to select one of the diagonal elements
+ of the block matrix to represent that term.
+
+
+
+ }
+
+
+ \item{ps.intervals}{
+ the number of equally-spaced B-spline intervals.
+ Note that the number of knots is equal to
+ \code{ps.intervals + 2*degree + 1}.
+ The default, signified by \code{NULL}, means that
+ \code{ceiling(1.5 * log(length(unique(x.index))))}
+ is used, where \code{x.index} is the combined data.
+ This is not guaranteed to work on every data set, and
+ it might change in the future.
+
+
+
+ }
+  \item{lambda, maxlambda}{
+    \code{lambda} is the
+    non-negative regularization parameter for the difference penalty,
+    whose values should be less than \code{maxlambda}.
+    Can be a vector.   % zz.
+
+
+  }
+ \item{degree}{
+ degree of B-spline basis. Usually this will be 2 or 3; and
+ the values 1 or 4 might possibly be used.
+
+
+ }
+ \item{order}{
+ order of difference penalty (0 is the ridge penalty).
+
+
+ }
+ \item{ridge.adj, ridge.inv}{
+ small positive numbers to stabilize
+ linear dependencies among B-spline bases.
+
+
+ }
+ \item{spillover}{
+ small positive proportion of the range used on
+ the outside of the boundary values.
+
+
+ }
+}
+\details{
+ This function is currently used by \code{\link{vgam}} to
+ allow automatic smoothing parameter selection based on
+  P-splines and minimizing a UBRE quantity.
+ It is recommended above \code{\link{s}} also because backfitting
+ is not required.
+
+
+
+% Also, if \eqn{n} is the number of \emph{distinct} abscissae, then
+% \code{ps} will fail if \eqn{n < 7}.
+
+
+
+ Unlike \code{s}, which is symbolic and does not perform any smoothing itself,
+ this function does compute the penalized spline when
+ used by \code{\link{vgam}}.
+ When this function is used within \code{\link{vgam}}, automatic
+ smoothing parameter selection is implemented by calling
+ \code{\link[mgcv]{magic}} after the necessary link-ups are done.
+
+
+
+ This function is smart; it can be used for smart prediction
+ (Section 18.6 of Yee (2015)).
+
+
+
+}
+\value{
+ A matrix with attributes that are (only) used by \code{\link{vgam}}.
+ The number of rows of the matrix is \code{length(x)} and
+ the number of columns is \code{ps.intervals + degree - 1}.
+
+
+}
+\references{
+
+
+Eilers, P. H. C. and Marx, B. D. (2002).
+Generalized Linear Additive Smooth Structures.
+\emph{Journal of Computational and Graphical Statistics},
+\bold{11}(4): 758--783.
+
+
+
+Marx, B. D. and Eilers, P. H. C. (1998).
+Direct generalized linear modeling
+with penalized likelihood.
+\emph{CSDA}, \bold{28}(2): 193--209.
+
+
+
+Eilers, P. H. C. and Marx, B. D. (1996).
+Flexible smoothing with B-splines
+and penalties (with comments and rejoinder).
+\emph{Statistical Science}, \bold{11}(2): 89--121.
+
+
+
+Wood, S. N. (2004).
+Stable and efficient multiple smoothing parameter estimation
+for generalized additive models.
+\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686.
+
+
+
+}
+\author{
+ B. D. Marx wrote the original function.
+ Subsequent edits were made by T. W. Yee and C. Somchit.
+
+
+}
+\note{
+ This function is currently under development and
+ may change in the future.
+ In particular, the default for \code{ps.intervals} is
+ subject to change.
+
+
+}
+
+% ~Make other sections like WARNING with \section{WARNING }{....} ~
+
+\seealso{
+ \code{\link{vgam}},
+ \code{\link{s}},
+ \code{\link{smartpred}},
+ \code{\link{is.smart}},
+ \code{\link[splines]{splineDesign}},
+ \code{\link[splines]{bs}},
+ \code{\link[mgcv]{magic}}.
+
+
+
+}
+
+\examples{
+ps(runif(10))
+ps(runif(10), ps.intervals = 5)
+
+\dontrun{
+data("TravelMode", package = "AER") # Need to install "AER" first
+air.df <- subset(TravelMode, mode == "air") # Form 4 smaller data frames
+bus.df <- subset(TravelMode, mode == "bus")
+trn.df <- subset(TravelMode, mode == "train")
+car.df <- subset(TravelMode, mode == "car")
+TravelMode2 <- data.frame(income = air.df$income,
+ wait.air = air.df$wait - car.df$wait,
+ wait.trn = trn.df$wait - car.df$wait,
+ wait.bus = bus.df$wait - car.df$wait,
+ gcost.air = air.df$gcost - car.df$gcost,
+ gcost.trn = trn.df$gcost - car.df$gcost,
+ gcost.bus = bus.df$gcost - car.df$gcost,
+ wait = air.df$wait) # Value is unimportant
+TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode # The response
+TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0,
+ incom.bus = 0)
+set.seed(1)
+TravelMode2 <- transform(TravelMode2,
+ junkx2 = runif(nrow(TravelMode2)))
+
+tfit2 <-
+ vgam(mode ~ ps(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) +
+ ps(incom.air, incom.trn, incom.bus) + wait ,
+ crit = "coef",
+ multinomial(parallel = FALSE ~ 1), data = TravelMode2,
+ xij = list(ps(gcost.air, gcost.trn, gcost.bus) ~
+ ps(gcost.air, gcost.trn, gcost.bus) +
+ ps(gcost.trn, gcost.bus, gcost.air) +
+ ps(gcost.bus, gcost.air, gcost.trn),
+ ps(incom.air, incom.trn, incom.bus) ~
+ ps(incom.air, incom.trn, incom.bus) +
+ ps(incom.trn, incom.bus, incom.air) +
+ ps(incom.bus, incom.air, incom.trn),
+ wait ~ wait.air + wait.trn + wait.bus),
+ form2 = ~ ps(gcost.air, gcost.trn, gcost.bus) +
+ ps(gcost.trn, gcost.bus, gcost.air) +
+ ps(gcost.bus, gcost.air, gcost.trn) +
+ wait +
+ ps(incom.air, incom.trn, incom.bus) +
+ ps(incom.trn, incom.bus, incom.air) +
+ ps(incom.bus, incom.air, incom.trn) +
+ junkx2 + ns(junkx2, 4) +
+ incom.air + incom.trn + incom.bus +
+ gcost.air + gcost.trn + gcost.bus +
+ wait.air + wait.trn + wait.bus)
+par(mfrow = c(2, 2))
+plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4))
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+
+% binom2.or(exchangeable = TRUE ~ s(x2, 3))
+
+
+
+
+
+
+
+
+
+
+
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 04cd0e5..b10f17a 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -1,7 +1,7 @@
\name{qrrvglm.control}
\alias{qrrvglm.control}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Control function for QRR-VGLMs (CQO) }
+\title{ Control Function for QRR-VGLMs (CQO) }
\description{
Algorithmic constants and parameters for a constrained quadratic
ordination (CQO), by fitting a \emph{quadratic reduced-rank vector
@@ -22,7 +22,7 @@ qrrvglm.control(Rank = 1,
FastAlgorithm = TRUE,
GradientFunction = TRUE,
Hstep = 0.001,
- isd.latvar = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
+ isd.latvar = rep_len(c(2, 1, rep_len(0.5, Rank)), Rank),
iKvector = 0.1,
iShape = 0.1,
ITolerances = NULL,
@@ -30,7 +30,7 @@ qrrvglm.control(Rank = 1,
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
- MUXfactor = rep(7, length = Rank),
+ MUXfactor = rep_len(7, Rank),
noRRR = ~ 1, Norrr = NA,
optim.maxit = 20,
Parscale = if (I.tolerances) 0.001 else 1.0,
diff --git a/man/qtplot.gumbel.Rd b/man/qtplot.gumbel.Rd
index 870bc3f..e90831d 100644
--- a/man/qtplot.gumbel.Rd
+++ b/man/qtplot.gumbel.Rd
@@ -1,6 +1,6 @@
\name{qtplot.gumbel}
\alias{qtplot.gumbel}
-\alias{qtplot.egumbel}
+\alias{qtplot.gumbelff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Quantile Plot for Gumbel Regression }
\description{
@@ -21,8 +21,9 @@ qtplot.gumbel(object, show.plot = TRUE,
\item{object}{
A \pkg{VGAM} extremes model of the
Gumbel type, produced by modelling functions such as \code{\link{vglm}}
- and \code{\link{vgam}} with a family function either
- \code{"gumbel"} or \code{"egumbel"}.
+ and \code{\link{vgam}}, and with a family function that is either
+ \code{\link{gumbel}} or \code{\link{gumbelff}}.
+
}
\item{show.plot}{
diff --git a/man/reciprocal.Rd b/man/reciprocal.Rd
index 060e377..03bff63 100644
--- a/man/reciprocal.Rd
+++ b/man/reciprocal.Rd
@@ -2,7 +2,7 @@
\alias{reciprocal}
\alias{negreciprocal}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Reciprocal link function }
+\title{ Reciprocal Link Function }
\description{
Computes the reciprocal transformation, including its inverse and the
first two derivatives.
diff --git a/man/rlplot.egev.Rd b/man/rlplot.gevff.Rd
similarity index 93%
rename from man/rlplot.egev.Rd
rename to man/rlplot.gevff.Rd
index 578c81b..eb32a08 100644
--- a/man/rlplot.egev.Rd
+++ b/man/rlplot.gevff.Rd
@@ -1,5 +1,5 @@
-\name{rlplot.egev}
-\alias{rlplot.egev}
+\name{rlplot.gevff}
+\alias{rlplot.gevff}
\alias{rlplot.gev}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Return Level Plot for GEV Fits }
@@ -8,7 +8,7 @@
}
\usage{
-rlplot.egev(object, show.plot = TRUE,
+rlplot.gevff(object, show.plot = TRUE,
probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
add.arg = FALSE, xlab = if(log.arg) "Return Period (log-scale)" else
"Return Period", ylab = "Return Level",
@@ -24,7 +24,7 @@ rlplot.egev(object, show.plot = TRUE,
A \pkg{VGAM} extremes model of the
GEV-type, produced by \code{\link{vglm}}
with a family function either
- \code{"gev"} or \code{"egev"}.
+ \code{"gev"} or \code{"gevff"}.
}
\item{show.plot}{
@@ -130,18 +130,18 @@ London: Springer-Verlag.
}
\seealso{
- \code{\link{egev}}.
+ \code{\link{gevff}}.
}
\examples{
gdata <- data.frame(y = rgev(n <- 100, scale = 2, shape = -0.1))
-fit <- vglm(y ~ 1, egev, data = gdata, trace = TRUE)
+fit <- vglm(y ~ 1, gevff, data = gdata, trace = TRUE)
# Identity link for all parameters:
-fit2 <- vglm(y ~ 1, egev(lshape = identitylink, lscale = identitylink,
- iscale = 10), data = gdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, gevff(lshape = identitylink, lscale = identitylink,
+ iscale = 10), data = gdata, trace = TRUE)
coef(fit2, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 2))
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index 331b56f..6623039 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -1,7 +1,7 @@
\name{rrvglm.control}
\alias{rrvglm.control}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Control function for rrvglm }
+\title{ Control Function for rrvglm() }
\description{
Algorithmic constants and parameters for running \code{rrvglm} are set
using this function.
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
index 5385108..93f9078 100644
--- a/man/rrvglm.optim.control.Rd
+++ b/man/rrvglm.optim.control.Rd
@@ -1,7 +1,7 @@
\name{rrvglm.optim.control}
\alias{rrvglm.optim.control}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Control function for rrvglm() calling optim() }
+\title{ Control Function for rrvglm() Calling optim() }
\description{
Algorithmic constants and parameters for running \code{optim} within
\code{rrvglm} are set using this function.
diff --git a/man/s.Rd b/man/s.Rd
index b2aaa41..3ce5594 100644
--- a/man/s.Rd
+++ b/man/s.Rd
@@ -1,10 +1,12 @@
\name{s}
\alias{s}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Defining smooths in VGAM formulae }
+\title{ Defining Smooths in VGAM Formulas }
\description{
\code{s} is used in the definition of (vector) smooth terms within
- \code{vgam} formulae.
+ \code{vgam} formulas.
+
+
}
\usage{
s(x, df = 4, spar = 0, ...)
@@ -102,6 +104,15 @@ Vector generalized additive models.
number of unique abscissae.
+
+ Currently a bug relating to the use of \code{s()} is that
+ only constraint matrices whose columns are orthogonal are handled
+ correctly. If any \code{s()} term has a constraint matrix that
+ does not satisfy this condition then a warning is issued.
+ See \code{\link{is.buggy}} for more information.
+
+
+
An alternative to using
\code{s} with \code{\link{vgam}} is
\code{\link[splines]{bs}}
@@ -111,6 +122,7 @@ Vector generalized additive models.
convergence is difficult.
+
}
% ~Make other sections like WARNING with \section{WARNING }{....} ~
@@ -119,6 +131,7 @@ Vector generalized additive models.
\code{\link{vgam}},
\code{\link{is.buggy}},
\code{\link{vsmooth.spline}}.
+% \code{\link{ps}}.
}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 6080be8..31cd360 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -134,10 +134,10 @@ set.seed(3)
sdata <- data.frame(y1 = rbeta(1000, 6, 6))
# hist(with(sdata, y1))
if (FALSE) {
-# This fails
+# These struggle
fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE)
fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE,
- maxit = 6, crit = "coef")
+ crit = "coef")
Coef(fit1)
}
# Try this remedy:
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 11c5458..a8c4ace 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -15,9 +15,9 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
label.sites = FALSE, sitenames = rownames(object@y),
axes.equal = TRUE, cex = par()$cex,
col = 1:(nos * (nos - 1)/2), log = "",
- lty = rep(par()$lty, length.out = nos * (nos - 1)/2),
- lwd = rep(par()$lwd, length.out = nos * (nos - 1)/2),
- tcol = rep(par()$col, length.out = nos * (nos - 1)/2),
+ lty = rep_len(par()$lty, nos * (nos - 1)/2),
+ lwd = rep_len(par()$lwd, nos * (nos - 1)/2),
+ tcol = rep_len(par()$col, nos * (nos - 1)/2),
xlab = NULL, ylab = NULL,
main = "", type = "b", check.ok = TRUE, ...)
}
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 052833c..74c6c9b 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,6 +4,8 @@
%\alias{ccoef-method}
%
%
+% 201604:
+\alias{plot,psvgam,ANY-method}
% 201602:
\alias{predictvglmS4VGAM,ANY,binom2.or-method}
% 201601:
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 4f5594b..1184a53 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -172,6 +172,16 @@ The \code{VGAM} Package.
%contains further information and examples.
+
+
+%Wood, S. N. (2004).
+%Stable and efficient multiple smoothing parameter estimation
+%for generalized additive models.
+%\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686.
+
+
+
+
}
\author{ Thomas W. Yee }
@@ -236,6 +246,12 @@ The \code{VGAM} Package.
\code{\link{cao}}.
+
+% \code{\link{ps}},
+% \code{\link[mgcv]{magic}}.
+
+
+
}
\examples{ # Nonparametric proportional odds model
@@ -257,7 +273,7 @@ pfit$sigma
fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
binomialff(multiple.responses = TRUE), data = hunua)
coef(fit2, matrix = TRUE) # Not really interpretable
-\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
+\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
ooo <- with(hunua, order(altitude))
with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, 0.8),
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 74e84b0..8813c32 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -1,7 +1,7 @@
\name{vgam.control}
\alias{vgam.control}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Control function for vgam }
+\title{ Control Function for vgam() }
\description{
Algorithmic constants and parameters for running \code{\link{vgam}}
are set using this function.
@@ -13,7 +13,7 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
epsilon = 1e-07, maxit = 30, na.action = na.fail,
nk = NULL, save.weights = FALSE, se.fit = TRUE,
trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
- ...)
+ xij = NULL, ...)
}
%- maybe also `usage' for other objects documented here.
\arguments{
@@ -117,6 +117,12 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
%
% }
+ \item{xij}{
+ Same as \code{\link{vglm.control}}.
+
+
+
+ }
\item{\dots}{
other parameters that may be picked up from control
functions that are specific to the \pkg{VGAM} family function.
diff --git a/man/vglm.Rd b/man/vglm.Rd
index c282d95..d1375f8 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -447,6 +447,7 @@ The \code{VGAM} Package.
\code{\link{constraints.vlm}},
\code{\link{hatvaluesvlm}},
\code{\link{linkfun.vglm}},
+ \code{\link{plotvglm}},
\code{\link{predictvglm}},
\code{summary.vglm},
\code{AIC.vglm},
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index 350a938..d4d1ad3 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -1,7 +1,7 @@
\name{vglm.control}
\alias{vglm.control}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Control function for vglm }
+\title{ Control Function for vglm() }
\description{
Algorithmic constants and parameters for running \code{vglm} are set
using this function.
@@ -145,10 +145,12 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
to give \emph{every} term used by the model.
+
The function \code{\link{Select}} can be used to
select variables beginning with the same character string.
+
}
% \item{jix}{
% A formula or a list of formulas specifying
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 72b0327..0ef7f43 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -11,18 +11,20 @@
}
\usage{
zanegbinomial(zero = "size", type.fitted = c("mean", "munb", "pobs0"),
- nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
- max.support = 4000, max.chunk.MB = 30, lpobs0 = "logit",
- lmunb = "loge", lsize = "loge", imethod = 1, ipobs0 = NULL,
- imunb = NULL, probs.y = 0.35, ishrinkage = 0.95,
- isize = NULL, gsize.mux = exp((-12:6)/2))
+ mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+ imethod = 1, ipobs0 = NULL,
+ imunb = NULL, iprobs.y = NULL, gprobs.y = (0:9)/10,
+ isize = NULL, gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
type.fitted = c("mean", "munb", "pobs0", "onempobs0"),
isize = NULL, ionempobs0 = NULL, zero = c("size",
- "onempobs0"), probs.y = 0.35, cutoff.prob = 0.999,
- eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
- gsize.mux = exp((-12:6)/2), imethod = 1, imunb = NULL,
- nsimEIM = 500, ishrinkage = 0.95)
+ "onempobs0"), mds.min = 1e-3, iprobs.y = NULL, gprobs.y = (0:9)/10,
+ cutoff.prob = 0.999, eps.trig = 1e-7, max.support = 4000,
+ max.chunk.MB = 30, gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
+ imethod = 1, imunb = NULL,
+ nsimEIM = 500)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -100,23 +102,33 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
\item{nsimEIM, imethod}{
See \code{\link{CommonVGAMffArguments}}.
- }
- \item{ishrinkage}{
- See \code{\link{negbinomial}}
- and \code{\link{CommonVGAMffArguments}}.
}
+% \item{ishrinkage}{
+% See \code{\link{negbinomial}}
+% and \code{\link{CommonVGAMffArguments}}.
+
+
+% }
+
+
+ \item{iprobs.y, gsize.mux, gprobs.y}{
+ See \code{\link{negbinomial}}.
+% and \code{\link{CommonVGAMffArguments}}.
- \item{probs.y, cutoff.prob, gsize.mux, eps.trig}{
+ }
+ \item{cutoff.prob, eps.trig}{
See \code{\link{negbinomial}}.
% and \code{\link{CommonVGAMffArguments}}.
+
}
- \item{max.support, max.chunk.MB}{
+ \item{mds.min, max.support, max.chunk.MB}{
See \code{\link{negbinomial}}.
% and \code{\link{CommonVGAMffArguments}}.
+
}
}
@@ -133,6 +145,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
call the zero-altered negative binomial a \emph{hurdle} model.
+
For one response/species, by default, the three linear/additive
predictors
for \code{zanegbinomial()}
@@ -140,6 +153,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
log(munb), log(k))^T}. This vector is recycled for multiple species.
+
The \pkg{VGAM} family function \code{zanegbinomialff()} has a few
changes compared to \code{zanegbinomial()}.
These are:
@@ -163,6 +177,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
and \code{\link{vgam}}.
+
The \code{fitted.values} slot of the fitted object,
which should be extracted by the generic function \code{fitted}, returns
the mean \eqn{\mu}{mu} (default) which is given by
@@ -227,7 +242,7 @@ for counts with extra zeros.
one family function.
- This family function can handle a multivariate response, e.g., more
+ This family function can handle multiple responses, e.g., more
than one species.
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index cc87972..caee709 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -74,7 +74,7 @@ zapoissonff(llambda = "loge", lonempobs0 = "logit", type.fitted =
% number that is estimated. It is modelled as a function of the
% explanatory variables by \code{zero = NULL}. A negative value
% means that the value is recycled, so setting \eqn{-1} means
-% all \eqn{p_0}{pobs0} are intercept-only (for multivariate responses).
+% all \eqn{p_0}{pobs0} are intercept-only (for multiple responses).
}
@@ -193,7 +193,7 @@ A Bayesian analysis of zero-inflated generalized Poisson model.
This family function effectively combines \code{\link{pospoisson}}
and \code{\link{binomialff}} into one family function.
- This family function can handle a multivariate response,
+ This family function can handle multiple responses,
e.g., more than one species.
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index ff7506e..92cb7d8 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -6,11 +6,11 @@
Estimates the parameter of the zeta distribution.
}
\usage{
-zetaff(link = "loge", init.p = NULL, zero = NULL)
+zetaff(link = "loge", ishape = NULL, gshape = exp(-3:4)/4, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, init.p, zero}{
+ \item{link, ishape, zero}{
These arguments apply to the (positive) parameter \eqn{p}.
See \code{\link{Links}} for more choices.
Choosing \code{\link{loglog}} constrains \eqn{p>1}, but
@@ -19,6 +19,11 @@ zetaff(link = "loge", init.p = NULL, zero = NULL)
}
+ \item{gshape}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+ }
}
\details{
In this long tailed distribution
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index 53f56a1..1333aa4 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -15,11 +15,19 @@
}
\usage{
dzibinom(x, size, prob, pstr0 = 0, log = FALSE)
-pzibinom(q, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
-qzibinom(p, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
+pzibinom(q, size, prob, pstr0 = 0)
+qzibinom(p, size, prob, pstr0 = 0)
rzibinom(n, size, prob, pstr0 = 0)
}
%- maybe also 'usage' for other objects documented here.
+
+
+%pzibinom(q, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
+%qzibinom(p, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
+
+
+
+
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
@@ -27,8 +35,9 @@ rzibinom(n, size, prob, pstr0 = 0)
given in \code{\link{zibinomial}}. }
\item{prob}{probability of success on each trial. }
\item{n}{ Same as in \code{\link[stats]{runif}}. }
- \item{log, log.p, lower.tail}{ Arguments that are passed on to
- \code{\link[stats:Binomial]{pbinom}}.}
+% \item{log, log.p, lower.tail}{
+ \item{log}{
+ Same as \code{\link[stats:Binomial]{pbinom}}.}
\item{pstr0}{
Probability of a structural zero (i.e., ignoring the binomial distribution),
called \eqn{\phi}{phi}.
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index e91a1b6..0a1da1f 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -66,7 +66,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
% }
\item{multiple.responses}{
Logical. Currently it must be \code{FALSE} to mean the
- function does not handle multivariate responses. This
+ function does not handle multiple responses. This
is to remain compatible with the same argument in
\code{\link{binomialff}}.
@@ -214,7 +214,7 @@ fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE,
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
-fitted(fit, type = "pobs0") # Estimate of P(Y = 0)
+head(fitted(fit, type = "pobs0")) # Estimate of P(Y = 0)
head(fitted(fit))
with(zdata, mean(y)) # Compare this with fitted(fit)
summary(fit)
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index fc44414..9905dcd 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -12,19 +12,21 @@
zinegbinomial(zero = "size",
type.fitted = c("mean", "munb", "pobs0", "pstr0",
"onempstr0"),
- nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
- max.support = 4000, max.chunk.MB = 30,
+ mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
lpstr0 = "logit", lmunb = "loge", lsize = "loge",
imethod = 1, ipstr0 = NULL, imunb = NULL,
- probs.y = 0.35, ishrinkage = 0.95,
- isize = NULL, gsize.mux = exp((-12:6)/2))
+ iprobs.y = NULL, isize = NULL,
+ gprobs.y = (0:9)/10,
+ gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
type.fitted = c("mean", "munb", "pobs0", "pstr0",
"onempstr0"), imunb = NULL, isize = NULL, ionempstr0 =
NULL, zero = c("size", "onempstr0"), imethod = 1,
- ishrinkage = 0.95, probs.y = 0.35, cutoff.prob = 0.999,
+ iprobs.y = NULL, cutoff.prob = 0.999,
eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
- gsize.mux = exp((-12:6)/2), nsimEIM = 500)
+ gprobs.y = (0:9)/10, gsize.mux = exp((-12:6)/2),
+ mds.min = 1e-3, nsimEIM = 500)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -71,8 +73,8 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
\item{imethod}{
An integer with value \code{1} or \code{2} or \code{3} which
specifies the initialization method for the mean parameter.
- If failure to converge occurs try another value
- and/or else specify a value for \code{ishrinkage}.
+ If failure to converge occurs try another value.
+ See \code{\link{CommonVGAMffArguments}} for more information.
@@ -88,21 +90,26 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
}
- \item{ishrinkage, nsimEIM}{
+ \item{nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for information.
}
- \item{probs.y, cutoff.prob, max.support, max.chunk.MB }{
+ \item{iprobs.y, cutoff.prob, max.support, max.chunk.MB }{
See \code{\link{negbinomial}}
- and/or \code{\link{posnegbinomial}} for details,
+ and/or \code{\link{posnegbinomial}} for details.
+
+
+ }
+ \item{mds.min, eps.trig}{
+ See \code{\link{negbinomial}} for details.
}
- \item{gsize.mux, eps.trig}{
+ \item{gprobs.y, gsize.mux}{
These arguments relate to grid searching in the initialization process.
See \code{\link{negbinomial}}
- and/or \code{\link{posnegbinomial}} for details,
+ and/or \code{\link{posnegbinomial}} for details.
}
@@ -212,7 +219,6 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
\code{stepsize} (in \code{\link{vglm.control}}),
\code{imethod},
\code{imunb},
- \code{ishrinkage},
\code{ipstr0},
\code{isize}, and/or
\code{zero} if there are explanatory variables.
@@ -223,6 +229,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
% An infinite loop might occur if some of the fitted values
% (the means) are too close to 0.
+% \code{ishrinkage},
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index 5ff413e..afb4e71 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -1,14 +1,17 @@
\name{zipebcom}
\alias{zipebcom}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Exchangeable bivariate cloglog odds-ratio model from a
- zero-inflated Poisson distribution }
+\title{ Exchangeable Bivariate cloglog Odds-ratio Model From a
+ Zero-inflated Poisson Distribution }
+
+
\description{
Fits an exchangeable bivariate odds-ratio model to two binary
responses with a complementary log-log link.
The data are assumed to come from a zero-inflated Poisson distribution
that has been converted to presence/absence.
+
}
\usage{
zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
@@ -24,6 +27,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
Argument \code{imu12} may be of length 2 (one element for each response).
+
}
\item{lphi12}{
Link function applied to the \eqn{\phi}{phi} parameter of the
@@ -31,6 +35,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
See \code{\link{Links}} for more choices.
+
}
\item{loratio}{
Link function applied to the odds ratio.
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
index 62275b8..174b694 100644
--- a/man/zipoisUC.Rd
+++ b/man/zipoisUC.Rd
@@ -8,7 +8,8 @@
\title{ Zero-Inflated Poisson Distribution }
\description{
Density, distribution function, quantile function and random generation
- for the zero-inflated Poisson distribution with parameter \code{pstr0}.
+ for the zero-inflated and zero-deflated
+ Poisson distribution with parameter \code{pstr0}.
}
\usage{
@@ -29,6 +30,8 @@ rzipois(n, lambda, pstr0 = 0)
called \eqn{\phi}{phi}.
The default value of \eqn{\phi = 0}{phi = 0} corresponds
to the response having an ordinary Poisson distribution.
+ This argument may be negative to allow for 0-deflation, hence
+ its interpretation as a probability ceases.
}
\item{log}{ Logical. Return the logarithm of the answer? }
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 85c7184..a536759 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -4,8 +4,8 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Zero-Inflated Poisson Distribution Family Function }
\description{
- Fits a zero-inflated Poisson distribution by full maximum likelihood
- estimation.
+ Fits a zero-inflated or zero-deflated
+ Poisson distribution by full maximum likelihood estimation.
}
\usage{
@@ -182,21 +182,26 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
% the \code{misc} slot with component name \code{pstr0}.
- Although the functions in \code{\link{Zipois}}
- can handle the zero-\emph{deflated} Poisson distribution,
- this family function cannot estimate this very well in general.
- One sets \code{lpstr0 = identitylink}, however, the iterations
- might fall outside the parameter space.
- Practically, it is restricted to intercept-models only
- (see example below).
- Also, one might need inputting good initial values
- or using a simpler model to obtain initial values.
+ This family function can be used to estimate the 0-\emph{deflated} model,
+ hence \code{pstr0} is not to be interpreted as a probability.
+ One should set, e.g., \code{lpstr0 = "identitylink"}.
+ Likewise, the functions in \code{\link{Zipois}}
+ can handle the zero-deflated Poisson distribution too.
+ Although the iterations
+ might fall outside the parameter space, the \code{validparams} slot
+ should keep them inside.
+ A (somewhat) similar alternative for
+ zero-deflation is to try the zero-altered Poisson model
+ (see \code{\link{zapoisson}}).
+
+
+% Practically, it is restricted to intercept-models only
+% (see example below).
+% Also, one might need inputting good initial values
+% or using a simpler model to obtain initial values.
% If there is a covariate then it is best to
% constrain \code{pstr0} to be intercept-only, e.g.,
% by \code{zipoisson(lpstr0 = identitylink, zero = -1)}.
- A (somewhat) similar and more reliable method for
- zero-deflation is to try the zero-altered Poisson model
- (see \code{\link{zapoisson}}).
The use of this \pkg{VGAM} family function with \code{\link{rrvglm}}
@@ -290,21 +295,22 @@ fitted(fit)
with(abdata, weighted.mean(y, w)) # Compare this with fitted(fit)
summary(fit)
-# Example 4: zero-deflated model for an intercept-only data
+# Example 4: zero-deflated model for intercept-only data
zdata <- transform(zdata, lambda3 = loge(0.0, inverse = TRUE))
zdata <- transform(zdata, deflat.limit = -1 / expm1(lambda3)) # Boundary
# The 'pstr0' parameter is negative and in parameter space:
-zdata <- transform(zdata, usepstr0 = deflat.limit / 1.5)
+zdata <- transform(zdata, usepstr0 = deflat.limit / 2) # Not too near the boundary
zdata <- transform(zdata, y3 = rzipois(nn, lambda3, pstr0 = usepstr0))
head(zdata)
with(zdata, table(y3)) # A lot of deflation
-fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identitylink),
+fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = "identitylink"),
data = zdata, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
# Check how accurate it was:
zdata[1, "usepstr0"] # Answer
coef(fit3)[1] # Estimate
Coef(fit3)
+vcov(fit3) # Is positive-definite
# Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
set.seed(123)
diff --git a/man/zoabetaR.Rd b/man/zoabetaR.Rd
new file mode 100644
index 0000000..4651aa7
--- /dev/null
+++ b/man/zoabetaR.Rd
@@ -0,0 +1,107 @@
+\name{zoabetaR}
+\alias{zoabetaR}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero- and One-Inflated Beta Distribution Family Function }
+\description{
+ Estimation of the shape parameters of the two-parameter beta
+ distribution plus the probabilities of a 0 and/or a 1.
+
+}
+\usage{
+zoabetaR(lshape1 = "loge", lshape2 = "loge", lpobs0 = "logit",
+ lpobs1 = "logit", ishape1 = NULL, ishape2 = NULL, trim = 0.05,
+ type.fitted = c("mean", "pobs0", "pobs1", "beta.mean"),
+ parallel.shape = FALSE, parallel.pobs = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape1, lshape2, lpobs0, lpobs1}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+ See \code{\link{Links}} for more choices.
+
+
+ }
+ \item{ishape1, ishape2}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+
+ }
+ \item{trim, zero}{
+ Same as \code{\link{betaR}}.
+
+
+ }
+ \item{parallel.shape, parallel.pobs}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+ }
+ \item{type.fitted}{
+ The choice \code{"beta.mean"} means to return the mean of
+ the beta distribution; the 0s and 1s are ignored.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+ }
+}
+\details{
+ The standard 2-parameter beta distribution has a support on (0,1),
+ however, many datasets have 0 and/or 1 values too.
+ This family function handles 0s and 1s (at least one of
+ them must be present) in
+ the data set by modelling the probability of a 0 by a
+ logistic regression (default link is the logit), and similarly
+ for the probability of a 1. The remaining proportion,
+ \code{1-pobs0-pobs1},
+ of the data comes from a standard beta distribution.
+ This family function therefore extends \code{\link{betaR}}.
+ One has \eqn{M=3} or \eqn{M=4} per response.
+ Multiple responses are allowed.
+
+
+}
+\value{
+ Similar to \code{\link{betaR}}.
+
+
+}
+%\references{
+
+
+%}
+\author{ Thomas W. Yee and Xiangjie Xue. }
+%\note{
+%}
+
+
+\seealso{
+ \code{\link{Zoabeta}},
+ \code{\link{betaR}},
+ \code{\link{betaff}},
+ \code{\link[stats:Beta]{Beta}},
+ \code{\link{zipoisson}}.
+
+
+}
+\examples{
+nn <- 1000; set.seed(1)
+bdata <- data.frame(x2 = runif(nn))
+bdata <- transform(bdata,
+ pobs0 = logit(-2 + x2, inverse = TRUE),
+ pobs1 = logit(-2 + x2, inverse = TRUE))
+bdata <- transform(bdata,
+ y1 = rzoabeta(nn, shape1 = exp(1 + x2), shape2 = exp(2 - x2),
+ pobs0 = pobs0, pobs1 = pobs1))
+summary(bdata)
+fit1 <- vglm(y1 ~ x2, zoabetaR(parallel.pobs = TRUE),
+ data = bdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+summary(fit1)
+}
+\keyword{regression}
+
+
+% y1 = rbeta(nn, shape1 = exp(1 + x2), shape2 = exp(2 - x2))
+%rrr <- runif(nn)
+%bdata$y1[rrr < bdata$p0] <- 0
+%bdata$y1[rrr > 1 - bdata$p1] <- 1
diff --git a/man/ozibetaUC.Rd b/man/zoabetaUC.Rd
similarity index 60%
rename from man/ozibetaUC.Rd
rename to man/zoabetaUC.Rd
index 299c637..97141f9 100644
--- a/man/ozibetaUC.Rd
+++ b/man/zoabetaUC.Rd
@@ -1,9 +1,9 @@
-\name{Ozibeta}
-\alias{Ozibeta}
-\alias{dozibeta}
-\alias{pozibeta}
-\alias{qozibeta}
-\alias{rozibeta}
+\name{Zoabeta}
+\alias{Zoabeta}
+\alias{dzoabeta}
+\alias{pzoabeta}
+\alias{qzoabeta}
+\alias{rzoabeta}
\title{The Zero/One-Inflated Beta Distribution}
\description{
Density, distribution function, and random
@@ -12,23 +12,25 @@
}
\usage{
-dozibeta(x, shape1, shape2, pobs0 = 0, pobs1 = 0, log = FALSE,
+dzoabeta(x, shape1, shape2, pobs0 = 0, pobs1 = 0, log = FALSE,
tol = .Machine$double.eps)
-pozibeta(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
+pzoabeta(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps)
-qozibeta(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
+qzoabeta(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps)
-rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
+rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
tol = .Machine$double.eps)
}
\arguments{
\item{x, q, p, n}{Same as \code{\link[stats]{Beta}}. }
- \item{pobs0}{
- vector of probabilities that 0 are observed (\eqn{\omega_0}{omega_0}). }
- \item{pobs1}{
- vector of probabilities that 1 are observed (\eqn{\omega_1}{omega_1}). }
+ \item{pobs0, pobs1}{
+ vector of probabilities that 0 and 1 are observed
+ (\eqn{\omega_0}{omega_0}
+ and
+ \eqn{\omega_1}{omega_1}).
+ }
\item{shape1, shape2}{
Same as \code{\link[stats]{Beta}}.
@@ -43,7 +45,7 @@ rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
}
\item{tol}{
- Numeric, tolerance for testing equality with 0.
+ Numeric, tolerance for testing equality with 0 and 1.
}
@@ -51,10 +53,10 @@ rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
}
\value{
- \code{dozibeta} gives the density,
- \code{pozibeta} gives the distribution function,
- \code{qozibeta} gives the quantile, and
- \code{rozibeta} generates random deviates.
+ \code{dzoabeta} gives the density,
+ \code{pzoabeta} gives the distribution function,
+ \code{qzoabeta} gives the quantile, and
+ \code{rzoabeta} generates random deviates.
@@ -87,6 +89,7 @@ rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
%
%}
\seealso{
+ \code{\link{zoabetaR}},
\code{\link[base:Special]{beta}},
\code{\link{betaR}},
\code{\link{Betabinom}}.
@@ -95,27 +98,31 @@ rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
}
\examples{
\dontrun{
-set.seed(208); N <- 10000
-k <- rozibeta(N, 2, 3, 0.2, 0.2)
-hist(k, probability = TRUE, border = "blue",
- main = "Blue = inflated; orange = ordinary beta")
-sum(k == 0) / N # Proportion of 0
-sum(k == 1) / N # Proportion of 1
+N <- 1000; y <- rzoabeta(N, 2, 3, 0.2, 0.2)
+hist(y, probability = TRUE, border = "blue", las = 1,
+ main = "Blue = 0- and 1-altered; orange = ordinary beta")
+sum(y == 0) / N # Proportion of 0s
+sum(y == 1) / N # Proportion of 1s
Ngrid <- 1000
lines(seq(0, 1, length = Ngrid),
dbeta(seq(0, 1, length = Ngrid), 2, 3), col = "orange")
lines(seq(0, 1, length = Ngrid), col = "blue",
- dozibeta(seq(0, 1, length = Ngrid), 2 , 3, 0.2, 0.2))
-
-set.seed(1234); k <- runif(1000)
-sum(abs(qozibeta(k, 2, 3) - qbeta(k, 2, 3)) > .Machine$double.eps) # Should be 0
-sum(abs(pozibeta(k, 10, 7) - pbeta(k, 10, 7)) > .Machine$double.eps) # Should be 0
+ dzoabeta(seq(0, 1, length = Ngrid), 2 , 3, 0.2, 0.2))
}
}
\keyword{distribution}
-%dozibeta(c(-1, NA, 0.5, 2), 2, 3, 0.2, 0.2) # should be NA
-%dozibeta(0.5, c(NA, Inf), 4, 0.2, 0.1) # should be NA
-%dozibeta(0.5, 2.2, 4.3, NA, 0.3) # should be NA
-%dozibeta(0.5, 2, 3, 0.5, 0.6) # should NaN
+%dzoabeta(c(-1, NA, 0.5, 2), 2, 3, 0.2, 0.2) # should be NA
+%dzoabeta(0.5, c(NA, Inf), 4, 0.2, 0.1) # should be NA
+%dzoabeta(0.5, 2.2, 4.3, NA, 0.3) # should be NA
+%dzoabeta(0.5, 2, 3, 0.5, 0.6) # should be NaN
+
+
+%set.seed(1234); k <- runif(1000)
+%sum(abs(qzoabeta(k, 2, 3) - qbeta(k, 2, 3)) > .Machine$double.eps) # Should be 0
+%sum(abs(pzoabeta(k, 10, 7) - pbeta(k, 10, 7)) > .Machine$double.eps) # Should be 0
+
+
+
+
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list