[r-cran-vgam] 49/63: Import Upstream version 1.0-1
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:39 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 63ade0c8b532477aef48e492f79ce79531e2fd22
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:17:05 2017 +0100
Import Upstream version 1.0-1
---
DESCRIPTION | 8 +-
MD5 | 414 +++----
NAMESPACE | 29 +-
NEWS | 103 +-
R/aamethods.q | 66 ++
R/calibrate.q | 4 +-
R/cao.R | 2 +-
R/cao.fit.q | 10 +-
R/coef.vlm.q | 31 +-
R/cqo.R | 2 +-
R/family.actuary.R | 412 +++----
R/family.aunivariate.R | 164 ++-
R/family.basics.R | 126 +-
R/family.binomial.R | 242 +++-
R/family.bivariate.R | 301 +++--
R/family.categorical.R | 1105 +++++++++++++++--
R/family.censored.R | 163 +--
R/family.circular.R | 153 ++-
R/family.exp.R | 20 +-
R/family.extremes.R | 283 +++--
R/family.genetic.R | 29 +-
R/family.glmgam.R | 131 +-
R/family.loglin.R | 49 +-
R/family.math.R | 96 ++
R/family.mixture.R | 261 ++--
R/family.nonlinear.R | 38 +-
R/family.normal.R | 204 ++--
R/family.others.R | 525 +++++++-
R/family.positive.R | 761 +++++++++---
R/family.qreg.R | 307 +++--
R/family.rcim.R | 3 +-
R/family.rcqo.R | 2 +-
R/family.robust.R | 30 +-
R/family.rrr.R | 14 +-
R/family.sur.R | 5 +-
R/family.survival.R | 60 +-
R/family.ts.R | 95 +-
R/family.univariate.R | 1947 +++++++++++++++++-------------
R/family.zeroinf.R | 2638 +++++++++++++++++++++++++----------------
R/links.q | 79 +-
R/lrwaldtest.R | 6 +-
R/mux.q | 8 +-
R/plot.vglm.q | 2 +-
R/predict.vglm.q | 98 +-
R/predict.vlm.q | 14 +-
R/print.vglm.q | 27 +
R/qtplot.q | 8 +-
R/residuals.vlm.q | 4 +-
R/rrvglm.fit.q | 2 +-
R/summary.vglm.q | 166 ++-
R/summary.vlm.q | 5 +-
R/vgam.control.q | 2 +-
R/vglm.R | 1 -
R/vglm.control.q | 9 +-
R/vglm.fit.q | 75 +-
R/vlm.wfit.q | 2 +-
R/vsmooth.spline.q | 6 +-
build/vignette.rds | Bin 480 -> 480 bytes
data/Huggins89.t1.rda | Bin 443 -> 443 bytes
data/Huggins89table1.rda | Bin 445 -> 445 bytes
data/alclevels.rda | Bin 550 -> 551 bytes
data/alcoff.rda | Bin 547 -> 548 bytes
data/auuc.rda | Bin 246 -> 246 bytes
data/backPain.rda | Bin 488 -> 474 bytes
data/beggs.rda | Bin 198 -> 198 bytes
data/car.all.rda | Bin 6965 -> 6968 bytes
data/cfibrosis.rda | Bin 264 -> 264 bytes
data/corbet.rda | Bin 240 -> 244 bytes
data/crashbc.rda | Bin 374 -> 374 bytes
data/crashf.rda | Bin 340 -> 341 bytes
data/crashi.rda | Bin 491 -> 491 bytes
data/crashmc.rda | Bin 385 -> 385 bytes
data/crashp.rda | Bin 376 -> 376 bytes
data/crashtr.rda | Bin 361 -> 361 bytes
data/deermice.rda | Bin 392 -> 393 bytes
data/ducklings.rda | Bin 561 -> 561 bytes
data/finney44.rda | Bin 210 -> 210 bytes
data/flourbeetle.rda | Bin 344 -> 344 bytes
data/hspider.rda | Bin 1344 -> 1344 bytes
data/lakeO.rda | Bin 335 -> 335 bytes
data/leukemia.rda | Bin 329 -> 329 bytes
data/marital.nz.rda | Bin 10456 -> 10432 bytes
data/melbmaxtemp.rda | Bin 4265 -> 4263 bytes
data/pneumo.rda | Bin 267 -> 267 bytes
data/prinia.rda | Bin 1229 -> 1229 bytes
data/ruge.rda | Bin 258 -> 258 bytes
data/toxop.rda | Bin 473 -> 473 bytes
data/venice.rda | Bin 976 -> 981 bytes
data/venice90.rda | Bin 8072 -> 8000 bytes
data/wine.rda | Bin 269 -> 270 bytes
inst/doc/categoricalVGAM.pdf | Bin 735199 -> 645909 bytes
inst/doc/crVGAM.pdf | Bin 511655 -> 421544 bytes
man/AR1.Rd | 52 +-
man/CommonVGAMffArguments.Rd | 78 +-
man/UtilitiesVGAM.Rd | 146 +++
man/acat.Rd | 3 +
man/alaplace3.Rd | 4 +-
man/betaII.Rd | 3 +-
man/betaR.Rd | 4 +-
man/betabinomUC.Rd | 81 +-
man/betabinomial.Rd | 10 +-
man/betabinomialff.Rd | 4 +-
man/betaff.Rd | 1 +
man/bigamma.mckay.Rd | 2 +-
man/bilogistic.Rd | 3 +-
man/binom2.or.Rd | 7 +-
man/binom2.rho.Rd | 37 +-
man/binormal.Rd | 2 +-
man/bisa.Rd | 17 +-
man/bistudentt.Rd | 2 +-
man/cauchy.Rd | 4 +-
man/cens.gumbel.Rd | 4 +-
man/cens.normal.Rd | 14 +-
man/cloglog.Rd | 1 +
man/coefvgam.Rd | 89 ++
man/coefvlm.Rd | 3 +
man/cratio.Rd | 12 +-
man/dagum.Rd | 3 +-
man/double.cens.normal.Rd | 8 +-
man/double.expbinomial.Rd | 10 +-
man/{expint.Rd => expint3.Rd} | 42 +-
man/fisk.Rd | 3 +-
man/fittedvlm.Rd | 2 +-
man/freund61.Rd | 5 +-
man/gamma2.Rd | 23 +-
man/gammaR.Rd | 3 +-
man/genbetaII.Rd | 11 +-
man/gengamma.Rd | 12 +-
man/genpoisson.Rd | 3 +-
man/geometric.Rd | 2 +-
man/gev.Rd | 13 +-
man/gpd.Rd | 4 +-
man/gumbel.Rd | 4 +-
man/gumbelII.Rd | 3 +-
man/huber.Rd | 4 +-
man/inv.gaussianff.Rd | 2 +-
man/inv.lomax.Rd | 6 +-
man/inv.paralogistic.Rd | 2 +-
man/laplace.Rd | 4 +-
man/lerch.Rd | 3 +-
man/levy.Rd | 5 +-
man/lgammaff.Rd | 3 +-
man/lino.Rd | 4 +-
man/lms.bcg.Rd | 2 +-
man/lms.bcn.Rd | 9 +-
man/lms.yjn.Rd | 4 +-
man/log1mexp.Rd | 90 ++
man/log1pexp.Rd | 66 --
man/logistic.Rd | 7 +-
man/logit.Rd | 2 +
man/logitoffsetlink.Rd | 106 ++
man/loglinb2.Rd | 11 +-
man/loglinb3.Rd | 8 +-
man/lognormal.Rd | 8 +-
man/lomax.Rd | 2 +-
man/makeham.Rd | 1 +
man/margeff.Rd | 94 +-
man/mccullagh89.Rd | 6 +-
man/micmen.Rd | 16 +-
man/mix2exp.Rd | 2 +-
man/mix2normal.Rd | 7 +-
man/mix2poisson.Rd | 2 +-
man/multinomial.Rd | 3 +-
man/nbcanlink.Rd | 10 +-
man/negbinomial.Rd | 198 +++-
man/negbinomial.size.Rd | 2 +-
man/normal.vcm.Rd | 5 +-
man/notdocumentedyet.Rd | 31 +-
man/ozibetaUC.Rd | 121 ++
man/paralogistic.Rd | 4 +-
man/pgamma.deriv.Rd | 3 +-
man/poissonff.Rd | 2 +-
man/posnegbinomial.Rd | 93 +-
man/posnormal.Rd | 4 +-
man/pospoisson.Rd | 11 +-
man/prentice74.Rd | 7 +-
man/quasibinomialff.Rd | 3 +-
man/quasipoissonff.Rd | 5 +-
man/rec.normal.Rd | 3 +-
man/riceff.Rd | 4 +-
man/sc.studentt2.Rd | 2 +-
man/simplex.Rd | 7 +-
man/sinmad.Rd | 3 +-
man/skellam.Rd | 2 +-
man/slash.Rd | 3 +-
man/sratio.Rd | 4 +-
man/studentt.Rd | 4 +-
man/summaryvglm.Rd | 20 +-
man/tikuv.Rd | 10 +-
man/tobit.Rd | 6 +-
man/truncweibull.Rd | 6 +-
man/undocumented-methods.Rd | 44 +
man/uninormal.Rd | 2 +-
man/vglmff-class.Rd | 10 +
man/vonmises.Rd | 5 +-
man/weibull.mean.Rd | 2 +-
man/weibullR.Rd | 3 +-
man/yip88.Rd | 10 +-
man/zabinomial.Rd | 17 +-
man/zageometric.Rd | 6 +-
man/zanegbinomial.Rd | 74 +-
man/zapoisson.Rd | 31 +-
man/zero.Rd | 13 +-
man/zeta.Rd | 11 +-
man/zetaff.Rd | 2 +-
man/zibinomial.Rd | 16 +-
man/zigeometric.Rd | 6 +-
man/zinegbinomial.Rd | 137 ++-
man/zipebcom.Rd | 2 +-
man/zipoisson.Rd | 43 +-
src/tyeepolygamma3.c | 52 +
211 files changed, 9594 insertions(+), 4025 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index e1df98d..81d0c3d 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: VGAM
-Version: 1.0-0
-Date: 2015-10-29
+Version: 1.0-1
+Date: 2016-03-15
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -30,6 +30,6 @@ NeedsCompilation: yes
BuildVignettes: yes
LazyLoad: yes
LazyData: yes
-Packaged: 2015-10-29 02:09:52 UTC; tyee001
+Packaged: 2016-03-15 08:53:23 UTC; tyee001
Repository: CRAN
-Date/Publication: 2015-10-29 08:29:12
+Date/Publication: 2016-03-15 10:51:21
diff --git a/MD5 b/MD5
index 09c8638..d046b43 100644
--- a/MD5
+++ b/MD5
@@ -1,146 +1,146 @@
66414b6ed296192426033f4ac29a6af2 *BUGS
7ee5b2dc375f5ec613dffed100ca7b3d *ChangeLog
-ca856d175101c0ee856cd1e2b7dff965 *DESCRIPTION
+18f48164383f8841c382cd707f6eb1f6 *DESCRIPTION
e640665d8993539374917f850992ddc7 *LICENCE.note
-60d8c06d89c4c07cb251d84568bc8706 *NAMESPACE
-0b078da380c69041235063cf7c3fe68a *NEWS
+570ed3c2ae75b34ac517bb91a1cda92e *NAMESPACE
+a91a54eee4f44ad484b53f569aa4b0f0 *NEWS
31e60bca4249bc261445355bd6496609 *R/Links.R
-b6b017bdea768a643afc8171516d193b *R/aamethods.q
+ed47c2f7a4154dfa299eaaebc78b31ab *R/aamethods.q
4ffc1530ca8113d2f2d8b0d5cc1db282 *R/add1.vglm.q
29b192ec0239f8f013e99ef759823732 *R/attrassign.R
19fd9a65f33bfc01a56d0ee1f4752159 *R/bAIC.q
f96b47c7279f6b68a3946245deff4429 *R/build.terms.vlm.q
-560f0250d8606fc4c7bbcba5474ef9ff *R/calibrate.q
-8fa625cc47ab28b74bd41019d20b7b02 *R/cao.R
-ce3d85bf00ad08175321e2098ae87462 *R/cao.fit.q
-4ded73a0a27a728457ca3ecfa02bb9ed *R/coef.vlm.q
+f86d6550ffdd1b3ca9f06252dcf7ba50 *R/calibrate.q
+c575b259c9dd77477ec86e3df977b00c *R/cao.R
+34c0e75f6712d52bfb75cdcc5a62395e *R/cao.fit.q
+6280eb07b90ccb8852ad8706fd0cb13b *R/coef.vlm.q
523b3faf78519c00346b1843bd5db02d *R/confint.vlm.R
-77638f2e22a3dd774115c472bf0c33e8 *R/cqo.R
+58e1930c4422d41f5c118f39459be314 *R/cqo.R
9a4e3479392194fbe0c6e55cacb03f62 *R/cqo.fit.q
d411a1bf3bfbe7057b4211255c33ba53 *R/deviance.vlm.q
54b928344dc9efab031bf3e83d04f21f *R/effects.vglm.q
-73607c9675d480649795248cf79816db *R/family.actuary.R
-622eef73eae77e8f11a3be61c1d177de *R/family.aunivariate.R
-a92c19967dad3ac7f28a999be60bdd35 *R/family.basics.R
-d9b278484e9eeb0977f4cf37449f6d81 *R/family.binomial.R
-7b722a4d252e8889459cd4dccc734ee6 *R/family.bivariate.R
-fb37a29e583745096fdd1ca4c6b20e87 *R/family.categorical.R
-4d9023a91086b21b57ba417816b791ab *R/family.censored.R
-290eb0cf20c680e3822312da99e778c8 *R/family.circular.R
-fde7624d1a27f4c981dbea13dfca9f8d *R/family.exp.R
-b5a955403628ff48d9bd5137a72b5358 *R/family.extremes.R
+928ecb3851ae3ff62ceab17baa7c8992 *R/family.actuary.R
+3cd382a09e1312f28ef0f3b9bf21fca4 *R/family.aunivariate.R
+1bd1dacbe2b70799e91a49b33f6c7afc *R/family.basics.R
+7fe4f2801d52575344a7520ed828a8cf *R/family.binomial.R
+2fcda4f92db74fd1af970029218ddb38 *R/family.bivariate.R
+79e723214af3efeaee67fc7b287455d7 *R/family.categorical.R
+092006fb107b2a1864ce284854e8ed0f *R/family.censored.R
+c9be4e7bfd15babad00c1872694f0f1f *R/family.circular.R
+adc8b490a49386c200b94980987d8fc3 *R/family.exp.R
+57d70a513f9e8041807a24d13354696e *R/family.extremes.R
251b551aaf906c754d9e75342e7ea1af *R/family.functions.R
-5870ba488892a27748d73c96fe09fd9e *R/family.genetic.R
-81bc7044f78ed67dedfe721b45e70c9f *R/family.glmgam.R
-040039ac1ac77acc7355986786188113 *R/family.loglin.R
-5679a8a30b54ac8f66dd945b2d1ccd2a *R/family.math.R
-40b0c38439d400fa0ec5004104f472b1 *R/family.mixture.R
-8a6c638eb360f7a74881ab5d18721600 *R/family.nonlinear.R
-ae9004a896cfc5a6c0aec0ee9137901e *R/family.normal.R
-8e71759e50f7fdbc320f4b3dfb57b304 *R/family.others.R
-2c3afca36be4104086c9b132e05561b6 *R/family.positive.R
-6c41acf5b9e4e2e43eb7b8520196b8e0 *R/family.qreg.R
-5bdb4590aaaff9bdde698a20fbaaac84 *R/family.rcim.R
-eaf63cac3cffe7fd0bd9352fe8223a60 *R/family.rcqo.R
-303fbdf3b0b917cdf71e170b50934d49 *R/family.robust.R
-5b373c7cddc6faad4894c9ff7738c8f2 *R/family.rrr.R
-943ff0caa6e0cf7294b32a0a8dc1ad98 *R/family.sur.R
-d8765cca44c6676d5c3761e609fd6476 *R/family.survival.R
-b88f86145cb3ad38d701edf852208a3f *R/family.ts.R
-b6a1108501f71db2e93afa194e8d678b *R/family.univariate.R
+8e8f859b7d6fcfc7bc0217f1c55283f8 *R/family.genetic.R
+3ac000dc4c07a19b8fb7a6c5496db839 *R/family.glmgam.R
+0f7af1b8f7a3ebb16ceac11221cab219 *R/family.loglin.R
+a8ae0b3b7507b40def24bedad2696097 *R/family.math.R
+9781662e0e6a85930eda8f0b470dbb9c *R/family.mixture.R
+9fcb7721324170b016e5d05e4762ecf0 *R/family.nonlinear.R
+fac1e304db31c1df631388c2f5c98afa *R/family.normal.R
+3e81dcdfbf1108dc27ad849424b5ed06 *R/family.others.R
+170f18f3a46c734b5fc42f3e137284e3 *R/family.positive.R
+44a39dac95371e0a9214632a4daefce2 *R/family.qreg.R
+9f8a2d77c4b80645472c258c609596cc *R/family.rcim.R
+ce2d185ba2afa3e7611f80c267280039 *R/family.rcqo.R
+028ae9e8c53a0dd3033e01f62b8c1a39 *R/family.robust.R
+81bcc233613aa077ae6fd75bc5a8eaaf *R/family.rrr.R
+ca8dfa29ba9b7733213147f573058f18 *R/family.sur.R
+9ceab9c4a48c8ab4124208896da02a43 *R/family.survival.R
+d35aa31fb99b813eb649ccfd0650126b *R/family.ts.R
+212f797fad47aa701641a0d11c18d886 *R/family.univariate.R
8d7d5df3e005750453f8ed0977c0c4f6 *R/family.vglm.R
-aa4b052796fac2667f825c9fcdb7b4bc *R/family.zeroinf.R
+8b7b2aa7b6b35f6d7bf84d5637d57d93 *R/family.zeroinf.R
e5a738b6ba3f59a3962eb089e56e5786 *R/fittedvlm.R
27dae12416e0840c1f75f4f18e0146f0 *R/formula.vlm.q
1c7d28893d43c88a934731219098fd5c *R/generic.q
-542665d45f3a87c4fe8e8c549a39ac11 *R/links.q
+aa6ec9eb642c7b79e0c05c54c13e7d35 *R/links.q
06929b2f0a102fcca301a9f265279e04 *R/logLik.vlm.q
-92736375efccc88013c357fd287aa4cb *R/lrwaldtest.R
+7538bff4855fdeeb147da4822187ddf3 *R/lrwaldtest.R
3c2bc6b07e880eb2f6ae5bfc3ee8f55e *R/model.matrix.vglm.q
-c9f890ae5310b45be85da9fd237b98e4 *R/mux.q
+0a2fe5c2fef0512b723c10dbf8980914 *R/mux.q
ec00a9fdace1922ca78877ac43605737 *R/nobs.R
-8c7a83a2e5c10a871e722e3c307ad79b *R/plot.vglm.q
+2b915559877c7bce5d0893ab740e7842 *R/plot.vglm.q
a2547eed9a5570094efec6573e6f9f9b *R/predict.vgam.q
-6e8e3c05882565d21c582f369a13b673 *R/predict.vglm.q
-b9109db7f638db25728c3118e6baf41d *R/predict.vlm.q
-cfb0659e61f097d41e0266ee71d15a9d *R/print.vglm.q
+2742099a9c2a856a3789ab8c6751bd7b *R/predict.vglm.q
+43a7efd329b4711111bb2e9d883b8717 *R/predict.vlm.q
+f0cee50fe2d92666ebd71d021c117352 *R/print.vglm.q
74f7393a57eec9a96cc7d04a569037ca *R/print.vlm.q
c431e12369752358904f6704832decd5 *R/qrrvglm.control.q
-d767ac65a1275661aa88e8e3cfe214cf *R/qtplot.q
-78e9224292be8718824d53dd2165dad4 *R/residuals.vlm.q
+186c1f01e378f2e03a6b3dd994551622 *R/qtplot.q
+a08653bfeb60aa7bc249535920889972 *R/residuals.vlm.q
9d5826ad08d66734f7403d17fcbba5f6 *R/rrvglm.R
e278dec435eddcc0345a59bd9dd56f6d *R/rrvglm.control.q
-7f713596fe6bb361c2e4e6a7520daec8 *R/rrvglm.fit.q
+54ad4c9dd8dd789a5d15cfb150c1f384 *R/rrvglm.fit.q
cf62bdb183fe009cd47316eacbe3b14e *R/s.q
156f02ed65d0d90c17241cbada6d0c00 *R/s.vam.q
400c72e71f4820b58f292523029c6245 *R/simulate.vglm.R
366887aff30fbfac5afb77ed10597005 *R/smart.R
89968d39bff60306bab87cc1e3ebdca1 *R/step.vglm.q
ea860d4429fbcfb1c8e494a198e72adc *R/summary.vgam.q
-5e33c1f3af46348ed7ac16fff8cc3307 *R/summary.vglm.q
-8233ae7e692d6254ac739541a4774109 *R/summary.vlm.q
+77bacb6a3a26f41d314c7398229afe84 *R/summary.vglm.q
+242bb02f33630d22c6cd0219d375326e *R/summary.vlm.q
f53cc75eb61ade505b6ee2d55f0ac377 *R/vgam.R
-3a7ea81a3f0c6509e71466cfae4c108c *R/vgam.control.q
+4ccd8af1480a81325e5863775a59f597 *R/vgam.control.q
f6da05ed223f0cac5b7731c8f5da2095 *R/vgam.fit.q
c7836fc6514f090c9852ef7427b68a95 *R/vgam.match.q
-602d47027ca2488c44bf5aa5299049e1 *R/vglm.R
-714a3a58e7584c7f2545aed04187a167 *R/vglm.control.q
-08ce604d1ce30bb186f59cc23faab9a2 *R/vglm.fit.q
+1501c23247071b2f853288145f80ac89 *R/vglm.R
+ce25aa40db5b384a7f1ca04ed8acb3fb *R/vglm.control.q
+49a5d53b383a87546d9cfc081c4c05f1 *R/vglm.fit.q
d3c11b3c2876d98a37ea6f4a5658a4a6 *R/vlm.R
-568fedfc13182adbd374ec27d7a75600 *R/vlm.wfit.q
-9c9d0afc47501544ea1da2703e60b4e9 *R/vsmooth.spline.q
-6163dccd84afd0591286d3d3d44f5393 *build/vignette.rds
-b7d1c6c8f8393b07c7e9b604adc07a98 *data/Huggins89.t1.rda
-c89278fea9afbea65d5a5c67ea8920ca *data/Huggins89table1.rda
+b6d7090a5d83d261dc018d72d87f0ed1 *R/vlm.wfit.q
+8923fb07fdb84505792742d2a25b793d *R/vsmooth.spline.q
+c50b7cc72caa37922a7210113c736f47 *build/vignette.rds
+6f925d3a68c25b310fc7f2e95d780e7b *data/Huggins89.t1.rda
+3247124812b7363afd2ba89f3d51b6be *data/Huggins89table1.rda
d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
-44af01b902591edbe25947cfb93b82a2 *data/alclevels.rda
-51600a9d117c2e4e508498c2b8c5b062 *data/alcoff.rda
-64a9bda0da78dc1be3934317c4022344 *data/auuc.rda
-9af66d0bf992be0147dd449c0b60d236 *data/backPain.rda
+be3b7d8c4d48c5d6dd28ae75b92d7f10 *data/alclevels.rda
+4a7c49e2c65c91646dfc1b0b9f010898 *data/alcoff.rda
+ff5853d8b50a88855ec1d31c550699bb *data/auuc.rda
+40caae8677f57d3e0316d671d30f016e *data/backPain.rda
4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
-340932cda23a74745e46571a70dc3882 *data/beggs.rda
+eec4e857ffb9c0888b4cfa59b3db915d *data/beggs.rda
e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-fc97edacd7b4a480edd006de2413bb55 *data/car.all.rda
-4a83e2e836f3a9708daed64dfcbbcd2f *data/cfibrosis.rda
+15e0d982e815b30ad197b083f6524975 *data/car.all.rda
+0c536d00a3a92028e6867817b5ee4f78 *data/cfibrosis.rda
b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-c6ee8d21ed687cad8123dc3162865c9e *data/corbet.rda
-0f1edaf1442a9006da63acfd7fb59a0d *data/crashbc.rda
-f8e248b3a1082019db47ec264c9f7d5c *data/crashf.rda
-34bee33037d6199dfbbfc2d52ba96158 *data/crashi.rda
-f74434eca35fd11b2ecf33b601178f62 *data/crashmc.rda
-e76aab67b2e6c2ec1ed3e39f50b7f474 *data/crashp.rda
-ab1e91319254f296f302090e8d9abda5 *data/crashtr.rda
-a2c09c5f8c31f870be7131ecd160639e *data/deermice.rda
-c9dd72b680bd6d9c7861e3dc5f3bcb83 *data/ducklings.rda
+690479e10cc5260d1e357a17c3c4007e *data/corbet.rda
+a0f47c847f1ea403e53f30073ef34942 *data/crashbc.rda
+aed7745292af4d98dce60b28772ad463 *data/crashf.rda
+af5cf05f5abbf40942009d3c2b875bf2 *data/crashi.rda
+32a0bbc55cf912483109718084f093a0 *data/crashmc.rda
+7ad8337140f5d172a4c0c6e0083a921a *data/crashp.rda
+b2af6eda780da9cc80ce4a2256687129 *data/crashtr.rda
+43963d3f939a513c3e95c4dd331efbc6 *data/deermice.rda
+297c62e906af91f0d8fd64592a1e8736 *data/ducklings.rda
08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-968c21409ce398b3d22466d70d79fb31 *data/finney44.rda
-9a25176f2b7870d254248d522e548726 *data/flourbeetle.rda
+2f9f1cc67454cd0022e4b4d9b6e20152 *data/finney44.rda
+0947d375a8dac8b8f2df6c29d2a1e356 *data/flourbeetle.rda
3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-e9b5697bdd74940ed221d71f70485f1a *data/hspider.rda
+3fba8d7e31acb3936d3db511f2aa9d2a *data/hspider.rda
dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-fea8d38efa5ed3a8141dd1566eb89fc1 *data/lakeO.rda
-3a63fd948d478efcee0a4439ca9571b7 *data/leukemia.rda
+bc0b0413c6641d1e4ddc260b96bd2eba *data/lakeO.rda
+f02e8aab9f481aff0e3399357305d259 *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
-04eefb3f13e372e5b0370b0f04d9ab8b *data/marital.nz.rda
-a2fc80eba077edd682ac20c42d40890d *data/melbmaxtemp.rda
+e4d66877901e8e6a093ba6ab74a425a6 *data/marital.nz.rda
+08492303e0f51013202d0c921108d9f2 *data/melbmaxtemp.rda
56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-93121d35f3ce58883f92d5d76f697083 *data/pneumo.rda
+ffa511438e40e3c06118a2e6a06b6783 *data/pneumo.rda
0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-9f2a88b4c56838b56329eb2c11d310be *data/prinia.rda
-b08aebe141c9d5fa30c8864930836015 *data/ruge.rda
-5b3d2c05e50f5083846d15f115399209 *data/toxop.rda
+9e83d8a32f482fee9f3a9136818e8bd9 *data/prinia.rda
+e2121052c9b7de83f077605eb9b5e19f *data/ruge.rda
+51a6bfdb4caf035ad470cb730e3fd917 *data/toxop.rda
1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-11fc4f6aa2d660a7a178b41990ec9b60 *data/venice.rda
-314b5a505fb5ba5e55efcde3a706cc34 *data/venice90.rda
+adea95cad99f0e03f86c1d56f2926fa7 *data/venice.rda
+8c11f6736ada0e413b40673c38e7e459 *data/venice90.rda
e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-d739b5c0e33ebee609294cb35283fbc7 *data/wine.rda
+d570c2c7cfa9467f8b273b9902021116 *data/wine.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
@@ -151,16 +151,16 @@ ab8081763fe2144558be25f3a154327b *demo/vgam.R
d2fcbc6a325172d058671fd977d0b5e5 *inst/CITATION
4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
-849d8750de988da008419f2ceac54902 *inst/doc/categoricalVGAM.pdf
+3746d48d12209b86c2ab7665ed0e6fd2 *inst/doc/categoricalVGAM.pdf
2f57d2a0610fd514e05aae8ea94d8ebc *inst/doc/crVGAM.R
8e489008d8b8b8f769e5e93e351c9c42 *inst/doc/crVGAM.Rnw
-792ac4fba77b864da03f3af65b90b2db *inst/doc/crVGAM.pdf
+84efd2c0c9082cd8a48ead5b91f0c4e7 *inst/doc/crVGAM.pdf
9b97006cdc82d3a0c0ace3d43c9758de *man/A1A2A3.Rd
4bc543c785c8a213c46693e2e37f5f00 *man/AA.Aa.aa.Rd
26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
c6c2a703e0f76c8b0f9e0a7d36f13386 *man/ABO.Rd
38647708600610216a454c61450810ff *man/AICvlm.Rd
-30130df5de09e7ef03e6a85a34e6e100 *man/AR1.Rd
+17a911f0784d0ecd30d53f9eeafd522f *man/AR1.Rd
e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
@@ -169,7 +169,7 @@ e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-5c3794b2da0ebcbd6461a95bda2b7e2c *man/CommonVGAMffArguments.Rd
+5b55112125b3f2bdf8dec0219570950d *man/CommonVGAMffArguments.Rd
098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
b9505b66dea5b1311aa8d2700d3d6a34 *man/Links.Rd
@@ -184,10 +184,11 @@ d39629f7598851d50262b1075321525a *man/SURff.Rd
20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
+eeed63e131219077a163410c683fd32e *man/UtilitiesVGAM.Rd
6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
3656d1dde004b1de74846eaf813a2f69 *man/VGAM-package.Rd
-93acacd4fef4b73ba027faff69619938 *man/acat.Rd
-8320c9356f95587835bb7503df9ad125 *man/alaplace3.Rd
+ce8d4266cb5eeb30fbe40e28ff554f5e *man/acat.Rd
+d2407fe64af0c4369d18ff4cc5f58a34 *man/alaplace3.Rd
8c0d8e4d9e634a0c2539e3a052afa9cc *man/alaplaceUC.Rd
8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
@@ -200,12 +201,12 @@ bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
65a5426c021e0a6c90731c14786a3395 *man/benfUC.Rd
afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
12d28242eea600b3e6f52db5d71d871f *man/beniniUC.Rd
-dbf1d7ee255da6a85fbafbc84f2c0650 *man/betaII.Rd
-3a31e0a304c2ccab10469d866ae8acdb *man/betaR.Rd
-d489f43e8771ddb6f32e121be29b838a *man/betabinomUC.Rd
-bbb0ddef9113d1b8d1e036ac66f9bb87 *man/betabinomial.Rd
-4e9c0e3075be1050db8ad3fe1e8dce6e *man/betabinomialff.Rd
-29d0247eaef9f6447e173c8ac994acbd *man/betaff.Rd
+c22880cb87b5d3fcc1b394e5c4d0cfc4 *man/betaII.Rd
+55e5c7726717a7dca9e4785cc9871801 *man/betaR.Rd
+6a57403bd9855568e232f74234bf7681 *man/betabinomUC.Rd
+3dc23022db723ea07649cac674dd0e2f *man/betabinomial.Rd
+5049c8cf22a2f1e637db29a40ad3c3b1 *man/betabinomialff.Rd
+98dd26e554dcebe5b9de6dfab6ffdeb4 *man/betaff.Rd
4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
725a8c9d8b4a9facb0c3cb815d75266b *man/betageometric.Rd
7553029f69c2be7dbb20c864b97102e5 *man/betanormUC.Rd
@@ -219,25 +220,25 @@ faeb492060203a0d89d5cf4f40b0e4c4 *man/bifgmcopUC.Rd
57536bc44454e58eb293b928919c92ca *man/bifgmexp.Rd
5e0bc6b73af5b7a56805a2f7600a439d *man/bifrankcop.Rd
4e57b0a38391fdfe5e57e39799ae9d6d *man/bifrankcopUC.Rd
-3996c974a214c0d706d20d820a9a1fa0 *man/bigamma.mckay.Rd
+24ffd4d97c8b5d9c71c6702c4ecb3316 *man/bigamma.mckay.Rd
7a1c045834b0bd9de92a4aa97f52ab3c *man/bigumbelIexp.Rd
ffcbfc72f334094f6dfd4842ab522e96 *man/bilogisUC.Rd
-e913aabb8e3808c637d264f28c90bf52 *man/bilogistic.Rd
-c7a7e2b700c4358fb65489876ead2d79 *man/binom2.or.Rd
+df5c6274584e9a5b961b253c498c0580 *man/bilogistic.Rd
+1e3bfb0dc5eb125518194b131c78ecc3 *man/binom2.or.Rd
129f6be1cf1a039f137e5ef3da503fca *man/binom2.orUC.Rd
-a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
+3da84a2c9a4148aa7f062129c7b40c8d *man/binom2.rho.Rd
20cb304b16a9073488621b104549e361 *man/binom2.rhoUC.Rd
29a9e5aa565832fad506a6a45c7b2897 *man/binomialff.Rd
-92806ec6cd9c65373fffb732eda114b5 *man/binormal.Rd
+2bb4acbcb6e81694a0eee8c794932afe *man/binormal.Rd
3e2bebdf7d5db7a0c7960d6b6f1597b5 *man/binormalUC.Rd
ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
1d943aad478481e7bf4c4b1a9540706c *man/biplackettcop.Rd
79d9cd96d00531b88793d55a07d29842 *man/biplackettcopUC.Rd
bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-03369be2b6898192a83d14253ca3b1d8 *man/bisa.Rd
+d04726582d80a3f32bf27f8c5d3a690f *man/bisa.Rd
8b2718247258cfa11b0857a922c512ab *man/bisaUC.Rd
-f0816002d3fb698dbc17a6e55d91c18f *man/bistudentt.Rd
+ce60753888f08f05ba46dbd49dc0f4b8 *man/bistudentt.Rd
0489e2ceeed7b2aaf9cbcf6cfcabae81 *man/bistudenttUC.Rd
81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
214e2f5b25156e937a5af65d1e6e1b58 *man/borel.tanner.Rd
@@ -254,10 +255,10 @@ afbb7b695f652a4bccfb0e6cb80a8739 *man/cao.Rd
10f72289cb33f5f734d39826893a280b *man/cardUC.Rd
53ff522ff00f7bcfe443309762441150 *man/cardioid.Rd
a458bca3e32bdc653cd924dd564ee58d *man/cauchit.Rd
-d361f0253fb328f70a716c09fd597fdc *man/cauchy.Rd
+957dd50f814f492806ec05aa4c046569 *man/cauchy.Rd
4973007c9a18278e2130994b68a2e47d *man/cdf.lmscreg.Rd
-6c41f48884c2e92fa7842266d02a5a6d *man/cens.gumbel.Rd
-f96d45016bcca1b72249a3548520a2cf *man/cens.normal.Rd
+345accaaab82cc5d1f08b8d25c1432c4 *man/cens.gumbel.Rd
+49787b380cee2941b0b8d04b602ebadb *man/cens.normal.Rd
72901f13efe7d772fc5ed78bd6c58cea *man/cens.poisson.Rd
94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
@@ -265,9 +266,10 @@ a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
aff05a422130d8ced689190eec1b09dd *man/clo.Rd
-f0fa4d5fd65cc5d53012b586f24b3fb3 *man/cloglog.Rd
+e35c0ce37b72050ab56a340fa1d4f375 *man/cloglog.Rd
b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
-e492f5f148514df05cc4bf101b7505e2 *man/coefvlm.Rd
+eb8ba8eea01187377705b5cb7d682947 *man/coefvgam.Rd
+7ab6167f053b9ac7bb36f855293af71e *man/coefvlm.Rd
1409b01c52bad85c87e9740fb003699a *man/concoef-methods.Rd
e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
19ee88e086b371be838206bd11b5479e *man/confintvglm.Rd
@@ -275,9 +277,9 @@ e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
5314268c4257680ac10edf26e9222944 *man/cqo.Rd
8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
-72ae26906f75fb658caf9ced32ba15a7 *man/cratio.Rd
+b7742b0b5c630d48f1834fb5fefc0835 *man/cratio.Rd
002568187283dd7faf83534553674e94 *man/cumulative.Rd
-f2ce3a3f6ad52abbbb75eddf5baf1893 *man/dagum.Rd
+99f24227c802897e75bce7f82ba99a7d *man/dagum.Rd
12192f19751804a540e6d0852e29726c *man/dagumUC.Rd
d5439d37875ba50990406c5c5f8595eb *man/deermice.Rd
dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
@@ -286,8 +288,8 @@ bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
276aebb1ed4a71af9f9096e9f9c4515d *man/dirichlet.Rd
17afdbe28f8a8d93725e2747c2daa303 *man/dirmul.old.Rd
7a63063be35f8510ea5198556bf1c192 *man/dirmultinomial.Rd
-ed927db10e5cf69502d5485f300a9aa7 *man/double.cens.normal.Rd
-7557104d36b3087ed4d34345bdab7017 *man/double.expbinomial.Rd
+7c78ad345e44a5b81963f0cfc744f701 *man/double.cens.normal.Rd
+99e58209c99f594f80fc7da1524cfa53 *man/double.expbinomial.Rd
1da4d63047f620bd38bc5fadf56ebfaf *man/ducklings.Rd
90481ad7be6cb76a82e99694a2a8e016 *man/eexpUC.Rd
92007c408a76e89f46e756eba4724a44 *man/enormUC.Rd
@@ -299,7 +301,7 @@ cb83f77886603d8f133964c227915d08 *man/expexpff.Rd
772ca8da2a38dbc5a2ffcb2138f91368 *man/expexpff1.Rd
eccfa33017118bc7314ef168695a595e *man/expgeometric.Rd
f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
-93cc460d2fd8c787aa6feaf5347f1685 *man/expint.Rd
+1b6f2c2a7b9fbbe335a89fa0275733aa *man/expint3.Rd
6ab5a59ea1b5f61fbe676577b3882529 *man/explink.Rd
89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
e51211ad603eeecbe72cd7f6db0e76e0 *man/explogff.Rd
@@ -312,9 +314,9 @@ c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
9d679a175cfe7165b89906441e5efebc *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
460448c26c4268e7870bbff5f9d2fb66 *man/fisherz.Rd
-c75c1ffce51c2de0fec04f54bbaf466b *man/fisk.Rd
+6d12a492e19a8f452b575c9f4473ded8 *man/fisk.Rd
5966dbc9e396bd3cbb15b2650d885177 *man/fiskUC.Rd
-c75d3ae0a8669fed4a71f54b8be64266 *man/fittedvlm.Rd
+97bcdcc90669435272c5d940f0b6d967 *man/fittedvlm.Rd
742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
c0269f789f9739dc6aeeb20b446ae751 *man/foldnormUC.Rd
3909f1a56c381d71501b6fde8d6647fe *man/foldnormal.Rd
@@ -322,36 +324,36 @@ e1413cdef7d5b35f976738561f60a91a *man/foldsqrt.Rd
628edb6d51c54d246702e9521ba6470c *man/formulavlm.Rd
7af865ab486ea1d5d043bdef4bbf81cc *man/frechet.Rd
dabb4b7cdd3422f239888fb85ca5a70b *man/frechetUC.Rd
-cad07bc11ec21b13ecdbc3b93ec8efc0 *man/freund61.Rd
+babdf09c0633ab6fce48345f26984611 *man/freund61.Rd
c4aea59df1932e36cd6fb2ec38110e6d *man/gamma1.Rd
-6b32b9c30d5243afb42c0e403e70f842 *man/gamma2.Rd
-c173815d95bd553fa952911bd2ca71aa *man/gammaR.Rd
+13beda968ad3c4461042e74b89e744c5 *man/gamma2.Rd
+969c6650372ab79d1751a733754f0dac *man/gammaR.Rd
3558584dfba54663dc4de34e21cc9aa9 *man/gammahyperbola.Rd
edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
-a666a1118f74b8bff779fa283e483cbc *man/genbetaII.Rd
+6bdfa23e246b5ec65b369e4e746574e9 *man/genbetaII.Rd
45999add2a92fc243422b25bfc8f8198 *man/genbetaIIUC.Rd
-00ace61cf251e01ebf8144a503c4305d *man/gengamma.Rd
+69a758aeab4a968d9e9f74d96a43fa17 *man/gengamma.Rd
588e10d5c3fd9ff745c679435c5f2457 *man/gengammaUC.Rd
0a765eb0392ad75d94c0b0f0c517f9fb *man/genpoisUC.Rd
-296e471d13459805b0cb9d98e2de2a00 *man/genpoisson.Rd
+8cd5ee8e81b3db18715e148f372d9c15 *man/genpoisson.Rd
15429ac99e67921a77cb78e47210d7fc *man/genrayleigh.Rd
2b8ec736188410b1502ce23ba1852463 *man/genrayleighUC.Rd
-94c6189883bf1848735e23156e25cdc0 *man/geometric.Rd
+ac050e093931cbc8b783c56728350b69 *man/geometric.Rd
ea16a72ebd8739cd2133e91fd9c92662 *man/get.smart.Rd
d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
-7d533bf53d40503606dda3a614245aa1 *man/gev.Rd
+a793d458ea8847106a2f0ade265a6a1b *man/gev.Rd
0496867739918b68919e42a4018a338c *man/gevUC.Rd
fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
7ac66cc25e3d13cc7fed08bb6b85e1db *man/golf.Rd
9a635d01c2a0f08b71517df675b20a92 *man/gompertz.Rd
8170cb9545cf35f1768db069b13a893e *man/gompertzUC.Rd
-7ec773041e29285cfe05226d6d58a30e *man/gpd.Rd
+59edbd8559281a0c9f3ed748d67ec12e *man/gpd.Rd
9cbfd18331d52c4fb66f0221d76be01f *man/gpdUC.Rd
7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
6e28498b6d44f47f2663a6be72f68529 *man/grc.Rd
-00bd52370e6b9e28b1ec106c6ecb2b09 *man/gumbel.Rd
-bd6be76e82363793b9186e55d0e35bd0 *man/gumbelII.Rd
+62e50cb71aa52e64f6395a83e13b23e5 *man/gumbel.Rd
+f4c347dbfde0cbe8013496d5f8ef175a *man/gumbelII.Rd
5099d1835eebc1b4610481e77463a50c *man/gumbelIIUC.Rd
6a66a220a209ae6d1c7eb0bf57f59671 *man/gumbelUC.Rd
fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
@@ -359,7 +361,7 @@ fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
2be497a8d77472f00279d19f735863b5 *man/hormone.Rd
93557c7aca25514dc023773bdd045d76 *man/hspider.Rd
-f4fc4645d2d190ef9b82cce1ee8b29d2 *man/huber.Rd
+ff68401c69a2da4605086cb24fb7944e *man/huber.Rd
bddbb4682e3ee5c97f116acfc15d3f3f *man/huberUC.Rd
d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
@@ -369,10 +371,10 @@ e3a9765eba431e1f55e2fdc11ff52b4b *man/hypersecant.Rd
7f0e64784914835bb11c6f43643aae15 *man/iam.Rd
c978905e9ad1554330e74b3088faa909 *man/identitylink.Rd
857cbf6f8c5970a18867fe560f275f6f *man/inv.binomial.Rd
-745b6c5557776c23bed67b268f03f432 *man/inv.gaussianff.Rd
-c64f106b3cd1010819641b86b926440a *man/inv.lomax.Rd
+3e5254faf43189942b98ee8dafaaa06f *man/inv.gaussianff.Rd
+a78ed6bfc5949e6586975bf781ece433 *man/inv.lomax.Rd
4492e4a4f91d5fe7d4ec75a128bf4e07 *man/inv.lomaxUC.Rd
-af702822d0c222741dc25184e3a6a134 *man/inv.paralogistic.Rd
+84c75096c0dd15930a3d6df360fb0967 *man/inv.paralogistic.Rd
6f740a890a174ff4ff3879fa8719ec58 *man/inv.paralogisticUC.Rd
b2ce02b5af6709a1b2d294fcf254d393 *man/is.buggy.Rd
a501c3d3de4a744a0e0cdbc0673b543d *man/is.parallel.Rd
@@ -383,26 +385,26 @@ e68a1f19e55cd95da21eec0b119c0ad8 *man/is.smart.Rd
255a587274163051c7c5e81b79bb24cd *man/kumarUC.Rd
1bcedd3ac3a0c7467e5dee8ba1de9ace *man/lakeO.Rd
decbd103cc5311735e70d906d170c742 *man/lambertW.Rd
-e80a85ec4d067a1549cc8249666f75c2 *man/laplace.Rd
+640c78cf542ad1ee952d75baa009bb83 *man/laplace.Rd
55f7da75a7695c5f00b10d600711bab9 *man/laplaceUC.Rd
16b21ecf83bb8fce76079502877b2fbd *man/latvar.Rd
2cd5151baff29f9d8dd996dc48293301 *man/leipnik.Rd
-2e88465ad75446bbbccf208661193a8c *man/lerch.Rd
+3bd268665a29f6a6edb1b3387b69b2d5 *man/lerch.Rd
8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
-632c83ea2a7b229a64a4679f9fa6b52f *man/levy.Rd
+42550fcfd84f5f7ee4efb5886d1fe224 *man/levy.Rd
d3fb68f03d6cc946da6b48772bea3297 *man/lgammaUC.Rd
-745ab1fea005b7572910ae5919111054 *man/lgammaff.Rd
+d3d35561bb39104a648833365e13bb26 *man/lgammaff.Rd
1bb4af539f983579a19c180c3ab29aec *man/lindUC.Rd
271536a592dedaff73d9cde20c844d76 *man/lindley.Rd
53b900fd7a3bc5a1f4ff6a9b9353d4e9 *man/linkfun.Rd
79a20f167d06958b953c5a7a8dfe16f0 *man/linkfun.vglm.Rd
-20873e71a07de6b42d07fc6e0008ea05 *man/lino.Rd
+c6df85746e6410c593e22489045a88e5 *man/lino.Rd
f56802c0fe3ec1b61cd313c370b9ff58 *man/linoUC.Rd
b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
-913facfe3f915290ad154061ccd5accb *man/lms.bcg.Rd
-77ad928a6aa56adf1cfed93e6358369d *man/lms.bcn.Rd
-b0a070fdafa635bab794c5cf3ac88ba0 *man/lms.yjn.Rd
-20824c03fc9d40f749ca42d60805124d *man/log1pexp.Rd
+1cb54dfd175703b0fa36ff139404217f *man/lms.bcg.Rd
+1d9caf2fdc9cad915a7df45cfe4790f4 *man/lms.bcn.Rd
+2bab43fb4c3c8bc597867838aecb67df *man/lms.yjn.Rd
+0dad131a129a97908dfa39adac5ca812 *man/log1mexp.Rd
34cbd6bc583c55d2acd79a46a66e064e *man/logF.Rd
06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
@@ -410,16 +412,17 @@ b0a070fdafa635bab794c5cf3ac88ba0 *man/lms.yjn.Rd
34497f2200a115323b8be4c181dc5b09 *man/logc.Rd
1e7009d720bba4d0201441cd02be84d7 *man/loge.Rd
20cc0c73ee555790179879533cb526f7 *man/logff.Rd
-12d3a7e35301ecb632191ccf31a63296 *man/logistic.Rd
-b283163521ea21b87f21463b719fc75f *man/logit.Rd
+227fe95675d683b575accc2d9390755c *man/logistic.Rd
+c65e7936494787bc6fa0c31d931d8f6b *man/logit.Rd
+501f8acee0a27cb53cd02f174e37fe9e *man/logitoffsetlink.Rd
8822ba593955e90e63a8779aaf74d29b *man/loglapUC.Rd
0f6dd1a9c0fc77dd6521af733693f52e *man/loglaplace.Rd
-49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
-22ad47055f4be0a62a6f418b0024c911 *man/loglinb3.Rd
+bc4fdb6ecc0913ebadab7deb1a95efed *man/loglinb2.Rd
+4290a696c9eedd140e5d64489b6f29be *man/loglinb3.Rd
f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
-a6cbcf688c21d36c440c24b56dd36113 *man/lognormal.Rd
+7495135db74b6b1eb9646755218e7020 *man/lognormal.Rd
e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
-1a96739cc02213e306e77d33c5dec358 *man/lomax.Rd
+e23c05c9f84263ac83055c5f03eb7d30 *man/lomax.Rd
dbc62e15528097b42fb64d49be5f22f3 *man/lomaxUC.Rd
ac49f1d5575295a237328c2de3cbab10 *man/lqnorm.Rd
fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
@@ -427,37 +430,38 @@ f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
7dcf0051720ee4587304e819ecc8de71 *man/lvplot.qrrvglm.Rd
16b238586876d84bad0a1420402b5718 *man/lvplot.rrvglm.Rd
c5760c3960748f906230ded119478271 *man/machinists.Rd
-3c2901cca3e665cc792cfbc5ca9c260d *man/makeham.Rd
+4df8393312f1b7ff81d4dab3d18984cd *man/makeham.Rd
7785dc7e94e63e94e688d9553a9c7b2a *man/makehamUC.Rd
-583f3f406844c550079d2592ecba0c25 *man/margeff.Rd
+b830a21e53610a5abfbfa7466ae0f3c3 *man/margeff.Rd
b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
b2f1aa9cecaec318a14cc5d4fbb20d67 *man/maxwell.Rd
c7fcbd341df77f76494a92836715789a *man/maxwellUC.Rd
-bd8250aaa1bc17c017c0b201642882dd *man/mccullagh89.Rd
+665ee56b876aac685d2e35853f8712b8 *man/mccullagh89.Rd
c007d94fac5c46a26baae899a04aaf9d *man/melbmaxtemp.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
-b1d15dda4a8aae6193ce4283ec7251bd *man/micmen.Rd
-5eed4788f6366c1814ea5c9a250424e8 *man/mix2exp.Rd
-232e7ac50df002b7c0a1d7ba70fd0bbf *man/mix2normal.Rd
-364791d9a909112b530deda4135f30f7 *man/mix2poisson.Rd
+2bcfc226edb08c7257783853ff52d87b *man/micmen.Rd
+09a21e6a1a75e5a2e0e30079a1cbdee1 *man/mix2exp.Rd
+ac6dffa8b08d6cba20464169d19e8439 *man/mix2normal.Rd
+03dead9556e4a5968333b55521a7d381 *man/mix2poisson.Rd
131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
a725287719f6c4119913108ee4824ddb *man/multilogit.Rd
-363cdcfbb07a4c10a8b29aae89f293f1 *man/multinomial.Rd
+44c03a67d9ec459f64af85542064beab *man/multinomial.Rd
c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
61319d756fcb8509696cc1aa55ae4ed2 *man/nakagamiUC.Rd
-a47f3ed802d871c374f92151f813e3cb *man/nbcanlink.Rd
+170f52d48791fca14c83e19e00fab025 *man/nbcanlink.Rd
0c0ef87d1221196cdc7fc0d156ac150a *man/nbolf.Rd
-e4ed5c80c412d9c80bab940d61854dbc *man/negbinomial.Rd
-01e4d3c6a45020bef55cbadbad8388d3 *man/negbinomial.size.Rd
-14c4a7db111d0d9f41e5a810a3afdea2 *man/normal.vcm.Rd
-5f5f3d9146d7342cc48ecbd7d7c084d1 *man/notdocumentedyet.Rd
+a9f0d86d35628b552c87595b20573ea5 *man/negbinomial.Rd
+7621ea96a711ce85182ef8c5ed6ed1a7 *man/negbinomial.size.Rd
+61d58f624b00429804e5d1cfbc60e82e *man/normal.vcm.Rd
+e50087c6bac80011e9f401f4f1e6b81a *man/notdocumentedyet.Rd
5e590acdda3ff0a9e2df0db8d233f848 *man/nparamvglm.Rd
98b83e406ea1968ba3e8b17d0933b2cf *man/olym.Rd
858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
-a0b0563f3e865287ae3be10ca2f6eea8 *man/paralogistic.Rd
+97d58f1d0875eca9da52f607aa6a4c01 *man/ozibetaUC.Rd
+3c217a91527fb169737d67244e8572f4 *man/paralogistic.Rd
383805a5130a512c207a6a30c28553d3 *man/paralogisticUC.Rd
b8a1bd0580460ec6155b7c7bb2dae503 *man/paretoIV.Rd
9e30cad5872ffef80576a429e37cdaca *man/paretoIVUC.Rd
@@ -465,7 +469,7 @@ c0c60830c70e697aeab8bc6d11472b78 *man/paretoff.Rd
28a8a9fa1e219d71dcb68cfdb6f88d1b *man/perks.Rd
a0d64aa4469a9ca70fcfa4e5af26956a *man/perksUC.Rd
60fac0e03c8dce88e04e2c3f6def20b9 *man/persp.qrrvglm.Rd
-a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
+e4ea396d024de674ff4bfdda6975bb72 *man/pgamma.deriv.Rd
8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
2c3491351af8d4eb4618723f612c4f26 *man/plotdeplot.lmscreg.Rd
cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
@@ -477,7 +481,7 @@ cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
40f1661d2f26cb11f54c9140c767c61b *man/pneumo.Rd
606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
-8d1096d9bfeee36841be53ebe7300e49 *man/poissonff.Rd
+27ff99e8ac98ded3af8e4f94e6560b33 *man/poissonff.Rd
83497c4069d8c74dc15f0308de0dac89 *man/polf.Rd
696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
@@ -488,16 +492,16 @@ c2c82f9a71f8a7d20e991dee48a9c734 *man/posbinomUC.Rd
aab909e407aa248772db0235e64890dd *man/posbinomial.Rd
dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
-d1594d0598d420affef6f14a1c263685 *man/posnegbinomial.Rd
+2411fe14cfe5fa2f30f25546fb3ed2a0 *man/posnegbinomial.Rd
45b528182d1c01bc352dea7b84fd7671 *man/posnormUC.Rd
-e22de041c65d80b12a971cc0207aa1da *man/posnormal.Rd
+9061c33c9a5d44acc0c5c4fd1eeec22f *man/posnormal.Rd
137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
-89e1ac898695d90f1d6075cafa971460 *man/pospoisson.Rd
+15a13299e9a4052bfe951d8a962e555b *man/pospoisson.Rd
cc06ad7f82789c3703e4977cc39828ed *man/powerlink.Rd
66bad6a1a2012e256b483e1727aca7e9 *man/prats.Rd
ee31e58dfd33c2c3b0d51eac95b553ad *man/predictqrrvglm.Rd
cb6a8c644c31d6ec5e8977ea7b1198df *man/predictvglm.Rd
-4b6da0d45912d1b7fbd9d833f20ec3e9 *man/prentice74.Rd
+1842dc23f02ce22f6aef3247d61965f8 *man/prentice74.Rd
5f4fbb060b2d8386d8d2bfde926d9d5d *man/prinia.Rd
889d24cbaa36abd8df4c54fbf88609e2 *man/probit.Rd
0dc0ebdd8538489ac38a624176612691 *man/propodds.Rd
@@ -506,19 +510,19 @@ ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
8f4e6ebea74037334377e346c5b476f6 *man/qrrvglm.control.Rd
0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
b10bad72776d283be77901e730593f2e *man/qtplot.lmscreg.Rd
-bf8b2681beaeae00d54c8cb5422ad069 *man/quasibinomialff.Rd
-1dbf7bc4c97a7aafebcd736cf1baddbb *man/quasipoissonff.Rd
+6c60658fef3dc7aa5d53d1d954a65e96 *man/quasibinomialff.Rd
+06c7ef40ac06f97042d785a04e81989e *man/quasipoissonff.Rd
bbde69d1bad346cd4ad04763c96d6ffe *man/qvar.Rd
9941ff94abd604ccf9bf44d3819e60ee *man/rayleigh.Rd
a95c0df100dedc0b4e80be0659858441 *man/rayleighUC.Rd
6c45f58f39a63abc2ce8a0923c75cecc *man/rcqo.Rd
97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
-64ea5646e75515a8b40fbd136fa6065e *man/rec.normal.Rd
+dbfea987d2d41c45477fa82bd978ab5e *man/rec.normal.Rd
49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
8e6ffaeea6e88d46925e60f343364a0d *man/rhobit.Rd
d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
-85498654134f98f8aa887bed07b4985a *man/riceff.Rd
+4d5fb32666631b97e65f8a2324f42bcb *man/riceff.Rd
9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
0e12c48578228c300e8c04ab3b08c04a *man/rlplot.egev.Rd
3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
@@ -528,30 +532,30 @@ d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
eb0e4a0a8b0c63cd0c17120e9ca8df53 *man/rrvglm.optim.control.Rd
ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
21a97af245ddc566ddd8935381f6ea22 *man/s.Rd
-3ebe2abf58080c4588a912c695adae77 *man/sc.studentt2.Rd
+c66939737b4a412d7057eaf0da8f67d9 *man/sc.studentt2.Rd
114f55f02750721179c9fc78d93f686c *man/sc.t2UC.Rd
c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
9985ea15444cc317e3e8fc2aad7200da *man/setup.smart.Rd
-451a726465c8e82555ba50a857e86ce0 *man/simplex.Rd
+056aa6efa43e4cd79f5e07769a0c6fd9 *man/simplex.Rd
f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
41af17badd0ef1b17cee591a35d46a12 *man/simulate.vlm.Rd
-bab7555bb34c57f8e56b59af277a5cc4 *man/sinmad.Rd
+5e675d926504dee487751a5a8d26ba47 *man/sinmad.Rd
95cbc5903a187d325c52c3d9d07ee252 *man/sinmadUC.Rd
-5327f9644795a6ed4e1909159156b656 *man/skellam.Rd
+c5839042eff769ac461463b8a7a49428 *man/skellam.Rd
2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
3797084c4e552d460e8b3942a661260a *man/skewnormal.Rd
-9f34bfb220e6d0400971a1efa5db28c6 *man/slash.Rd
+fda97ab39e5972100e2392fd0f26432b *man/slash.Rd
9fc90a85fdd63c0b3c49203f5e3d776f *man/slashUC.Rd
21bada3a13aca65ba49fb28127575144 *man/smart.expression.Rd
5726ef8bb900532df62b24bd4b7b8fe4 *man/smart.mode.is.Rd
21a1d3bd045859ceab377610a53ba976 *man/smartpred.Rd
-736fffd7cddf8065fb1dd167f2aa236c *man/sratio.Rd
-0c48da9ab33eb24273c6348320a64f64 *man/studentt.Rd
-2b5cebdae54f21ad3fc0b3df37c6dd9a *man/summaryvglm.Rd
-0258a94ee53da230fb2aea74fd90192a *man/tikuv.Rd
+81d3f84a4dc023adad8e37f46b949ae6 *man/sratio.Rd
+501d551af0419b35ef1bd47bf4d740db *man/studentt.Rd
+8f91c92bee6e12da2adea37b35535a8e *man/summaryvglm.Rd
+234bf47d30e9afe3629e4ad8c1b39b4b *man/tikuv.Rd
ccaa57b076049fdf3cee1c321a2ab456 *man/tikuvUC.Rd
-d9f889c35db05e7eef26be323a3842cb *man/tobit.Rd
+190c660343d7f8465fc01c043c28f658 *man/tobit.Rd
5130a86e60a3b1010b1364155a1afdd0 *man/tobitUC.Rd
b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
59e040af3616943e93946ddf0ba96aba *man/triangle.Rd
@@ -559,10 +563,10 @@ b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
aeaf42ac6e475f1dc3f180450d56c2ee *man/truncparetoUC.Rd
-1d47c3a8f732ea01782c7e0b9929a921 *man/truncweibull.Rd
+1658b0820ef97964c22fa4f3a18d13e6 *man/truncweibull.Rd
50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-5d46e81b3078ef071d0d2afe8cfae91d *man/undocumented-methods.Rd
-f8f257cf6c91bb3c2765bc9c1d5fd4f1 *man/uninormal.Rd
+f026eb5b7a1fba0724603f185abbe7d0 *man/undocumented-methods.Rd
+395bf20844e881303e4f76da27a693cd *man/uninormal.Rd
6a60d8e09c890e47042be1203aee9547 *man/vcovvlm.Rd
f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
8ab09ea32a3839db780ac641218c322e *man/vgam-class.Rd
@@ -571,43 +575,43 @@ ea3fe248b860921783367037c8302c49 *man/vgam.control.Rd
126b55b4567a63cf2edb04a8b6d91506 *man/vglm-class.Rd
71c4c86e48be338c410905722e51afb8 *man/vglm.Rd
0fb3b6b60182efdce44c9d225bcf0a64 *man/vglm.control.Rd
-7cab64090aec93a3edb1a7df961a1fe0 *man/vglmff-class.Rd
-95420c89f2280b447cbd7784f83e7454 *man/vonmises.Rd
+33ea80f5f411700dff4b19371517c743 *man/vglmff-class.Rd
+3c3444f49659331d0b0da1c4e28ea9c8 *man/vonmises.Rd
25b2ef45238e3f61e82dcf52f3d17090 *man/vsmooth.spline.Rd
c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
-c7bfab9a73e5d5914f5adeac357a54c6 *man/weibull.mean.Rd
-3f1be522e8c9beebe0835912ca81c8db *man/weibullR.Rd
+8bc759f493a94c1df7477b32b35ef8a9 *man/weibull.mean.Rd
+f490b97d72a0bdd81753f2cfc45e6809 *man/weibullR.Rd
e41e54f8623a002d20e55df65c5b6a87 *man/weightsvglm.Rd
e7fd9c7165410545d49481aeded2b317 *man/wine.Rd
a814b37503a9534c86789482ab81333f *man/wrapup.smart.Rd
622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
-ebfff81b0f4730417de95f80b7c82c41 *man/yip88.Rd
+28e8c835229f9fdbb6605917fa38e3aa *man/yip88.Rd
225fcd19868f17b4a5d2590e834cb888 *man/yulesimon.Rd
ef96177f3ee5b07478b717529111adea *man/yulesimonUC.Rd
ae671324c0f93f66adc72f053ef9ebd9 *man/zabinomUC.Rd
-87b0b38fe7357a2259edc9f1159add84 *man/zabinomial.Rd
+cb21430df0f12962f6abf34d9d0e51ce *man/zabinomial.Rd
7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
-925e2c8e227ffb6a26192aeeb1fd4f28 *man/zageometric.Rd
+8c0f4c29525dab1b9715b9f7fe40facc *man/zageometric.Rd
78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
-7292195daf3dd8898a1eb971f9f46d21 *man/zanegbinomial.Rd
+285850a216064c3c2395c91b38ae222a *man/zanegbinomial.Rd
b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
-e9861638c7394e812db8f7d18b660e3a *man/zapoisson.Rd
-64b7af3fd4cd0d0c367778c8bacabe24 *man/zero.Rd
-7985338d08e88fa23cce9cc0a09724b6 *man/zeta.Rd
+11ebb5c9786781ef6eceaf18a5373ec4 *man/zapoisson.Rd
+426432d39c7a2b0975e6cf9fc3ce520d *man/zero.Rd
+2364749f0041ab1fc22b6469bef31fe4 *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-648342ad0677587e55e4f92d906d0d42 *man/zetaff.Rd
+ffdfc9ccb4ade0814af72eded433db03 *man/zetaff.Rd
bce8783175ca63f89475e705b2fb1709 *man/zibinomUC.Rd
-ae0388e04ce39367e9c14bf1ad39ef06 *man/zibinomial.Rd
+2b2cdf14b7faa05c066e45e35a6af0bb *man/zibinomial.Rd
7b1d2ee37f339b9a218f1db4abb30cdd *man/zigeomUC.Rd
-8de969235239ce10332c2b91304931f5 *man/zigeometric.Rd
+75b757f1586dba0d8837bc4bc682da73 *man/zigeometric.Rd
025dd2763701ec5b6880bcd6f4a9f35a *man/zinegbinUC.Rd
-87def1c11bb8e7e5f4857a8c7eeda491 *man/zinegbinomial.Rd
-a9b1d67033daa03a9880227187041ae5 *man/zipebcom.Rd
+fd3fbee62f3373263e83acfc09023734 *man/zinegbinomial.Rd
+0d842051c2750e57aa0b794f2f4640fe *man/zipebcom.Rd
abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
fd2adf6acc7093de70cb3c16d3819f23 *man/zipfUC.Rd
0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
-ce9bd4504bdb369c39394ece70c0beb0 *man/zipoisson.Rd
+c92c30581138442d15678d61eb9ef483 *man/zipoisson.Rd
f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
@@ -620,7 +624,7 @@ feba7ba09eca8007392e0405c4b373a8 *src/muxr3.c
473bc0b2f4d6757fa9b397ac0d7c9e47 *src/rgam3.c
6aee7dc8f242ea6e9446ade5b7edeee5 *src/specfun3.c
4814bb73b4c3eedc7507ad99511c7dc5 *src/tyeepolygamma.f
-10939d9fb380d54da716a835d37fdf75 *src/tyeepolygamma3.c
+80322c801242c7751e7bdcd0ae192744 *src/tyeepolygamma3.c
79cf39f1d83f25e29a6c56d344ea8d76 *src/vcall2.f
3bc5ecda1e1216006e74ebd72b77d662 *src/vdigami.f
3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
diff --git a/NAMESPACE b/NAMESPACE
index 7d5aacc..87e063b 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -11,11 +11,29 @@ useDynLib(VGAM)
export(as.char.expression)
+export(predictvglmS4VGAM)
+export(EIM.posNB.speciald, EIM.NB.speciald,
+ EIM.posNB.specialp, EIM.NB.specialp)
+export(.min.criterion.VGAM)
+export(pozibetabinom, pozibetabinom.ab,
+ rozibetabinom, rozibetabinom.ab,
+ dozibetabinom, dozibetabinom.ab,
+ Init.mu)
+export(log1mexp)
+export(dozibeta, pozibeta, qozibeta, rozibeta)
+export(logitoffsetlink)
+export(showvglmS4VGAM)
+export(showvgamS4VGAM)
+export(subsetarray3)
+export(tapplymat1)
+export(findFirstMethod)
+export(summaryvglmS4VGAM)
+export(showsummaryvglmS4VGAM)
S3method(vcov, vlm, vcovvlm)
@@ -39,6 +57,7 @@ exportMethods(responseName)
"matplot", "matpoints", "mtext", "par", "points", "rug",
"segments", "text")
importFrom("methods", "as", "is", "new", "slot", "slot<-", "slotNames",
+ "callNextMethod", "existsMethod", "signature",
"show")
importFrom("stats", ".getXlevels", "as.formula", "contrasts<-",
"dbeta", "dbinom", "delete.response", "deriv3", "dgamma",
@@ -47,6 +66,7 @@ exportMethods(responseName)
"model.offset", "model.response", "model.weights",
"na.fail", "napredict", "optim", "pbeta", "pbinom",
"pgamma", "pgeom", "pnbinom", "polym", "printCoefmat",
+ "plogis", "qlogis",
"pweibull", "qbeta", "qbinom", "qchisq", "qf", "qgamma",
"qgeom", "qnbinom", "qt", "quantile", "qweibull", "rbeta",
"rbinom", "rgamma", "rgeom", "rlnorm", "rlogis", "rnbinom",
@@ -349,7 +369,7 @@ cdf, cdf.lms.bcg, cdf.lms.bcn,
cdf.lms.yjn, cdf.vglm,
Coef.rrvgam, Coefficients,
coefqrrvglm,
-coefvlm,
+coefvlm, coefvgam,
coefvsmooth.spline, coefvsmooth.spline.fit,
constraints, constraints.vlm,
deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
@@ -488,7 +508,7 @@ triangle, dtriangle, ptriangle, qtriangle, rtriangle,
vcovvlm,
vglm.fit, vgam.fit,
vglm.garma.control, vglm.multinomial.control,
-vglm.multinomial.deviance.control, vglm.vcategorical.control,
+vglm.multinomial.deviance.control, vglm.VGAMcategorical.control,
vlm, vlm.control,
vnonlinear.control,
wweights, yeo.johnson,
@@ -634,7 +654,10 @@ loglinb2, loglinb3,
loglog,
lvplot.qrrvglm, lvplot.rrvglm,
Max, MNSs,
-dmultinomial, multinomial, margeff)
+dmultinomial, multinomial,
+margeffS4VGAM,
+cratio.derivs,
+margeff)
export(
diff --git a/NEWS b/NEWS
index a86913e..c4d6780 100755
--- a/NEWS
+++ b/NEWS
@@ -1,11 +1,94 @@
*************************************************
* *
- * 0.9 SERIES NEWS *
+ * 1.0 SERIES NEWS *
* *
**************************************************
+ CHANGES IN VGAM VERSION 1.0-1
+
+NEW FEATURES
+
+ o Argument 'zero' has been programmed to handle (a more
+ intuitive) a character vector. Each value of this
+ vector is fed into grep() with fixed = TRUE. Many
+ VGAM family functions have an equivalent default
+ character value of 'zero'.
+ o New slots: "validparams" and "validfitted" for providing
+ more opportunities for half-stepping.
+ o The "infos" slot of most family functions have a
+ component called "parameters.names", and also
+ "Q1" and "M1".
+ o margeff() works for cratio(), sratio() and
+ acat() models, and is generic (with S4 dispatch).
+    For this, "vcategorical" was replaced by "VGAMcategorical",
+ and "VGAMordinal" is also a virtual class.
+ And margeffS4VGAM() is generic.
+ o summaryvglm() calls the generic summaryvglmS4VGAM() in order
+ to compute useful quantities, and it is printed by
+ showsummaryvglmS4VGAM(). Specific examples include
+ the binom2.or() and cumulative() families.
+ o Similarly, show.vglm() calls the generic showvglmS4VGAM() in
+ order to print extra potentially useful output.
+    Ditto for show.vgam(), which calls showvgamS4VGAM().
+ o Similarly, predictvglm() calls the generic predictvglmS4VGAM()
+ in order to allow for family-function-specific prediction.
+ o logitoffsetlink() is new.
+ o [dpqr]ozibeta() and [dpr]ozibetabinom() and
+ [dpr]ozibetabinom.ab() are new;
+ by Xiangjie Xue and Thomas Yee.
+ o coef(..., type = c("linear", "nonlinear")) is available
+ for "vgam" objects.
+ o The following have new 'weights' slots (based on
+ negbinomial()@weight): posnegbinomial(), zanegbinomial[ff](),
+ zinegbinomial[ff](). It is based on the expectation of
+ a difference between 2 trigamma function evaluations being
+ computed using pnbinom(lower.tail = FALSE) and variants.
+ Both functions have some argument defaults tweaked.
+ o log1mexp() and log1pexp(), based on Martin Maechler's 2012 paper,
+    are 'new'.
+ o Many zero-altered and zero-inflated families have additional
+ 'type.fitted' choices.
+    Initial values for such families have been improved (via Init.mu()).
+ o expint(), expexpint(), expint.E1() allow the computation of the
+ first few derivatives.
+ o Tested okay on R 3.2.4.
+
+
+
+BUG FIXES and CHANGES
+
+ o Order of arguments changed: binom2.rho(lmu, lrho),
+ negbinomial(), posnegbinomial(),
+ zanegbinomial(), zinegbinomial().
+ o pzanegbin() could return -.Machine$double.eps. Thanks to
+ Ryan Thompson for notifying me about this.
+ o pbinorm() used to have a bug wrt Inf and -Inf values in
+ its arguments. Thanks to Xiangjie Xue for picking this up.
+ o plota21() used qchisq(0.95, df = 1) instead of
+ qchisq(0.95, df = 1) / 2 for LRT confidence intervals.
+ Thanks to Russell Millar for picking this up.
+ o A new function Init.mu() is used to initialize several
+ family functions, especially those based on the negative
+ binomial and Poisson distributions.
+ The default for Init.mu() is suitable for 0-inflated
+ data.
+ o The fitted value of polya() was wrong (wasn't the mean).
+ o Default value of argument 'zero' has changed for:
+ bisa(), gumbelII().
+ o zibinomialff()@weight had a bug when calling iam().
+ o [dpqr]nbinom(..., size = Inf) was buggy; it produced many NaNs.
+ Thanks to Martin Maechler for promptly fixing this, for R 3.2.4.
+ o The arguments of interleave.VGAM() have changed: from
+ interleave.VGAM(L, M) to interleave.VGAM(.M, M1, inverse = FALSE).
+    This is a compromise solution with respect to my book.
+ The 'inverse' argument is due to Victor Miranda.
+ o summaryvglm() evidently evaluated the weights slot of an
+ object twice. Now it is only done once.
+
+
+
CHANGES IN VGAM VERSION 1.0-0
NEW FEATURES
@@ -69,6 +152,14 @@ BUG FIXES and CHANGES
+ *************************************************
+ * *
+ * 0.9 SERIES NEWS *
+ * *
+ **************************************************
+
+
+
CHANGES IN VGAM VERSION 0.9-8
NEW FEATURES
@@ -148,7 +239,7 @@ BUG FIXES and CHANGES
mu and size parameter has values lying in a certain range).
This may be time- and/or memory-hungry, but the user has
control over this via some arguments such as max.mu, min.size
- and chunk.max.Mb.
+ and chunk.max.MB.
o Renamed functions:
elogit() is now called extlogit(),
fsqrt() is now called foldsqrt().
@@ -642,7 +733,6 @@ BUG FIXES and CHANGES
etc. The year variable has been added.
-
CHANGES IN VGAM VERSION 0.9-0
NEW FEATURES
@@ -726,6 +816,13 @@ BUG FIXES and CHANGES
+ **************************************************
+ * *
+ * 0.8 SERIES NEWS *
+ * *
+ **************************************************
+
+
CHANGES IN VGAM VERSION 0.8-7
NEW FEATURES
diff --git a/R/aamethods.q b/R/aamethods.q
index 7416079..b2afadd 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -60,6 +60,8 @@ setClass("vglmff", representation(
"middle2" = "expression",
"summary.dispersion" = "logical",
"vfamily" = "character",
+ "validparams" = "function", # Added 20160305
+ "validfitted" = "function", # Added 20160305
"simslot" = "function",
"deriv" = "expression",
"weight" = "expression"), # "call"
@@ -542,6 +544,70 @@ setMethod("QR.Q", "vglm",
+if (!isGeneric("margeffS4VGAM"))
+ setGeneric("margeffS4VGAM",
+ function(object, subset = NULL,
+ VGAMff,
+ ...)
+ standardGeneric("margeffS4VGAM"),
+ package = "VGAM")
+
+
+
+
+
+if (!isGeneric("summaryvglmS4VGAM"))
+ setGeneric("summaryvglmS4VGAM",
+ function(object,
+ VGAMff,
+ ...)
+ standardGeneric("summaryvglmS4VGAM"),
+ package = "VGAM")
+
+
+if (!isGeneric("showsummaryvglmS4VGAM"))
+ setGeneric("showsummaryvglmS4VGAM",
+ function(object,
+ VGAMff,
+ ...)
+ standardGeneric("showsummaryvglmS4VGAM"),
+ package = "VGAM")
+
+
+
+
+
+if (!isGeneric("showvglmS4VGAM"))
+ setGeneric("showvglmS4VGAM",
+ function(object,
+ VGAMff,
+ ...)
+ standardGeneric("showvglmS4VGAM"),
+ package = "VGAM")
+
+if (!isGeneric("showvgamS4VGAM"))
+ setGeneric("showvgamS4VGAM",
+ function(object,
+ VGAMff,
+ ...)
+ standardGeneric("showvgamS4VGAM"),
+ package = "VGAM")
+
+
+
+if (!isGeneric("predictvglmS4VGAM"))
+ setGeneric("predictvglmS4VGAM",
+ function(object,
+ VGAMff,
+ ...)
+ standardGeneric("predictvglmS4VGAM"),
+ package = "VGAM")
+
+
+
+
+
+
diff --git a/R/calibrate.q b/R/calibrate.q
index 83d1c43..b6370e2 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -183,7 +183,7 @@ calibrate.qrrvglm <-
BestOFpar <- rbind(BestOFpar, OFpar[index, ])
BestOFvalues <- c(BestOFvalues, OFvalues[index])
} else {
- BestOFpar <- rbind(BestOFpar, rep(as.numeric(NA), len = Rank))
+ BestOFpar <- rbind(BestOFpar, rep(NA_real_, len = Rank))
BestOFvalues <- c(BestOFvalues, NA)
}
}
@@ -324,7 +324,7 @@ calibrate.qrrvglm <-
mu.function) {
Rank <- length(bnu)
NOS <- Coefs at NOS
- eta <- matrix(as.numeric(NA), 1, NOS)
+ eta <- matrix(NA_real_, 1, NOS)
for (jlocal in 1:NOS) {
eta[1, jlocal] <- predictrrvgam(object, grid = bnu, sppno = jlocal,
Rank = Rank, deriv = 0)$yvals
diff --git a/R/cao.R b/R/cao.R
index f41501c..d6e2023 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -83,7 +83,7 @@ cao <- function(formula,
cao.fitter <- get(method)
- deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
+ deviance.Bestof <- rep(NA_real_, len = control$Bestof)
for (tries in 1:control$Bestof) {
if (control$trace && (control$Bestof > 1)) {
cat(paste("\n========================= Fitting model",
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 1225a8e..6929b94 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -659,8 +659,8 @@ callcaoc <- function(cmatrix,
if (Rank == 2) {
- smopar <- (c(spar1, spar2))[interleave.VGAM(4 * NOS, M = 2)]
- dofvec <- (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4 * NOS, M = 2)]
+ smopar <- (c(spar1, spar2))[interleave.VGAM(4 * NOS, M1 = 2)]
+ dofvec <- (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4 * NOS, M1 = 2)]
lamvec <- 0 * dofvec
stop("20100414; havent got Rank = 2 going yet")
} else {
@@ -1159,11 +1159,11 @@ Coef.rrvgam <- function(object,
object at latvar
}
- optimum <- matrix(as.numeric(NA), Rank, NOS,
+ optimum <- matrix(NA_real_, Rank, NOS,
dimnames = list(latvar.names, ynames))
extents <- apply(latvar.mat, 2, range) # 2 by R
- maximum <- rep(as.numeric(NA), len = NOS)
+ maximum <- rep(NA_real_, len = NOS)
which.species <- 1:NOS # Do it for all species
if (Rank == 1) {
@@ -1859,7 +1859,7 @@ persp.rrvgam <-
which.species.numer <- match(which.species, sppNames)
}
- LP <- matrix(as.numeric(NA), nrow(latvarmat), NOS)
+ LP <- matrix(NA_real_, nrow(latvarmat), NOS)
for (sppno in 1:NOS) {
temp <- predictrrvgam(object = object, grid = latvarmat, sppno = sppno,
Rank = Rank, deriv = 0, MSratio = MSratio)
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 17115c2..0effe53 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -49,7 +49,7 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
if (all(trivial.constraints(Hlist) == 1)) {
Bmat <- matrix(ans, nrow = ncolx, ncol = M, byrow = TRUE)
} else {
- Bmat <- matrix(as.numeric(NA), nrow = ncolx, ncol = M)
+ Bmat <- matrix(NA_real_, nrow = ncolx, ncol = M)
if (!matrix.out)
return(ans)
@@ -172,3 +172,32 @@ setMethod("Coef", "vlm", function(object, ...)
+coefvgam <-
+ function(object, type = c("linear", "nonlinear"), ...) {
+ type <- match.arg(type, c("linear", "nonlinear"))[1]
+
+
+ if (type == "linear") {
+ coefvlm(object, ...)
+ } else {
+ object at Bspline
+ }
+}
+
+
+
+setMethod("coefficients", "vgam",
+ function(object, ...)
+ coefvgam(object, ...))
+
+
+setMethod("coef", "vgam",
+ function(object, ...)
+ coefvgam(object, ...))
+
+
+
+
+
+
+
diff --git a/R/cqo.R b/R/cqo.R
index 8d42e47..f2cae72 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -78,7 +78,7 @@ cqo <- function(formula,
cqo.fitter <- get(method)
- deviance.Bestof <- rep(as.numeric(NA), len = control$Bestof)
+ deviance.Bestof <- rep(NA_real_, len = control$Bestof)
for (tries in 1:control$Bestof) {
if (control$trace && (control$Bestof>1))
cat(paste("\n========================= Fitting model", tries,
diff --git a/R/family.actuary.R b/R/family.actuary.R
index b297e51..c0972d8 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -164,7 +164,7 @@ rgumbelII <- function(n, scale = 1, shape) {
iscale = NULL, ishape = NULL,
probs.y = c(0.2, 0.5, 0.8),
perc.out = NULL, # 50,
- imethod = 1, zero = -1, nowarning = FALSE) {
+ imethod = 1, zero = "shape", nowarning = FALSE) {
@@ -178,9 +178,6 @@ rgumbelII <- function(n, scale = 1, shape) {
lscale <- attr(escale, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
@@ -212,14 +209,16 @@ rgumbelII <- function(n, scale = 1, shape) {
"Variance: scale^(2/shape) * (gamma(1 - 2/shape) - ",
"gamma(1 + 1/shape)^2)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
+
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ parameters.names = c("scale", "shape"),
perc.out = .perc.out ,
zero = .zero )
}, list( .zero = zero,
@@ -246,14 +245,14 @@ rgumbelII <- function(n, scale = 1, shape) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , .escale , tag = FALSE),
namesof(mynames2, .lshape , .eshape , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
@@ -285,7 +284,7 @@ rgumbelII <- function(n, scale = 1, shape) {
etastart <-
cbind(theta2eta(Scale.init, .lscale , .escale ),
theta2eta(Shape.init, .lshape , .eshape ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}
}), list(
@@ -326,8 +325,8 @@ rgumbelII <- function(n, scale = 1, shape) {
M1 <- extra$M1
misc$link <-
c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -406,7 +405,7 @@ rgumbelII <- function(n, scale = 1, shape) {
myderiv <- c(w) * cbind(dl.dscale, dl.dshape) *
cbind(dscale.deta, dshape.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape
) )),
@@ -830,15 +829,16 @@ perks.control <- function(save.weights = TRUE, ...) {
"Median: qperks(p = 0.5, scale = scale, shape = shape)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
- nsimEIM = .nsimEIM,
+ nsimEIM = .nsimEIM ,
+ parameters.names = c("scale", "shape"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -864,12 +864,12 @@ perks.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , .escale , tag = FALSE),
namesof(mynames2, .lshape , .eshape , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -915,7 +915,7 @@ perks.control <- function(save.weights = TRUE, ...) {
etastart <-
cbind(theta2eta(matC, .lscale , .escale ),
theta2eta(matH, .lshape , .eshape ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
} # End of !length(etastart)
}), list( .lscale = lscale, .lshape = lshape,
.eshape = eshape, .escale = escale,
@@ -934,8 +934,8 @@ perks.control <- function(save.weights = TRUE, ...) {
misc$link <-
c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1018,7 +1018,7 @@ perks.control <- function(save.weights = TRUE, ...) {
dthetas.detas <- cbind(dscale.deta, dshape.deta)
myderiv <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
@@ -1026,7 +1026,7 @@ perks.control <- function(save.weights = TRUE, ...) {
weight = eval(substitute(expression({
NOS <- M / M1
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
@@ -1339,15 +1339,16 @@ makeham.control <- function(save.weights = TRUE, ...) {
"Median: qmakeham(p = 0.5, scale, shape, epsilon)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
Q1 = 1,
nsimEIM = .nsimEIM,
+ parameters.names = c("scale", "shape"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -1374,14 +1375,14 @@ makeham.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames3 <- paste("epsilon", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
+ mynames3 <- param.names("epsilon", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , .escale , tag = FALSE),
namesof(mynames2, .lshape , .eshape , tag = FALSE),
namesof(mynames3, .lepsil , .eepsil , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
@@ -1460,7 +1461,7 @@ makeham.control <- function(save.weights = TRUE, ...) {
etastart <- cbind(theta2eta(matC, .lscale , .escale ),
theta2eta(matH, .lshape , .eshape ),
theta2eta(matE, .lepsil , .eepsil ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
} # End of !length(etastart)
}), list(
.lshape = lshape, .lscale = lscale, .lepsil = lepsil,
@@ -1483,9 +1484,9 @@ makeham.control <- function(save.weights = TRUE, ...) {
misc$link <-
c(rep( .lscale , length = ncoly),
rep( .lshape , length = ncoly),
- rep( .lepsil , length = ncoly))[interleave.VGAM(M, M = M1)]
+ rep( .lepsil , length = ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2, mynames3)[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1575,13 +1576,13 @@ makeham.control <- function(save.weights = TRUE, ...) {
myderiv <- c(w) * cbind(dl.dscale,
dl.dshape,
dl.depsil) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
.eshape = eshape, .escale = escale, .eepsil = eepsil ))),
weight = eval(substitute(expression({
NOS <- M / M1
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
wz <- matrix(0.0, n, M + M - 1 + M - 2) # wz has half-bandwidth 3
ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE) # Use simulated EIM
@@ -1818,15 +1819,16 @@ gompertz.control <- function(save.weights = TRUE, ...) {
"Median: scale * log(2 - 1 / shape)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
nsimEIM = .nsimEIM,
+ parameters.names = c("scale", "shape"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -1852,12 +1854,12 @@ gompertz.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , .escale , tag = FALSE),
namesof(mynames2, .lshape , .eshape , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -1904,7 +1906,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
etastart <- cbind(theta2eta(matC, .lscale , .escale ),
theta2eta(matH, .lshape , .eshape ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
} # End of !length(etastart)
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale,
@@ -1921,8 +1923,8 @@ gompertz.control <- function(save.weights = TRUE, ...) {
M1 <- extra$M1
misc$link <-
c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1999,7 +2001,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
dthetas.detas <- cbind(dscale.deta, dshape.deta)
myderiv <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
@@ -2007,7 +2009,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
weight = eval(substitute(expression({
NOS <- M / M1
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
@@ -2182,15 +2184,16 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
"Median: log(3 - alpha) / lambda"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
nsimEIM = .nsimEIM,
+ parameters.names = c("alpha", "lambda"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -2217,12 +2220,12 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("alpha", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("alpha", ncoly)
+ mynames2 <- param.names("lambda", ncoly)
predictors.names <-
c(namesof(mynames1, .lalpha0 , .ealpha0 , tag = FALSE),
namesof(mynames2, .llambda , .elambda , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -2262,7 +2265,7 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
etastart <- cbind(theta2eta(matA, .lalpha0, .ealpha0 ),
theta2eta(matL, .llambda, .elambda ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
mustart <- NULL # Since etastart has been computed.
} # End of !length(etastart)
}), list( .lalpha0 = lalpha0, .llambda = llambda,
@@ -2281,8 +2284,8 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
M1 <- extra$M1
misc$link <-
c(rep( .lalpha0 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -2339,7 +2342,7 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
dthetas.detas <- cbind(dalpha0.deta,
dlambda.deta)
myderiv <- c(w) * cbind(dl.dalpha0, dl.dlambda) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lalpha0 = lalpha0, .llambda = llambda,
.ealpha0 = ealpha0, .elambda = elambda ))),
@@ -2347,7 +2350,7 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
weight = eval(substitute(expression({
NOS <- M / M1
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
@@ -2424,7 +2427,8 @@ if (ii < 3) {
gshape1.a = exp(-5:5),
gshape2.p = exp(-5:5),
gshape3.q = exp(-5:5),
- zero = ifelse(lss, -(2:4), -c(1, 3:4))) {
+ zero = "shape") {
+
@@ -2446,8 +2450,6 @@ if (ii < 3) {
stop("Bad input for argument 'ishape3.q'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -2482,9 +2484,9 @@ if (ii < 3) {
"gamma(shape3.q - 1/shape1.a) / ",
"(gamma(shape2.p) * gamma(shape3.q))"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 4
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 4)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 4,
@@ -2492,15 +2494,18 @@ if (ii < 3) {
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape1.a", "shape2.p", "shape3.q") else
+ c("shape1.a", "scale", "shape2.p", "shape3.q"),
lscale = .lscale , lshape1.a = .lshape1.a ,
escale = .escale , eshape1.a = .eshape1.a ,
lshape2.p = .lshape2.p , lshape3.q = .lshape3.q ,
- eshape2.p = .eshape2.p , eshape3.q = .eshape3.q ,
- .zero = zero )
+ eshape2.p = .eshape2.p , eshape3.q = .eshape3.q )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -2531,13 +2536,13 @@ if (ii < 3) {
},
namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE),
namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
aa.init <-
pp.init <-
- qq.init <- matrix(as.numeric(NA), n, NOS)
+ qq.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -2558,7 +2563,7 @@ if (ii < 3) {
allmat1 <- expand.grid(shape1.a = gshape1.a,
shape2.p = gshape2.p,
shape3.q = gshape3.q)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.gbII <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -2603,7 +2608,7 @@ if (ii < 3) {
theta2eta(sc.init, .lscale , earg = .escale )),
theta2eta(pp.init , .lshape2.p , earg = .eshape2.p ),
theta2eta(qq.init , .lshape3.q , earg = .eshape3.q ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -2649,16 +2654,16 @@ if (ii < 3) {
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
rep( .lshape2.p , length = ncoly),
rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha2.names, sha3.names)
} else {
c(sha1.names, scaL.names, sha2.names, sha3.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
if ( .lss ) {
misc$earg[[M1*ii-3]] <- .escale
@@ -2761,7 +2766,7 @@ if (ii < 3) {
dl.dp * dp.deta,
dl.dq * dq.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
@@ -3313,7 +3318,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape1.a = exp(-5:5),
gshape3.q = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = ifelse(lss, -(2:3), -c(1, 3))) {
+ zero = "shape") {
@@ -3339,8 +3344,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -3370,9 +3373,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"gamma(shape3.q - 1/shape1.a) / ",
"gamma(shape3.q)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
@@ -3380,15 +3383,18 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape1.a", "shape3.q") else
+ c("shape1.a", "scale", "shape3.q"),
lscale = .lscale , lshape1.a = .lshape1.a ,
escale = .escale , eshape1.a = .eshape1.a ,
lshape3.q = .lshape3.q ,
- eshape3.q = .eshape3.q ,
- .zero = zero )
+ eshape3.q = .eshape3.q )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape3.q = lshape3.q,
.eshape3.q = eshape3.q,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -3417,12 +3423,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
namesof(scaL.names , .lscale , earg = .escale , tag = FALSE))
},
namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
aa.init <-
- qq.init <- matrix(as.numeric(NA), n, NOS)
+ qq.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -3440,7 +3446,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape3.q <- rep( .ishape3.q , length = NOS)
allmat1 <- expand.grid(shape1.a = gshape1.a,
shape3.q = gshape3.q)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.sinm <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -3492,7 +3498,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
cbind(theta2eta(aa.init, .lshape1.a , earg = .eshape1.a ),
theta2eta(sc.init, .lscale , earg = .escale )),
theta2eta(qq.init , .lshape3.q , earg = .eshape3.q ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -3541,16 +3547,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha3.names)
} else {
c(sha1.names, scaL.names, sha3.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
if ( .lss ) {
misc$earg[[M1*ii-2]] <- .escale
@@ -3678,7 +3684,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
dl.dscale * dscale.deta,
dl.dq * dq.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape3.q = lshape3.q,
@@ -3747,7 +3753,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape1.a = exp(-5:5),
gshape2.p = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = ifelse(lss, -(2:3), -c(1, 3))) {
+ zero = "shape") {
@@ -3774,8 +3780,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -3805,9 +3809,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"gamma(1 - 1/shape1.a) / ",
"gamma(shape2.p)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
@@ -3815,15 +3819,18 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names =
+ if ( .lss ) c("scale", "shape1.a", "shape2.p") else
+ c("shape1.a", "scale", "shape2.p"),
lscale = .lscale , lshape1.a = .lshape1.a ,
escale = .escale , eshape1.a = .eshape1.a ,
lshape2.p = .lshape2.p ,
- eshape2.p = .eshape2.p ,
- .zero = zero )
+ eshape2.p = .eshape2.p )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape2.p = lshape2.p,
.eshape2.p = eshape2.p,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -3852,12 +3859,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
namesof(scaL.names , .lscale , earg = .escale , tag = FALSE))
},
namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
aa.init <-
- pp.init <- matrix(as.numeric(NA), n, NOS)
+ pp.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -3875,7 +3882,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape2.p <- rep( .ishape2.p , length = NOS)
allmat1 <- expand.grid(shape1.a = gshape1.a,
shape2.p = gshape2.p)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.dagu <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -3928,7 +3935,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
cbind(theta2eta(aa.init, .lshape1.a , earg = .eshape1.a ),
theta2eta(sc.init, .lscale , earg = .escale )),
theta2eta(pp.init , .lshape2.p , earg = .eshape2.p ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -3978,16 +3985,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly),
rep( .lshape2.p , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names, sha2.names)
} else {
c(sha1.names, scaL.names, sha2.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
if ( .lss ) {
misc$earg[[M1*ii-2]] <- .escale
@@ -4115,7 +4122,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
dl.dscale * dscale.deta,
dl.dp * dp.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lshape2.p = lshape2.p,
@@ -4177,7 +4184,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape2.p = exp(-5:5),
gshape3.q = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = -(2:3)) {
+ zero = "shape") {
@@ -4200,8 +4207,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -4227,9 +4232,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"gamma(shape3.q - 1) / ",
"(gamma(shape2.p) * gamma(shape3.q))"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
@@ -4237,11 +4242,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = c("scale", "shape2.p", "shape3.q"),
lscale = .lscale ,
escale = .escale ,
lshape2.p = .lshape2.p , lshape3.q = .lshape3.q ,
- eshape2.p = .eshape2.p , eshape3.q = .eshape3.q ,
- .zero = zero )
+ eshape2.p = .eshape2.p , eshape3.q = .eshape3.q )
}, list( .lscale = lscale ,
.escale = escale ,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
@@ -4269,12 +4274,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(namesof(scaL.names , .lscale , earg = .escale , tag = FALSE),
namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE),
namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
pp.init <-
- qq.init <- matrix(as.numeric(NA), n, NOS)
+ qq.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -4292,7 +4297,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gshape3.q <- rep( .ishape3.q , length = NOS)
allmat1 <- expand.grid(shape2.p = gshape2.p,
shape3.q = gshape3.q)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.beII <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -4344,7 +4349,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
cbind(theta2eta(sc.init , .lscale , earg = .escale ),
theta2eta(pp.init , .lshape2.p , earg = .eshape2.p ),
theta2eta(qq.init , .lshape3.q , earg = .eshape3.q ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale ,
.escale = escale ,
@@ -4386,12 +4391,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( .lscale , length = ncoly),
rep( .lshape2.p , length = ncoly),
rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- c(scaL.names, sha2.names, sha3.names)
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
misc$earg[[M1*ii-2]] <- .escale
misc$earg[[M1*ii-1]] <- .eshape2.p
@@ -4463,7 +4468,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(w) * cbind(dl.dscale * dscale.deta,
dl.dp * dp.deta,
dl.dq * dq.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale ,
.escale = escale ,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
@@ -4510,7 +4515,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale = exp(-5:5),
gshape3.q = exp(-5:5), # Finite mean only if qq>1
probs.y = c(0.25, 0.50, 0.75),
- zero = -2) {
+ zero = "shape") {
@@ -4530,8 +4535,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -4550,9 +4553,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
namesof("shape3.q" , lshape3.q, earg = eshape3.q), "\n",
"Mean: scale / (shape3.q - 1)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
@@ -4560,11 +4563,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = c("scale", "shape3.q"),
lscale = .lscale ,
escale = .escale ,
lshape3.q = .lshape3.q ,
- eshape3.q = .eshape3.q ,
- .zero = zero )
+ eshape3.q = .eshape3.q )
}, list( .lscale = lscale ,
.escale = escale ,
.lshape3.q = lshape3.q,
@@ -4590,11 +4593,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
predictors.names <-
c(namesof(scaL.names , .lscale , earg = .escale , tag = FALSE),
namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
- qq.init <- matrix(as.numeric(NA), n, NOS)
+ qq.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -4608,7 +4611,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if (length( .ishape3.q ))
gshape3.q <- rep( .ishape3.q , length = NOS)
allmat1 <- cbind(shape3.q = gshape3.q)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.lomx <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -4651,7 +4654,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
etastart <-
cbind(theta2eta(sc.init, .lscale , earg = .escale ),
theta2eta(qq.init, .lshape3.q , earg = .eshape3.q ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale ,
.escale = escale ,
@@ -4690,13 +4693,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( .lscale , length = ncoly),
rep( .lshape3.q , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <-
c(scaL.names, sha3.names)
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
misc$earg[[M1*ii-1]] <- .escale
misc$earg[[M1*ii ]] <- .eshape3.q
@@ -4784,7 +4787,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
myderiv <-
c(w) * cbind(dl.dscale * dscale.deta,
dl.dq * dq.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale ,
.escale = escale ,
.lshape3.q = lshape3.q,
@@ -4830,7 +4833,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale = exp(-5:5),
gshape1.a = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = ifelse(lss, -2, -1)) {
+ zero = "shape") {
@@ -4854,8 +4857,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -4879,9 +4880,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"Mean: scale * gamma(1 + 1/shape1.a) * ",
"gamma(1 - 1/shape1.a)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
@@ -4889,11 +4890,14 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape1.a") else
+ c("shape1.a", "scale"),
lscale = .lscale , lshape1.a = .lshape1.a ,
- escale = .escale , eshape1.a = .eshape1.a ,
- .zero = zero )
+ escale = .escale , eshape1.a = .eshape1.a )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -4919,11 +4923,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(namesof(sha1.names , .lshape1.a , earg = .eshape1.a , tag = FALSE),
namesof(scaL.names , .lscale , earg = .escale , tag = FALSE))
}
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
- aa.init <- matrix(as.numeric(NA), n, NOS)
+ aa.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -4937,7 +4941,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if (length( .ishape1.a ))
gshape1.a <- rep( .ishape1.a , length = NOS)
allmat1 <- cbind(shape1.a = gshape1.a)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.fisk <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -4984,7 +4988,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
theta2eta(aa.init, .lshape1.a , earg = .eshape1.a )) else
cbind(theta2eta(aa.init, .lshape1.a , earg = .eshape1.a ),
theta2eta(sc.init, .lscale , earg = .escale ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -5024,16 +5028,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
} else {
c(sha1.names, scaL.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly)
if ( .lss ) {
misc$earg[[M1*ii-1]] <- .escale
@@ -5140,7 +5144,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(w) * cbind(dl.da * da.deta,
dl.dscale * dscale.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lss = lss ))),
@@ -5191,7 +5195,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale = exp(-5:5),
gshape2.p = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = -2) {
+ zero = "shape2.p") {
@@ -5213,8 +5217,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -5233,9 +5235,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
namesof("shape2.p" , lshape2.p, earg = eshape2.p), "\n",
"Mean: does not exist"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
@@ -5243,11 +5245,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = c("scale", "shape2.p"),
lscale = .lscale ,
escale = .escale ,
lshape2.p = .lshape2.p ,
- eshape2.p = .eshape2.p ,
- .zero = zero )
+ eshape2.p = .eshape2.p )
}, list( .lscale = lscale ,
.escale = escale ,
.lshape2.p = lshape2.p,
@@ -5273,11 +5275,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
predictors.names <-
c(namesof(scaL.names , .lscale , earg = .escale , tag = FALSE),
namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
- pp.init <- matrix(as.numeric(NA), n, NOS)
+ pp.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -5291,7 +5293,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if (length( .ishape2.p ))
gshape2.p <- rep( .ishape2.p , length = NOS)
allmat1 <- cbind(shape2.p = gshape2.p)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.invL <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -5328,7 +5330,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
etastart <-
cbind(theta2eta(sc.init, .lscale , earg = .escale ),
theta2eta(pp.init, .lshape2.p , earg = .eshape2.p ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale ,
.escale = escale ,
@@ -5360,12 +5362,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( .lscale , length = ncoly),
rep( .lshape2.p , length = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- c(scaL.names, sha2.names)
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly) {
misc$earg[[M1*ii-1]] <- .escale
misc$earg[[M1*ii ]] <- .eshape2.p
@@ -5453,7 +5455,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
myderiv <-
c(w) * cbind(dl.dscale * dscale.deta,
dl.dp * dp.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale ,
.escale = escale ,
.lshape2.p = lshape2.p,
@@ -5496,7 +5498,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale = exp(-5:5),
gshape1.a = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = ifelse(lss, -2, -1)) {
+ zero = "shape") {
@@ -5520,8 +5522,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -5546,9 +5546,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"gamma(shape1.a - 1/shape1.a) / ",
"gamma(shape1.a)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
@@ -5556,11 +5556,14 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape1.a") else
+ c("shape1.a", "scale"),
lscale = .lscale , lshape1.a = .lshape1.a ,
- escale = .escale , eshape1.a = .eshape1.a ,
- .zero = zero )
+ escale = .escale , eshape1.a = .eshape1.a )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -5587,11 +5590,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(namesof(sha1.names , .lshape1.a , earg = .eshape1.a , tag = FALSE),
namesof(scaL.names , .lscale , earg = .escale , tag = FALSE))
}
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
- aa.init <- matrix(as.numeric(NA), n, NOS)
+ aa.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -5605,7 +5608,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if (length( .ishape1.a ))
gshape1.a <- rep( .ishape1.a , length = NOS)
allmat1 <- expand.grid(shape1.a = gshape1.a)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.para <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -5651,7 +5654,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
theta2eta(aa.init, .lshape1.a , earg = .eshape1.a )) else
cbind(theta2eta(aa.init, .lshape1.a , earg = .eshape1.a ),
theta2eta(sc.init, .lscale , earg = .escale ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -5691,16 +5694,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
} else {
c(sha1.names, scaL.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly)
if ( .lss ) {
misc$earg[[M1*ii-1]] <- .escale
@@ -5811,7 +5814,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(w) * cbind(dl.da * da.deta,
dl.dscale * dscale.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lss = lss ))),
@@ -5859,7 +5862,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
gscale = exp(-5:5),
gshape1.a = exp(-5:5),
probs.y = c(0.25, 0.50, 0.75),
- zero = ifelse(lss, -2, -1)) {
+ zero = "shape") {
@@ -5882,8 +5885,6 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
!is.Numeric(probs.y, positive = TRUE))
stop("Bad input for argument 'probs.y'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
@@ -5908,9 +5909,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
"gamma(1 - 1/shape1.a) / ",
"gamma(shape1.a)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
@@ -5918,11 +5919,14 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
expected = TRUE,
zero = .zero ,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape1.a") else
+ c("shape1.a", "scale"),
lscale = .lscale , lshape1.a = .lshape1.a ,
- escale = .escale , eshape1.a = .eshape1.a ,
- .zero = zero )
+ escale = .escale , eshape1.a = .eshape1.a )
}, list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
+ .lss = lss ,
.zero = zero ))),
initialize = eval(substitute(expression({
temp5 <- w.y.check(w = w, y = y,
@@ -5948,11 +5952,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(namesof(sha1.names , .lshape1.a , earg = .eshape1.a , tag = FALSE),
namesof(scaL.names , .lscale , earg = .escale , tag = FALSE))
}
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
sc.init <-
- aa.init <- matrix(as.numeric(NA), n, NOS)
+ aa.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -5966,7 +5970,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
if (length( .ishape1.a ))
gshape1.a <- rep( .ishape1.a , length = NOS)
allmat1 <- cbind(shape1.a = gshape1.a)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.invp <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgenbetaII(x = y,
@@ -6014,7 +6018,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
theta2eta(aa.init, .lshape1.a , earg = .eshape1.a )) else
cbind(theta2eta(aa.init, .lshape1.a , earg = .eshape1.a ),
theta2eta(sc.init, .lscale , earg = .escale ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
} # End of etastart.
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
@@ -6054,16 +6058,16 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
misc$link <- c(rep( if ( .lss ) .lscale else .lshape1.a , len = ncoly),
rep( if ( .lss ) .lshape1.a else .lscale , len = ncoly))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
temp.names <- if ( .lss ) {
c(scaL.names, sha1.names)
} else {
c(sha1.names, scaL.names)
}
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
- names(misc$earg) <- temp.names
+ names(misc$earg) <- temp.names[interleave.VGAM(M, M1 = M1)]
for (ii in 1:ncoly)
if ( .lss ) {
misc$earg[[M1*ii-1]] <- .escale
@@ -6175,7 +6179,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
c(w) * cbind(dl.da * da.deta,
dl.dscale * dscale.deta)
}
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale , .lshape1.a = lshape1.a,
.escale = escale , .eshape1.a = eshape1.a,
.lss = lss ))),
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 2b7f432..c586147 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -155,9 +155,6 @@ pkumar <- function(q, shape1, shape2,
if (!is.Numeric(grid.shape1, length.arg = 2, positive = TRUE))
stop("bad input for argument 'grid.shape1'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
blurb = c("Kumaraswamy distribution\n\n",
@@ -165,14 +162,22 @@ pkumar <- function(q, shape1, shape2,
namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
"Mean: shape2 * beta(1 + 1 / shape1, shape2)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
- list(M1 = 2, Q1 = 1, expected = TRUE, multipleResponses = TRUE,
- lshape1 = .lshape1 , lshape2 = .lshape2 , zero = .zero )
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("shape1", "shape2"),
+ lshape1 = .lshape1 ,
+ lshape2 = .lshape2 ,
+ zero = .zero )
}, list( .zero = zero, .lshape1 = lshape1, .lshape2 = lshape2 ))),
+
initialize = eval(substitute(expression({
checklist <- w.y.check(w = w, y = y, Is.positive.y = TRUE,
ncol.w.max = Inf, ncol.y.max = Inf,
@@ -185,12 +190,12 @@ pkumar <- function(q, shape1, shape2,
extra$ncoly <- ncoly <- ncol(y)
extra$M1 <- M1 <- 2
M <- M1 * ncoly
- mynames1 <- paste("shape1", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape2", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("shape1", ncoly)
+ mynames2 <- param.names("shape2", ncoly)
predictors.names <-
c(namesof(mynames1, .lshape1 , earg = .eshape1 , tag = FALSE),
namesof(mynames2, .lshape2 , earg = .eshape2 , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
kumar.Loglikfun <- function(shape1, y, x, w, extraargs) {
@@ -212,7 +217,7 @@ pkumar <- function(q, shape1, shape2,
etastart <- cbind(theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.ishape1 = ishape1, .ishape2 = ishape2,
@@ -226,8 +231,8 @@ pkumar <- function(q, shape1, shape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
misc$link <- c(rep( .lshape1 , length = ncoly),
- rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -269,7 +274,7 @@ pkumar <- function(q, shape1, shape2,
dl.dshape2 <- 1 / shape2 + log1p(-y^shape1)
dl.deta <- c(w) * cbind(dl.dshape1 * dshape1.deta,
dl.dshape2 * dshape2.deta)
- dl.deta[, interleave.VGAM(M, M = M1)]
+ dl.deta[, interleave.VGAM(M, M1 = M1)]
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
@@ -414,9 +419,25 @@ riceff.control <- function(save.weights = TRUE, ...) {
"sigma*sqrt(pi/2)*exp(z/2)*((1-z)*",
"besselI(-z/2, nu = 0) - z * besselI(-z/2, nu = 1)) ",
"where z=-vee^2/(2*sigma^2)"),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = FALSE,
+ multipleResponses = FALSE,
+ parameters.names = c("sigma", "vee"),
+ nsimEIM = .nsimEIM,
+ lsigma = .lsigma ,
+ lvee = .lvee ,
+ zero = .zero )
+ }, list( .zero = zero, .lsigma = lsigma, .lvee = lvee,
+ .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
temp5 <-
@@ -671,8 +692,22 @@ skellam.control <- function(save.weights = TRUE, ...) {
bool = .parallel ,
constraints = constraints,
apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .parallel = parallel, .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = FALSE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu1", "mu2"),
+ nsimEIM = .nsimEIM,
+ lmu1 = .lmu1 ,
+ lmu2 = .lmu2 ,
+ zero = .zero )
+ }, list( .zero = zero, .lmu1 = lmu1, .lmu2 = lmu2,
+ .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
@@ -899,9 +934,6 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
nsimEIM <= 50)
stop("argument 'nsimEIM' should be an integer greater than 50")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -913,15 +945,16 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
"Variance: rho^2 / ((rho - 1)^2 * (rho - 2)), ",
"provided rho > 2"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
nsimEIM = .nsimEIM,
+ parameters.names = c("rho"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -951,9 +984,8 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("rho", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("rho", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
wmeany <- colSums(y * w) / colSums(w) + 1/8
@@ -1149,9 +1181,6 @@ rlind <- function(n, theta) {
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -1164,14 +1193,17 @@ rlind <- function(n, theta) {
"Variance: (theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("theta"),
zero = .zero )
}, list( .zero = zero ))),
@@ -1199,9 +1231,8 @@ rlind <- function(n, theta) {
M <- M1 * ncoly
- mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("theta", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
wmeany <- colSums(y * w) / colSums(w) + 1/8
@@ -1352,9 +1383,6 @@ if (FALSE)
nsimEIM <= 50)
stop("argument 'nsimEIM' should be an integer greater than 50")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -1368,15 +1396,16 @@ if (FALSE)
"(theta * (theta + 1))^2, "
),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
- nsimEIM = .nsimEIM,
+ nsimEIM = .nsimEIM ,
+ parameters.names = c("theta"),
zero = .zero )
}, list( .zero = zero,
.nsimEIM = nsimEIM ))),
@@ -1406,9 +1435,8 @@ if (FALSE)
M <- M1 * ncoly
- mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("theta", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
wmeany <- colSums(y * w) / colSums(w) + 1/8
@@ -1621,9 +1649,6 @@ slash.control <- function(save.weights = TRUE, ...) {
!is.Numeric(isigma, positive = TRUE))
stop("argument 'isigma' must be > 0")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -1651,9 +1676,26 @@ slash.control <- function(save.weights = TRUE, ...) {
"\ty!=mu",
"\n1/(2*sigma*sqrt(2*pi))",
"\t\t\t\t\t\t\ty=mu\n")),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu", "sigma"),
+ lmu = .lmu ,
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero, .lmu = lmu, .lsigma = lsigma ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1706,9 +1748,9 @@ slash.control <- function(save.weights = TRUE, ...) {
NA * eta2theta(eta[, 1], link = .lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link <- c("mu" = .lmu , "sigma" = .lsigma)
+ misc$link <- c("mu" = .lmu , "sigma" = .lsigma )
- misc$earg <- list("mu" = .emu, "sigma" = .esigma )
+ misc$earg <- list("mu" = .emu , "sigma" = .esigma )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -2374,9 +2416,6 @@ qbenf <- function(p, ndigits = 1,
stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
uu.ll <- min(upper.limit)
@@ -2397,17 +2436,23 @@ qbenf <- function(p, ndigits = 1,
"(1 - prob)^", upper.limit+1, ")", sep = ""),
"")),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = .expected ,
+ imethod = .imethod ,
+ multipleResponses = TRUE,
+ parameters.names = c("prob"),
upper.limit = .upper.limit ,
zero = .zero )
}, list( .zero = zero,
+ .expected = expected,
+ .imethod = imethod,
.upper.limit = upper.limit ))),
initialize = eval(substitute(expression({
@@ -2436,9 +2481,8 @@ qbenf <- function(p, ndigits = 1,
stop("some response values greater than argument 'upper.limit'")
- mynames1 <- paste("prob", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("prob", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
diff --git a/R/family.basics.R b/R/family.basics.R
index 0c06722..ff2d446 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -391,7 +391,69 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
- cm.zero.VGAM <- function(constraints, x, zero = NULL, M = 1) {
+ cm.zero.VGAM <- function(constraints, x, zero = NULL, M = 1,
+ predictors.names, M1 = 1) {
+
+
+ dotzero <- zero # Transition
+
+ if (is.character(dotzero)) {
+
+
+
+
+ which.numeric.all <- NULL
+ for (ii in 1:length(dotzero)) {
+ which.ones <-
+ grep(dotzero[ii], predictors.names, fixed = TRUE)
+ if (length(which.ones)) {
+ which.numeric.all <- c(which.numeric.all, which.ones)
+ } else {
+ warning("some values of argument 'zero' are unmatched. Ignoring them")
+ }
+ }
+ which.numeric <- unique(sort(which.numeric.all))
+
+ if (!length(which.numeric)) {
+ warning("No values of argument 'zero' were matched.")
+ which.numeric <- NULL
+ } else if (length(which.numeric.all) > length(which.numeric)) {
+ warning("There were redundant values of argument 'zero'.")
+ }
+
+ dotzero <- which.numeric
+ }
+
+
+
+ posdotzero <- dotzero[dotzero > 0]
+ negdotzero <- dotzero[dotzero < 0]
+
+
+ zneg.index <- if (length(negdotzero)) {
+
+ if (!is.Numeric(-negdotzero, positive = TRUE,
+ integer.valued = TRUE) ||
+ max(-negdotzero) > M1)
+ stop("bad input for argument 'zero'")
+
+ bigUniqInt <- 1080
+ zneg.index <- rep(0:bigUniqInt, rep(length(negdotzero),
+ 1 + bigUniqInt)) * M1 + abs(negdotzero)
+ sort(intersect(zneg.index, 1:M))
+ } else {
+ NULL
+ }
+
+ zpos.index <- if (length(posdotzero)) posdotzero else NULL
+ z.Index <- if (!length(dotzero))
+ NULL else
+ unique(sort(c(zneg.index, zpos.index)))
+
+
+ zero <- z.Index # Transition
+
+
asgn <- attr(x, "assign")
nasgn <- names(asgn)
@@ -409,8 +471,6 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
if (is.null(zero))
return(constraints)
- if (!is.numeric(zero))
- stop("argument 'zero' must be numeric")
if (any(zero < 1 | zero > M))
stop("argument 'zero' out of range")
if (nasgn[1] != "(Intercept)")
@@ -918,7 +978,7 @@ procVec <- function(vec, yn, Default) {
if (length(nvec2)) {
if (any(!is.element(nvec2, yn)))
stop("some names given which are superfluous")
- answer <- rep(as.numeric(NA), length.out = length(yn))
+ answer <- rep(NA_real_, length.out = length(yn))
names(answer) <- yn
answer[nvec2] <- vec[nvec2]
answer[is.na(answer)] <-
@@ -1050,7 +1110,7 @@ mbesselI0 <- function(x, deriv.arg = 0) {
if (FALSE) {
}
- ans <- matrix(as.numeric(NA), nrow = nn, ncol = deriv.arg+1)
+ ans <- matrix(NA_real_, nrow = nn, ncol = deriv.arg+1)
ans[, 1] <- besselI(x, nu = 0)
if (deriv.arg>=1) ans[,2] <- besselI(x, nu = 1)
if (deriv.arg>=2) ans[,3] <- ans[,1] - ans[,2] / x
@@ -1170,10 +1230,42 @@ negzero.expression.VGAM <- expression({
+
+
+
+
+ if (is.character(dotzero)) {
+
+
+
+
+ which.numeric.all <- NULL
+ for (ii in 1:length(dotzero)) {
+ which.ones <-
+ grep(dotzero[ii], predictors.names, fixed = TRUE)
+ if (length(which.ones)) {
+ which.numeric.all <- c(which.numeric.all, which.ones)
+ } else {
+ warning("some values of argument 'zero' are unmatched. Ignoring them")
+ }
+ }
+ which.numeric <- unique(sort(which.numeric.all))
+
+ if (!length(which.numeric)) {
+ warning("No values of argument 'zero' were matched.")
+ which.numeric <- NULL
+ } else if (length(which.numeric.all) > length(which.numeric)) {
+ warning("There were redundant values of argument 'zero'.")
+ }
+
+ dotzero <- which.numeric
+ }
+
+
+
posdotzero <- dotzero[dotzero > 0]
negdotzero <- dotzero[dotzero < 0]
- bigUniqInt <- 1080
zneg.index <- if (length(negdotzero)) {
if (!is.Numeric(-negdotzero, positive = TRUE,
@@ -1181,6 +1273,7 @@ negzero.expression.VGAM <- expression({
max(-negdotzero) > M1)
stop("bad input for argument 'zero'")
+ bigUniqInt <- 1080
zneg.index <- rep(0:bigUniqInt, rep(length(negdotzero),
1 + bigUniqInt)) * M1 + abs(negdotzero)
sort(intersect(zneg.index, 1:M))
@@ -1189,8 +1282,9 @@ negzero.expression.VGAM <- expression({
}
zpos.index <- if (length(posdotzero)) posdotzero else NULL
- z.Index <- if (!length(dotzero)) NULL else
- unique(sort(c(zneg.index, zpos.index)))
+ z.Index <- if (!length(dotzero))
+ NULL else
+ unique(sort(c(zneg.index, zpos.index)))
constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M = M)
})
@@ -1210,8 +1304,20 @@ is.empty.list <- function(mylist) {
-interleave.VGAM <- function(L, M)
- c(matrix(1:L, nrow = M, byrow = TRUE))
+
+
+
+ interleave.VGAM <- function(.M, M1, inverse = FALSE) {
+ if (inverse) {
+ NRs <- (.M)/M1
+ if (round(NRs) != NRs)
+ stop("Incompatible number of parameters")
+ c(matrix(1:(.M), nrow = NRs, byrow = TRUE))
+ } else {
+ c(matrix(1:(.M), nrow = M1, byrow = TRUE))
+ }
+}
+
diff --git a/R/family.binomial.R b/R/family.binomial.R
index c323b20..0764111 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -77,7 +77,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
lrho = "logit",
irho = NULL,
imethod = 1, ishrinkage = 0.95,
- nsimEIM = NULL, zero = 2) {
+ nsimEIM = NULL, zero = "rho") {
lmu <- as.list(substitute(lmu))
emu <- link2list(lmu)
lmu <- attr(emu, "function.name")
@@ -113,8 +113,29 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
"Mean: mu", "\n",
"Variance: mu*(1-mu)*(1+(w-1)*rho)/w"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu", "rho"),
+ imethod = .imethod ,
+ ishrinkage = .ishrinkage ,
+ nsimEIM = .nsimEIM ,
+ lmu = .lmu ,
+ lrho = .lrho ,
+ zero = .zero )
+ }, list( .lmu = lmu, .lrho = lrho,
+ .imethod = imethod, .ishrinkage = ishrinkage,
+ .zero = zero,
+ .nsimEIM = nsimEIM ))),
+
+
initialize = eval(substitute(expression({
if (!all(w == 1))
extra$orig.w <- w
@@ -302,7 +323,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
.emu = emu, .erho = erho ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM )) {
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(2)
wz11 <- -(expected.betabin.ab(nvec, shape1, shape2, TRUE) -
trigamma(shape1+shape2+nvec) -
trigamma(shape1) + trigamma(shape1+shape2))
@@ -470,7 +491,7 @@ rbinom2.or <-
binom2.or <- function(lmu = "logit", lmu1 = lmu, lmu2 = lmu,
loratio = "loge",
imu1 = NULL, imu2 = NULL, ioratio = NULL,
- zero = 3,
+ zero = "oratio",
exchangeable = FALSE,
tol = 0.001,
more.robust = FALSE) {
@@ -522,9 +543,30 @@ rbinom2.or <-
apply.int = TRUE,
cm.default = cm.intercept.default,
cm.intercept.default = cm.intercept.default)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .exchangeable = exchangeable, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu1", "mu2", "oratio"),
+ exchangeable = .exchangeable ,
+ lmu1 = .lmu1 ,
+ lmu2 = .lmu2 ,
+ loratio = .loratio ,
+ zero = .zero )
+ }, list( .lmu1 = lmu1,
+ .lmu2 = lmu2,
+ .loratio = loratio,
+ .zero = zero,
+ .exchangeable = exchangeable
+ ))),
+
+
initialize = eval(substitute(expression({
mustart.orig <- mustart
eval(process.binomial2.data.VGAM)
@@ -687,7 +729,69 @@ rbinom2.or <-
c(w) * wz
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))))
-}
+} # binom2.or
+
+
+
+
+
+
+
+
+
+
+setClass("binom2", contains = "vglmff")
+setClass("binom2.or", contains = "binom2")
+
+
+
+
+
+setMethod("summaryvglmS4VGAM", signature(VGAMff = "binom2.or"),
+ function(object,
+ VGAMff,
+ ...) {
+
+ cfit <- coef.vlm(object, matrix = TRUE)
+ if (rownames(cfit)[1] == "(Intercept)" &&
+ all(cfit[-1, 3] == 0)) {
+ object at post$oratio <- eta2theta(cfit[1, 3],
+ link = object at misc$link[3],
+ earg = object at misc$earg[[3]])
+ }
+
+ object at post
+})
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "binom2.or"),
+ function(object,
+ VGAMff,
+ ...) {
+ if (length(object at post$oratio) == 1 &&
+ is.numeric(object at post$oratio)) {
+ cat("\nOdds ratio: ", round(object at post$oratio, digits = 4), "\n")
+ }
+})
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
dbinom2.rho <-
@@ -791,13 +895,15 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
- binom2.rho <- function(lrho = "rhobit",
- lmu = "probit", # added 20120817
- imu1 = NULL, imu2 = NULL, irho = NULL,
- imethod = 1,
- zero = 3, exchangeable = FALSE,
- grho = seq(-0.95, 0.95, by = 0.05),
- nsimEIM = NULL) {
+ binom2.rho <-
+ function(lmu = "probit", # added 20120817, order swapped 20151128
+ lrho = "rhobit",
+ imu1 = NULL, imu2 = NULL, irho = NULL,
+ imethod = 1,
+ zero = "rho", # 3
+ exchangeable = FALSE,
+ grho = seq(-0.95, 0.95, by = 0.05),
+ nsimEIM = NULL) {
@@ -844,14 +950,22 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
bool = .exchangeable ,
constraints = constraints,
apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .exchangeable = exchangeable, .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = c("mu1", "mu2", "rho"),
+ lmu1 = .lmu12,
+ lmu2 = .lmu12,
+ lrho = .lrho ,
zero = .zero )
- }, list( .zero = zero ))),
+ }, list( .lmu12 = lmu12, .lrho = lrho,
+ .zero = zero ))),
initialize = eval(substitute(expression({
@@ -1203,9 +1317,9 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
warning("some negative values returned")
answer[is.inf1.neg] <- 0
- answer[is.inf1.pos] <- pnorm(Z2[is.inf1.neg])
+ answer[is.inf1.pos] <- pnorm(Z2[is.inf1.pos]) # pnorm(Z2[is.inf1.neg])
answer[is.inf2.neg] <- 0
- answer[is.inf2.pos] <- pnorm(Z1[is.inf2.neg])
+ answer[is.inf2.pos] <- pnorm(Z1[is.inf2.pos]) # pnorm(Z1[is.inf2.neg])
answer
}
@@ -1551,7 +1665,7 @@ my.dbinom <- function(x,
if (length(shape1) != use.n) shape1 <- rep(shape1, len = use.n)
if (length(shape2) != use.n) shape2 <- rep(shape2, len = use.n)
- ans <- rep(as.numeric(NA), len = use.n)
+ ans <- rep(NA_real_, len = use.n)
okay0 <- is.finite(shape1) & is.finite(shape2)
if (smalln <- sum(okay0))
ans[okay0] <- rbinom(n = smalln, size = size[okay0],
@@ -1642,7 +1756,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
betabinomialff <-
- function(lshape1 = "loge",lshape2 = "loge",
+ function(lshape1 = "loge", lshape2 = "loge",
ishape1 = 1, ishape2 = NULL, imethod = 1,
ishrinkage = 0.95, nsimEIM = NULL,
zero = NULL) {
@@ -1688,8 +1802,23 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
"Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
"where rho = 1 / (shape1+shape2+1)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("shape1", "shape2"),
+ lshape1 = .lshape1 ,
+ lshape2 = .lshape2 ,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
if (!all(w == 1))
extra$orig.w <- w
@@ -1709,7 +1838,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
mustart.use <- if (length(mustart.orig)) mustart.orig else
- mustart
+ mustart
shape1 <- rep( .ishape1 , len = n)
shape2 <- if (length( .ishape2 )) {
@@ -1846,7 +1975,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
.earg1 = earg1, .earg2 = earg2 ))),
weight = eval(substitute(expression({
if (is.null( .nsimEIM)) {
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(2)
wz[, iam(1, 1, M)] <- -(expected.betabin.ab(nvec,shape1,shape2,
TRUE) -
trigamma(shape1+shape2+nvec) -
@@ -1926,9 +2055,26 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
"Links: ",
namesof("prob", lprob, earg = eprob), ", ",
namesof("shape", lshape, earg = eshape)),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob", "shape"),
+ lprob = .lprob ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .lprob = lprob, .lshape = lshape,
+ .zero = zero ))),
+
+
initialize = eval(substitute(expression({
eval(geometric()@initialize)
@@ -2116,15 +2262,30 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
"Links: ",
namesof("prob1", lprob1, earg = eprob1), ", ",
namesof("prob2", lprob2, earg = eprob2)),
+
constraints = eval(substitute(expression({
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints,
apply.int = .apply.parint )
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .parallel = parallel,
.apply.parint = apply.parint,
.zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob1", "prob2"),
+ lprob1 = .lprob1 ,
+ lprob2 = .lprob2 ,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
if (!is.vector(w))
stop("the 'weights' argument must be a vector")
@@ -2259,7 +2420,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
loratio = "loge",
imu12 = NULL, iphi12 = NULL,
ioratio = NULL,
- zero = 2:3, tol = 0.001, addRidge = 0.001) {
+ zero = c("phi12", "oratio"),
+ tol = 0.001, addRidge = 0.001) {
lmu12 <- as.list(substitute(lmu12))
@@ -2295,9 +2457,29 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
namesof("mu12", lmu12, earg = emu12), ", ",
namesof("phi12", lphi12, earg = ephi12), ", ",
namesof("oratio", loratio, earg = eoratio)),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu12", "phi12", "oratio"),
+ lmu12 = .lmu12 ,
+ lphi12 = .lphi12 ,
+ loratio = .loratio ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio
+ ))),
+
+
+
initialize = eval(substitute(expression({
eval(process.binomial2.data.VGAM)
@@ -2632,7 +2814,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
lmu = "probit", # added 20120817
imu1 = NULL, imu2 = NULL, irho = NULL,
imethod = 1,
- zero = 3, exchangeable = FALSE,
+ zero = 3,
+ exchangeable = FALSE,
grho = seq(-0.95, 0.95, by = 0.05)) {
@@ -2669,12 +2852,15 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
bool = .exchangeable ,
constraints = constraints,
apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .exchangeable = exchangeable, .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
multipleResponses = FALSE,
+ parameters.names = c("mu1", "mu2", "rho"),
zero = .zero )
}, list( .zero = zero ))),
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 531a3a0..61a93e4 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -90,7 +90,7 @@ rbiclaytoncop <- function(n, apar = 0) {
stop("argument 'imethod' must be 1 or 2 or 3")
new("vglmff",
- blurb = c(" bivariate clayton copula distribution)\n","Links: ",
+ blurb = c(" bivariate Clayton copula distribution)\n","Links: ",
namesof("apar", lapar, earg = eapar)),
constraints = eval(substitute(expression({
@@ -99,10 +99,9 @@ rbiclaytoncop <- function(n, apar = 0) {
constraints = constraints,
apply.int = .apply.parint )
- dotzero <- .zero
- M1 <- 1
- Yusual <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero,
.apply.parint = apply.parint,
.parallel = parallel ))),
@@ -110,26 +109,28 @@ rbiclaytoncop <- function(n, apar = 0) {
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 2,
- Yusual = 2,
- apply.parint = .apply.parint,
- parallel = .parallel,
+ apply.parint = .apply.parint ,
+ parameters.names = c("apar"),
+ lapar = .lapar ,
+ parallel = .parallel ,
zero = .zero )
}, list( .zero = zero,
.apply.parint = apply.parint,
+ .lapar = lapar,
.parallel = parallel ))),
initialize = eval(substitute(expression({
M1 <- 1
- Yusual <- 2
+ Q1 <- 2
temp5 <-
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- ncol.y.min = Yusual,
+ ncol.y.min = Q1,
out.wy = TRUE,
- colsyperw = Yusual,
+ colsyperw = Q1,
maximize = TRUE)
w <- temp5$w
@@ -139,10 +140,9 @@ rbiclaytoncop <- function(n, apar = 0) {
ncoly <- ncol(y)
extra$ncoly <- ncoly
extra$M1 <- M1
- extra$Yusual <- Yusual
- M <- M1 * (ncoly / Yusual)
- mynames1 <- paste("apar", if (M / M1 > 1) 1:(M / M1) else "",
- sep = "")
+ extra$Q1 <- Q1
+ M <- M1 * (ncoly / Q1)
+ mynames1 <- param.names("apar", M / M1)
predictors.names <-
namesof(mynames1, .lapar , earg = .eapar , short = TRUE)
@@ -158,7 +158,7 @@ rbiclaytoncop <- function(n, apar = 0) {
if (!length( .iapar ))
for (spp. in 1:(M / M1)) {
- ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
+ ymatj <- y[, (Q1 * spp. - 1):(Q1 * spp.)]
apar.init0 <- if ( .imethod == 1) {
@@ -198,7 +198,7 @@ rbiclaytoncop <- function(n, apar = 0) {
last = eval(substitute(expression({
M1 <- extra$M1
- Yusual <- extra$Yusual
+ Q1 <- extra$Q1
misc$link <- rep( .lapar , length = M)
temp.names <- mynames1
names(misc$link) <- temp.names
@@ -210,7 +210,7 @@ rbiclaytoncop <- function(n, apar = 0) {
}
misc$M1 <- M1
- misc$Yusual <- Yusual
+ misc$Q1 <- Q1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$parallel <- .parallel
@@ -261,8 +261,8 @@ rbiclaytoncop <- function(n, apar = 0) {
deriv = eval(substitute(expression({
Alpha <- eta2theta(eta, .lapar , earg = .eapar )
- Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
- Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+ Yindex1 <- extra$Q1 * (1:(extra$ncoly/extra$Q1)) - 1
+ Yindex2 <- extra$Q1 * (1:(extra$ncoly/extra$Q1))
@@ -374,7 +374,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
irho = NULL,
imethod = 1,
parallel = FALSE,
- zero = -1) {
+ zero = "rho") {
@@ -419,10 +419,9 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
constraints = constraints,
apply.int = .apply.parint )
- dotzero <- .zero
- M1 <- 2
- Yusual <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.apply.parint = apply.parint,
.parallel = parallel ))),
@@ -430,7 +429,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 2,
- Yusual = 2,
+ parameters.names = c("df", "rho"),
apply.parint = .apply.parint ,
parallel = .parallel ,
zero = .zero )
@@ -440,15 +439,15 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
initialize = eval(substitute(expression({
M1 <- 2
- Yusual <- 2
+ Q1 <- 2
temp5 <-
w.y.check(w = w, y = y,
ncol.w.max = Inf,
ncol.y.max = Inf,
- ncol.y.min = Yusual,
+ ncol.y.min = Q1,
out.wy = TRUE,
- colsyperw = Yusual,
+ colsyperw = Q1,
maximize = TRUE)
w <- temp5$w
@@ -458,16 +457,14 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
ncoly <- ncol(y)
extra$ncoly <- ncoly
extra$M1 <- M1
- extra$Yusual <- Yusual
- M <- M1 * (ncoly / Yusual)
- mynames1 <- paste("df", if (M / M1 > 1) 1:(M / M1) else "",
- sep = "")
- mynames2 <- paste("rho", if (M / M1 > 1) 1:(M / M1) else "",
- sep = "")
+ extra$Q1 <- Q1
+ M <- M1 * (ncoly / Q1)
+ mynames1 <- param.names("df", M / M1)
+ mynames2 <- param.names("rho", M / M1)
predictors.names <- c(
namesof(mynames1, .ldof , earg = .edof , short = TRUE),
namesof(mynames2, .lrho , earg = .erho , short = TRUE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
extra$dimnamesy1 <- dimnames(y)[[1]]
@@ -513,7 +510,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
cbind(theta2eta(dof.init, .ldof , earg = .edof ),
theta2eta(rho.init, .lrho , earg = .erho ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
}
}), list( .imethod = imethod,
@@ -536,12 +533,12 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
last = eval(substitute(expression({
M1 <- extra$M1
- Yusual <- extra$Yusual
+ Q1 <- extra$Q1
misc$link <-
c(rep( .ldof , length = M / M1),
rep( .lrho , length = M / M1))[
- interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -552,7 +549,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
}
misc$M1 <- M1
- misc$Yusual <- Yusual
+ misc$Q1 <- Q1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$parallel <- .parallel
@@ -576,8 +573,8 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
- Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+ Yindex1 <- extra$Q1 * (1:(extra$ncoly/extra$Q1)) - 1
+ Yindex2 <- extra$Q1 * (1:(extra$ncoly/extra$Q1))
ll.elts <-
c(w) * dbistudentt(x1 = y[, Yindex1, drop = FALSE],
x2 = y[, Yindex2, drop = FALSE],
@@ -594,13 +591,13 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
.imethod = imethod ))),
vfamily = c("bistudentt"),
deriv = eval(substitute(expression({
- M1 <- Yusual <- 2
+ M1 <- Q1 <- 2
Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.ldof , earg = .edof )
Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.lrho , earg = .erho )
- Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
- Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+ Yindex1 <- extra$Q1 * (1:(extra$ncoly/extra$Q1)) - 1
+ Yindex2 <- extra$Q1 * (1:(extra$ncoly/extra$Q1))
x1 <- c(y[, Yindex1]) # Convert into a vector
@@ -651,7 +648,7 @@ bistudent.deriv.dof <- function(u, v, nu, rho) {
ans <- c(w) * cbind(dl.ddof * ddof.deta,
dl.drho * drho.deta)
- ans <- ans[, interleave.VGAM(M, M = M1)]
+ ans <- ans[, interleave.VGAM(M, M1 = M1)]
ans
}), list( .lrho = lrho, .ldof = ldof,
.erho = erho, .edof = edof,
@@ -779,10 +776,9 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
constraints = constraints,
apply.int = .apply.parint )
- dotzero <- .zero
- M1 <- 1
- Yusual <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero,
.apply.parint = apply.parint,
.parallel = parallel ))),
@@ -790,7 +786,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 2,
- Yusual = 2,
+ parameters.names = c("rho"),
apply.parint = .apply.parint ,
parallel = .parallel ,
zero = .zero )
@@ -800,16 +796,16 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
initialize = eval(substitute(expression({
M1 <- 1
- Yusual <- 2
+ Q1 <- 2
temp5 <-
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- ncol.y.min = Yusual,
+ ncol.y.min = Q1,
out.wy = TRUE,
- colsyperw = Yusual,
+ colsyperw = Q1,
maximize = TRUE)
w <- temp5$w
@@ -819,10 +815,9 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
ncoly <- ncol(y)
extra$ncoly <- ncoly
extra$M1 <- M1
- extra$Yusual <- Yusual
- M <- M1 * (ncoly / Yusual)
- mynames1 <- paste("rho", if (M / M1 > 1) 1:(M / M1) else "",
- sep = "")
+ extra$Q1 <- Q1
+ M <- M1 * (ncoly / Q1)
+ mynames1 <- param.names("rho", M / M1)
predictors.names <- c(
namesof(mynames1, .lrho , earg = .erho , short = TRUE))
@@ -838,7 +833,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
if (!length( .irho ))
for (spp. in 1:(M / M1)) {
- ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
+ ymatj <- y[, (Q1 * spp. - 1):(Q1 * spp.)]
rho.init0 <- if ( .imethod == 1) {
@@ -882,7 +877,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
last = eval(substitute(expression({
M1 <- extra$M1
- Yusual <- extra$Yusual
+ Q1 <- extra$Q1
misc$link <- rep( .lrho , length = M)
temp.names <- mynames1
names(misc$link) <- temp.names
@@ -894,7 +889,7 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
}
misc$M1 <- M1
- misc$Yusual <- Yusual
+ misc$Q1 <- Q1
misc$imethod <- .imethod
misc$expected <- TRUE
misc$parallel <- .parallel
@@ -915,8 +910,8 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
- Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+ Yindex1 <- extra$Q1 * (1:(extra$ncoly/extra$Q1)) - 1
+ Yindex2 <- extra$Q1 * (1:(extra$ncoly/extra$Q1))
ll.elts <-
c(w) * dbinormcop(x1 = y[, Yindex1, drop = FALSE],
x2 = y[, Yindex2, drop = FALSE],
@@ -952,8 +947,8 @@ rbinormcop <- function(n, rho = 0 #, inverse = FALSE
deriv = eval(substitute(expression({
Rho <- eta2theta(eta, .lrho , earg = .erho )
- Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
- Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
+ Yindex1 <- extra$Q1 * (1:(extra$ncoly/extra$Q1)) - 1
+ Yindex2 <- extra$Q1 * (1:(extra$ncoly/extra$Q1))
temp7 <- 1 - Rho^2
q.y <- qnorm(y)
@@ -1017,12 +1012,24 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
namesof("location1", llocat, elocat), ", ",
namesof("scale1", lscale, escale), ", ",
namesof("location2", llocat, elocat), ", ",
- namesof("scale2", lscale, escale),
- "\n", "\n",
+ namesof("scale2", lscale, escale), "\n", "\n",
"Means: location1, location2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 4)
}), list( .zero = zero))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 4,
+ Q1 = 2,
+ parameters.names = c("location1", "scale1", "location2", "scale2"),
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1089,11 +1096,11 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
cbind(eta[, 1], eta[, 2])
},
last = eval(substitute(expression({
- misc$link <- c(location1 = .llocat, scale1 = .lscale,
- location2 = .llocat, scale2 = .lscale)
+ misc$link <- c(location1 = .llocat , scale1 = .lscale ,
+ location2 = .llocat , scale2 = .lscale )
- misc$earg <- list(location1 = .elocat, scale1 = .escale,
- location2 = .elocat, scale2 = .escale)
+ misc$earg <- list(location1 = .elocat , scale1 = .escale ,
+ location2 = .elocat , scale2 = .escale )
misc$expected <- FALSE
misc$BFGS <- TRUE
@@ -1302,12 +1309,39 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
namesof("b", lb, earg = eb ), ", ",
namesof("bp", lbp, earg = ebp)),
constraints = eval(substitute(expression({
+ M1 <- 4
+ Q1 <- 2
constraints <- cm.VGAM(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x = x,
bool = .independent ,
constraints = constraints,
apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
- }), list(.independent = independent, .zero = zero))),
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 4)
+ }), list( .independent = independent, .zero = zero))),
+
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 4,
+ Q1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("a", "ap", "b", "bp"),
+ la = .la ,
+ lap = .lap ,
+ lb = .lb ,
+ lbp = .lbp ,
+ independent = .independent ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .la = la ,
+ .lap = lap ,
+ .lb = lb ,
+ .lbp = lbp ,
+ .independent = independent ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1323,14 +1357,14 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
predictors.names <-
- c(namesof("a", .la, earg = .ea , short = TRUE),
- namesof("ap", .lap, earg = .eap, short = TRUE),
- namesof("b", .lb, earg = .eb , short = TRUE),
- namesof("bp", .lbp, earg = .ebp, short = TRUE))
+ c(namesof("a", .la , earg = .ea , short = TRUE),
+ namesof("ap", .lap , earg = .eap , short = TRUE),
+ namesof("b", .lb , earg = .eb , short = TRUE),
+ namesof("bp", .lbp , earg = .ebp , short = TRUE))
extra$y1.lt.y2 = y[, 1] < y[, 2]
if (!(arr <- sum(extra$y1.lt.y2)) || arr == n)
- stop("identifiability problem: either all y1<y2 or y2<y1")
+ stop("identifiability problem: either all y1<y2 or y2<y1")
if (!length(etastart)) {
sumx <- sum(y[ extra$y1.lt.y2, 1]);
@@ -1343,20 +1377,20 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
sumx <- sumx * 1.1; sumxp <- sumxp * 1.2;
sumy <- sumy * 1.2; sumyp <- sumyp * 1.3;
}
- ainit <- if (length(.ia)) rep(.ia, length.out = n) else
- arr / (sumx + sumyp)
- apinit <- if (length(.iap)) rep(.iap,length.out = n) else
- (n-arr)/(sumxp-sumyp)
- binit <- if (length(.ib)) rep(.ib, length.out = n) else
- (n-arr)/(sumx +sumyp)
- bpinit <- if (length(.ib)) rep(.ibp,length.out = n) else
- arr / (sumy - sumx)
+ ainit <- if (length( .ia )) rep( .ia , length.out = n) else
+ arr / (sumx + sumyp)
+ apinit <- if (length( .iap )) rep( .iap , length.out = n) else
+ (n-arr) / (sumxp - sumyp)
+ binit <- if (length( .ib )) rep( .ib , length.out = n) else
+ (n-arr) / (sumx + sumyp)
+ bpinit <- if (length( .ibp )) rep( .ibp , length.out = n) else
+ arr / (sumy - sumx)
etastart <-
- cbind(theta2eta(rep(ainit, length.out = n), .la, earg = .ea ),
- theta2eta(rep(apinit, length.out = n), .lap, earg = .eap ),
- theta2eta(rep(binit, length.out = n), .lb, earg = .eb ),
- theta2eta(rep(bpinit, length.out = n), .lbp, earg = .ebp ))
+ cbind(theta2eta(rep(ainit, length.out = n), .la , earg = .ea ),
+ theta2eta(rep(apinit, length.out = n), .lap , earg = .eap ),
+ theta2eta(rep(binit, length.out = n), .lb , earg = .eb ),
+ theta2eta(rep(bpinit, length.out = n), .lbp , earg = .ebp ))
}
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
@@ -1371,9 +1405,8 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
}, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
last = eval(substitute(expression({
- misc$link <- c("a" = .la, "ap" = .lap, "b" = .lb, "bp" = .lbp)
-
- misc$earg <- list("a" = .ea, "ap" = .eap, "b" = .eb, "bp" = .ebp)
+ misc$link <- c("a" = .la , "ap" = .lap , "b" = .lb , "bp" = .lbp )
+ misc$earg <- list("a" = .ea , "ap" = .eap , "b" = .eb , "bp" = .ebp )
misc$multipleResponses <- FALSE
}), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
@@ -1468,7 +1501,7 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
ishape1 = NULL,
ishape2 = NULL,
imethod = 1,
- zero = 2:3) {
+ zero = "shape") {
lscale <- as.list(substitute(lscale))
escale <- link2list(lscale)
lscale <- attr(escale, "function.name")
@@ -1506,8 +1539,29 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
namesof("shape1", lshape1, earg = eshape1), ", ",
namesof("shape2", lshape2, earg = eshape2)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape1", "shape2"),
+ lscale = .lscale ,
+ lshape1 = .lshape1 ,
+ lshape2 = .lshape2 ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lscale = lscale ,
+ .lshape1 = lshape1,
+ .lshape2 = lshape2 ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1649,15 +1703,15 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
d23 <- 0
wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] <- dtheta.deta(aparam, .lscale)^2 * d11
- wz[, iam(2, 2, M)] <- dtheta.deta(shape1, .lshape1)^2 * d22
- wz[, iam(3, 3, M)] <- dtheta.deta(shape2, .lshape2)^2 * d33
- wz[, iam(1, 2, M)] <- dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape1, .lshape1) * d12
- wz[, iam(1, 3, M)] <- dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape2, .lshape2) * d13
- wz[, iam(2, 3, M)] <- dtheta.deta(shape1, .lshape1) *
- dtheta.deta(shape2, .lshape2) * d23
+ wz[, iam(1, 1, M)] <- dtheta.deta(aparam, .lscale )^2 * d11
+ wz[, iam(2, 2, M)] <- dtheta.deta(shape1, .lshape1 )^2 * d22
+ wz[, iam(3, 3, M)] <- dtheta.deta(shape2, .lshape2 )^2 * d33
+ wz[, iam(1, 2, M)] <- dtheta.deta(aparam, .lscale ) *
+ dtheta.deta(shape1, .lshape1 ) * d12
+ wz[, iam(1, 3, M)] <- dtheta.deta(aparam, .lscale ) *
+ dtheta.deta(shape2, .lshape2 ) * d13
+ wz[, iam(2, 3, M)] <- dtheta.deta(shape1, .lshape1 ) *
+ dtheta.deta(shape2, .lshape2 ) * d23
c(w) * wz
}), list( .lscale = lscale, .lshape1 = lshape1,
@@ -3123,7 +3177,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
isd1 = NULL, isd2 = NULL,
irho = NULL, imethod = 1,
eq.mean = FALSE, eq.sd = FALSE,
- zero = 3:5) {
+ zero = c("sd", "rho")) {
lmean1 <- as.list(substitute(lmean1))
emean1 <- link2list(lmean1)
@@ -3203,7 +3257,9 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
}
- constraints <- cm.zero.VGAM(con.use , x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 5)
}), list( .zero = zero,
.eq.sd = eq.sd,
.eq.mean = eq.mean ))),
@@ -3211,6 +3267,9 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
infos = eval(substitute(function(...) {
list(M1 = 5,
Q1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mean1", "mean2", "sd1", "sd2", "rho"),
eq.mean = .eq.mean ,
eq.sd = .eq.sd ,
zero = .zero )
@@ -3288,16 +3347,16 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
.esd1 = esd1 , .esd2 = esd2 , .erho = erho ))),
last = eval(substitute(expression({
- misc$link <- c("mean1" = .lmean1,
- "mean2" = .lmean2,
- "sd1" = .lsd1,
- "sd2" = .lsd2,
+ misc$link <- c("mean1" = .lmean1 ,
+ "mean2" = .lmean2 ,
+ "sd1" = .lsd1 ,
+ "sd2" = .lsd2 ,
"rho" = .lrho )
- misc$earg <- list("mean1" = .emean1,
- "mean2" = .emean2,
- "sd1" = .esd1,
- "sd2" = .esd2,
+ misc$earg <- list("mean1" = .emean1 ,
+ "mean2" = .emean2 ,
+ "sd1" = .esd1 ,
+ "sd2" = .esd2 ,
"rho" = .erho )
misc$expected <- TRUE
@@ -3310,11 +3369,11 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
- mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
- sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
- sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
- Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
+ mean1 <- eta2theta(eta[, 1], .lmean1 , earg = .emean1 )
+ mean2 <- eta2theta(eta[, 2], .lmean2 , earg = .emean2 )
+ sd1 <- eta2theta(eta[, 3], .lsd1 , earg = .esd1 )
+ sd2 <- eta2theta(eta[, 4], .lsd2 , earg = .esd2 )
+ Rho <- eta2theta(eta[, 5], .lrho , earg = .erho )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
diff --git a/R/family.categorical.R b/R/family.categorical.R
index e680f98..4cb3731 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -218,7 +218,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
parallel = .parallel ,
reverse = .reverse ,
whitespace = .whitespace ,
@@ -234,7 +236,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
@@ -343,7 +347,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
ll.elts
}
},
- vfamily = c("sratio", "vcategorical"),
+ vfamily = c("sratio", "VGAMordinal", "VGAMcategorical"),
deriv = eval(substitute(expression({
if (!length(extra$mymat)) {
extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
@@ -414,7 +418,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
parallel = .parallel ,
reverse = .reverse ,
whitespace = .whitespace ,
@@ -431,7 +437,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M)
}), list( .parallel = parallel, .zero = zero ))),
@@ -478,7 +486,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
} else {
djs <- eta2theta(eta, .link , earg = .earg )
temp <- tapplymat1(djs, "cumprod")
- cbind(1 - djs,1) * cbind(1, temp)
+ cbind(1 - djs, 1) * cbind(1, temp)
}
if (length(extra$dimnamesy2))
dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
@@ -544,7 +552,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}
},
- vfamily = c("cratio", "vcategorical"),
+ vfamily = c("cratio", "VGAMordinal", "VGAMcategorical"),
deriv = eval(substitute(expression({
if (!length(extra$mymat)) {
@@ -613,7 +621,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
- vglm.vcategorical.control <-
+ vglm.VGAMcategorical.control <-
function(maxit = 30,
trace = FALSE,
panic = TRUE, ...) {
@@ -702,7 +710,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
bool = .parallel ,
apply.int = TRUE,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M)
constraints <- cm.nointercept.VGAM(constraints, x, .nointercept , M)
}), list( .parallel = parallel, .zero = zero,
.nointercept = nointercept,
@@ -715,7 +725,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
refLevel = .refLevel ,
M1 = -1,
link = "multilogit",
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
zero = .zero )
}, list( .zero = zero,
.refLevel = refLevel,
@@ -819,7 +831,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}
},
- vfamily = c("multinomial", "vcategorical"),
+ vfamily = c("multinomial", "VGAMcategorical"),
deriv = eval(substitute(expression({
if ( .refLevel < 0) {
c(w) * (y[, -ncol(y)] - mu[, -ncol(y)])
@@ -908,7 +920,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = .multiple.responses ,
+ parameters.names = as.character(NA),
parallel = .parallel ,
reverse = .reverse ,
whitespace = .whitespace ,
@@ -1174,7 +1188,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}
},
- vfamily = c("cumulative", "vcategorical"),
+ vfamily = c("cumulative", "VGAMordinal", "VGAMcategorical"),
deriv = eval(substitute(expression({
mu.use <- pmax(mu, .Machine$double.eps * 1.0e-0)
deriv.answer <-
@@ -1296,7 +1310,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
parallel = .parallel ,
reverse = .reverse ,
whitespace = .whitespace ,
@@ -1312,7 +1328,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
@@ -1414,7 +1432,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
}
}
},
- vfamily = c("acat", "vcategorical"),
+ vfamily = c("acat", "VGAMordinal", "VGAMcategorical"),
deriv = eval(substitute(expression({
zeta <- eta2theta(eta, .link , earg = .earg ) # May be zetar
@@ -1436,7 +1454,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
answer
}), list( .earg = earg, .link = link, .reverse = reverse) )),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
hess <- attr(d1, "hessian") / d1
@@ -1508,7 +1526,9 @@ acat.deriv <- function(zeta, reverse, M, n) {
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
refvalue = .refvalue ,
refgp = .refgp ,
ialpha = .ialpha )
@@ -1596,7 +1616,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
}
}
},
- vfamily = c("brat"),
+ vfamily = c("brat", "VGAMcategorical"),
deriv = eval(substitute(expression({
ans <- NULL
uindex <- if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
@@ -1672,7 +1692,9 @@ acat.deriv <- function(zeta, reverse, M, n) {
infos = eval(substitute(function(...) {
list(M1 = NA, # zz -1?
Q1 = NA,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = as.character(NA),
refvalue = .refvalue ,
refgp = .refgp ,
i0 = .i0 ,
@@ -1774,7 +1796,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
}
}
},
- vfamily = c("bratt"),
+ vfamily = c("bratt", "VGAMcategorical"),
deriv = eval(substitute(expression({
ans <- NULL
ties <- extra$ties
@@ -1985,9 +2007,10 @@ InverseBrat <-
ordpoisson <- function(cutpoints,
- countdata = FALSE, NOS = NULL, Levels = NULL,
- init.mu = NULL, parallel = FALSE, zero = NULL,
- link = "loge") {
+ countdata = FALSE, NOS = NULL, Levels = NULL,
+ init.mu = NULL, parallel = FALSE,
+ zero = NULL,
+ link = "loge") {
link <- as.list(substitute(link))
earg <- link2list(link)
@@ -2020,13 +2043,28 @@ InverseBrat <-
new("vglmff",
blurb = c(paste("Ordinal Poisson model\n\n"),
"Link: ", namesof("mu", link, earg = earg)),
+
+
constraints = eval(substitute(expression({
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
apply.int = TRUE,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel = parallel, .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 1,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("mu"),
+ lmu = .link ,
+ zero = .zero )
+ }, list( .zero = zero, .link = link ))),
+
initialize = eval(substitute(expression({
orig.y <- cbind(y) # Convert y into a matrix if necessary
if ( .countdata ) {
@@ -2080,9 +2118,8 @@ InverseBrat <-
extra$cutpoints <- cp.vector
extra$n <- n
- mynames <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- predictors.names <-
- namesof(mynames, .link , short = TRUE, earg = .earg )
+ mynames <- param.names("mu", M)
+ predictors.names <- namesof(mynames, .link , earg = .earg , tag = FALSE)
}), list( .link = link, .countdata = countdata, .earg = earg,
.cutpoints=cutpoints, .NOS=NOS, .Levels=Levels,
.init.mu = init.mu
@@ -2127,7 +2164,7 @@ InverseBrat <-
}
}
},
- vfamily = c("ordpoisson", "vcategorical"),
+ vfamily = c("ordpoisson", "VGAMcategorical"),
deriv = eval(substitute(expression({
probs <- ordpoissonProbs(extra, mu)
probs.use <- pmax(probs, .Machine$double.eps * 1.0e-0)
@@ -2217,16 +2254,631 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
- margeff <- function(object, subset = NULL) {
+
+findFirstMethod <- function(methodsfn, charvec) {
+ answer <- NULL
+ for (ii in 1:length(charvec)) {
+ if (existsMethod(methodsfn, signature(VGAMff = charvec[ii]))) {
+ answer <- charvec[ii]
+ break
+ }
+ }
+ answer
+}
+
+
+
+margeff <- function(object, subset = NULL, ...) {
+
+
+ try.this <- findFirstMethod("margeffS4VGAM", object at family@vfamily)
+ if (length(try.this)) {
+ margeffS4VGAM(object = object,
+ subset = subset,
+ VGAMff = new(try.this),
+ ...)
+ } else {
+ stop("Could not find a methods function for 'margeffS4VGAM' ",
+ "emanating from '", object at family@vfamily[1], "'")
+ }
+}
+
+
+
+
+
+subsetarray3 <- function(array3, subset = NULL) {
+ if (is.null(subset)) {
+ return(array3)
+ } else
+ if (is.numeric(subset) && (length(subset) == 1)) {
+ return(array3[, , subset])
+ } else {
+ return(array3[, , subset])
+ }
+ warning("argument 'subset' unmatched. Doing nothing")
+ array3
+}
+
+
+
+
+
+setClass("VGAMcategorical", contains = "vglmff")
+
+setClass("VGAMordinal", contains = "VGAMcategorical")
+setClass("multinomial", contains = "VGAMcategorical")
+
+setClass("acat", contains = "VGAMordinal")
+setClass("cumulative", contains = "VGAMordinal")
+setClass("cratio", contains = "VGAMordinal")
+setClass("sratio", contains = "VGAMordinal")
+
+
+
+setMethod("margeffS4VGAM",
+ signature(VGAMff = "VGAMcategorical"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+ object at post$M <- M <- object at misc$M
+ object at post$n <- nnn <- object at misc$n
+ invisible(object)
+ })
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff ="multinomial"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+
+ object <- callNextMethod(VGAMff = VGAMff,
+ object = object,
+ subset = subset,
+ ...)
+
+ M <- object at misc$M
+ nnn <- object at misc$n
+ cfit <- coefvlm(object, matrix.out = TRUE)
+ rlev <- object at misc$refLevel
+ if (!length(rlev))
+ relev <- M+1 # Default
+ Bmat <- matrix(0, nrow(cfit), 1 + ncol(cfit))
+ Bmat[, -rlev] <- cfit
+ ppp <- nrow(Bmat)
+ pvec1 <- fitted(object)[1, ]
+ rownames(Bmat) <- rownames(cfit)
+ colnames(Bmat) <- if (length(names(pvec1))) names(pvec1) else
+ paste("mu", 1:(M+1), sep = "")
+
+
+ BB <- array(Bmat, c(ppp, M+1, nnn))
+ pvec <- c(t(fitted(object)))
+ pvec <- rep(pvec, each = ppp)
+ temp1 <- array(BB * pvec, c(ppp, M+1, nnn))
+ temp2 <- aperm(temp1, c(2, 1, 3)) # (M+1) x ppp x nnn
+ temp2 <- colSums(temp2) # ppp x nnn
+ temp2 <- array(rep(temp2, each = M+1), c(M+1, ppp, nnn))
+ temp2 <- aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
+ temp3 <- pvec
+ ans.mlm <- array((BB - temp2) * temp3, c(ppp, M+1, nnn),
+ dimnames = list(dimnames(Bmat)[[1]],
+ dimnames(Bmat)[[2]], dimnames(fitted(object))[[1]]))
+ return(subsetarray3(ans.mlm, subset = subset))
+ })
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff = "VGAMordinal"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+ M <- object at misc$M
+ nnn <- object at misc$n
+
+ object at post$reverse <- object at misc$reverse
+ object at post$linkfunctions <- linkfunctions <- object at misc$link
+ object at post$all.eargs <- all.eargs <- object at misc$earg
+ object at post$Bmat <- Bmat <- coefvlm(object, matrix.out = TRUE)
+ object at post$ppp <- nrow(Bmat)
+ etamat <- predict(object)
+
+ hdot <- Thetamat <- etamat
+ for (jlocal in 1:M) {
+ Thetamat[, jlocal] <- eta2theta(etamat[, jlocal],
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ hdot[, jlocal] <- dtheta.deta(Thetamat[, jlocal],
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ } # jlocal
+
+
+ object at post$hdot <- hdot
+ object at post$Thetamat <- Thetamat
+ object
+ })
+
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff = "cumulative"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+
+
+ object <- callNextMethod(VGAMff = VGAMff,
+ object = object,
+ subset = subset,
+ ...)
+ reverse <- object at post$reverse
+ linkfunctions <- object at post$linkfunctions
+ all.eargs <- object at post$all.eargs
+ Bmat <- cfit <- object at post$Bmat
+ ppp <- object at post$ppp
+ etamat <- predict(object) # nnn x M
+ fitmat <- fitted(object) # nnn x (M + 1)
+ nnn <- nrow(etamat)
+ M <- ncol(etamat)
+ hdot <- object at post$hdot
+ Thetamat <- object at post$Thetamat
+
+
+
+
+ hdot.big <- kronecker(hdot, matrix(1, ppp, 1)) # Enlarged
+ resmat <- cbind(hdot.big, 1)
+ resmat[, 1] <- ifelse(reverse, -1, 1) * hdot.big[, 1] * cfit[, 1]
+
+ if (M > 1) {
+ for (jlocal in 2:M) {
+ resmat[, jlocal] <- ifelse(reverse, -1, 1) *
+ (hdot.big[, jlocal ] * cfit[, jlocal ] -
+ hdot.big[, jlocal - 1] * cfit[, jlocal - 1])
+ } # jlocal
+
+ } # if
+
+ resmat[, M+1] <- ifelse(reverse, 1, -1) * hdot.big[, M] * cfit[, M]
+
+ ans.cum <- array(resmat, c(ppp, nnn, M+1),
+ dimnames = list(dimnames(Bmat)[[1]],
+ dimnames(fitted(object))[[1]],
+ dimnames(fitted(object))[[2]]))
+ ans.cum <- aperm(ans.cum, c(1, 3, 2)) # ppp x (M+1) x nnn
+
+ subsetarray3(ans.cum, subset = subset)
+ })
+
+
+
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff = "acat"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+
+
+ object <- callNextMethod(VGAMff = VGAMff,
+ object = object,
+ subset = subset,
+ ...)
+ reverse <- object at post$reverse
+ linkfunctions <- object at post$linkfunctions
+ all.eargs <- object at post$all.eargs
+ Bmat <- cfit <- object at post$Bmat
+ ppp <- object at post$ppp
+ etamat <- predict(object) # nnn x M
+ fitmat <- fitted(object) # nnn x (M + 1)
+ nnn <- nrow(etamat)
+ M <- ncol(etamat)
+ hdot <- object at post$hdot
+ Thetamat <- object at post$Thetamat
+
+
+
+
+ expcs.etamat <- if (reverse)
+ exp(tapplymat1(etamat[, M:1, drop = FALSE],
+ "cumsum")[, M:1, drop = FALSE]) else
+ exp(tapplymat1(etamat, "cumsum"))
+ csexpcs.etavec <- rowSums(expcs.etamat)
+
+
+
+ if (!all(object at misc$link == "loge"))
+ stop("currently only the 'loge' link is supported")
+
+
+ acat.derivs <- function(jay, tee,
+ M, expcs.etamat, Thetamat,
+ prob1, probMplus1,
+ reverse = FALSE) {
+
+ if (jay > M+1) stop("argument 'jay' out of range")
+ if (M < tee) stop("argument 'tee' out of range")
+
+ if (reverse) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ dpMplus1.detat <- -(probMplus1^2) *
+ rowSums(expcs.etamat[, 1:tee, drop = FALSE])
+ if (jay == M+1) {
+ return(dpMplus1.detat)
+ }
+ if (jay <= tee) {
+ return((probMplus1 + dpMplus1.detat) * expcs.etamat[, jay])
+ }
+ if (tee < jay) {
+ return(dpMplus1.detat * expcs.etamat[, jay])
+ }
+ } else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ dp1.detat <- -(prob1^2) * rowSums(expcs.etamat[, tee:M, drop = FALSE])
+ if (jay == 1) {
+ return(dp1.detat)
+ }
+ if (jay <= tee) {
+ return(dp1.detat * expcs.etamat[, jay-1])
+ }
+ if (tee < jay) {
+ return((prob1 + dp1.detat) * expcs.etamat[, jay-1])
+ }
+ } # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ } # acat.derivs
+
+
+
+ A <- array(0, c(i = nnn, vars = ppp, probs = M + 1, etas = M))
+ ansarray <- array(0, c(vars = ppp, i = nnn, probs = M + 1))
+ if (reverse) {
+ probMplus1 <- 1 / (1 + csexpcs.etavec) # Last level of Y
+ } else {
+ prob1 <- 1 / (1 + csexpcs.etavec) # First level of Y
+ }
+
+ for (jlocal in 1:(M+1)) {
+ for (tlocal in 1:M) {
+ A[, , jlocal, tlocal] <-
+ acat.derivs(jay = jlocal, tee = tlocal,
+ M = M, expcs.etamat = expcs.etamat,
+ Thetamat = Thetamat,
+ prob1 = prob1, probMplus1 = probMplus1,
+ reverse = reverse)
+ }
+ }
+
+
+ A <- aperm(A, c(2, 1, 3, 4)) # c(ppp, nnn, M+1, M)
+ for (jlocal in 1:(M + 1)) {
+ for (tlocal in 1:M) {
+ ansarray[,, jlocal] <- ansarray[,, jlocal] +
+ A[,, jlocal, tlocal] * Bmat[, tlocal]
+ }
+ }
+ ans.acat <- aperm(ansarray, c(1, 3, 2)) # c(ppp, M+1, nnn)
+ dimnames(ans.acat) <- list(rownames(Bmat),
+ colnames(fitmat),
+ rownames(etamat))
+ subsetarray3(ans.acat, subset = subset)
+ })
+
+
+
+
+
+ cratio.derivs <- function(jay, tee,
+ hdot, M, cpThetamat, Thetamat,
+ reverse = FALSE) {
+
+ if (jay >= M+1) stop("argument 'jay' out of range")
+ if (M < tee) stop("argument 'tee' out of range")
+
+ if (reverse) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ if (jay == 1) {
+ return(hdot[, tee] * cpThetamat[, 1] / Thetamat[, tee])
+ }
+
+ if (jay-1 == tee) {
+ return(-hdot[, jay-1] * cpThetamat[, jay])
+ }
+ if (jay <= tee) {
+ return((1 - Thetamat[, jay-1]) *
+ hdot[, tee] * cpThetamat[, jay] / Thetamat[, tee])
+ }
+ return(rep(0, length = nrow(Thetamat))) # Since jay-1 > tee
+ } else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ if (jay == 1 && tee == 1) {
+ return(-hdot[, 1])
+ }
+
+ if (jay == tee) {
+ return(-hdot[, jay] * cpThetamat[, jay-1])
+ }
+ if (tee < jay) {
+ return((1 - Thetamat[, jay]) *
+ hdot[, tee] * cpThetamat[, jay-1] / Thetamat[, tee])
+ }
+ return(rep(0, length = nrow(Thetamat))) # Since jay < tee
+ } # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ } # cratio.derivs
+
+
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff = "cratio"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+
+
+ object <- callNextMethod(VGAMff = VGAMff,
+ object = object,
+ subset = subset,
+ ...)
+ reverse <- object at post$reverse
+ linkfunctions <- object at post$linkfunctions
+ all.eargs <- object at post$all.eargs
+ Bmat <- cfit <- object at post$Bmat
+ ppp <- object at post$ppp
+ etamat <- predict(object) # nnn x M
+ fitmat <- fitted(object) # nnn x (M + 1)
+ nnn <- nrow(etamat)
+ M <- ncol(etamat)
+ hdot <- object at post$hdot
+ Thetamat <- object at post$Thetamat
+
+
+
+
+
+
+
+ vfamily <- object at family@vfamily
+ c.nots <- any(vfamily == "cratio")
+
+ if (any(vfamily == "cratio")) {
+ cpThetamat <- if (reverse)
+ tapplymat1( Thetamat[, M:1, drop = FALSE],
+ "cumprod")[, M:1, drop = FALSE] else
+ tapplymat1( Thetamat, "cumprod")
+ }
+
+
+
+ A <- array(0, c(i = nnn, vars = ppp, probs = M + 1, etas = M))
+ ansarray <- array(0, c(vars = ppp, i = nnn, probs = M + 1))
+
+
+ choosemat <- if (c.nots) Thetamat else 1 - Thetamat
+ if (min(choosemat) <= 0)
+ warning("division by 0 may occur")
+
+
+
+
+
+
+ if (reverse) {
+ for (tlocal in 1:M) {
+ for (jlocal in 1:tlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ if (M > 1)
+ for (jlocal in 2:M) {
+ A[, , jlocal, jlocal-1] <-
+ cratio.derivs(jay = jlocal, tee = jlocal-1,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ }
+
+ if (reverse) {
+ A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , M+1, tlocal] <- if (c.nots) {
+ A[, , M+1, tlocal] - A[, , jlocal, tlocal]
+ } else {
+ -hdot[, tlocal] * cpThetamat[, M] / choosemat[, tlocal]
+ }
+ }
+ }
+ }
+
+ A <- aperm(A, c(2, 1, 3, 4)) # c(ppp, nnn, M+1, M)
+ for (jlocal in 1:(M + 1)) {
+ for (tlocal in 1:M) {
+ ansarray[,, jlocal] <- ansarray[,, jlocal] +
+ A[,, jlocal, tlocal] * Bmat[, tlocal]
+ }
+ }
+ ans.csratio <- aperm(ansarray, c(1, 3, 2)) # c(ppp, M+1, nnn)
+ dimnames(ans.csratio) <- list(rownames(Bmat),
+ colnames(fitmat),
+ rownames(etamat))
+ subsetarray3(ans.csratio, subset = subset) # "cratio" and "sratio"
+ })
+
+
+
+
+setMethod("margeffS4VGAM", signature(VGAMff = "sratio"),
+ function(object,
+ subset = NULL,
+ VGAMff,
+ ...) {
+
+
+
+
+
+
+
+
+ object <- callNextMethod(VGAMff = VGAMff,
+ object = object,
+ subset = subset,
+ ...)
+ reverse <- object at post$reverse
+ linkfunctions <- object at post$linkfunctions
+ all.eargs <- object at post$all.eargs
+ Bmat <- cfit <- object at post$Bmat
+ ppp <- object at post$ppp
+ etamat <- predict(object) # nnn x M
+ fitmat <- fitted(object) # nnn x (M + 1)
+ nnn <- nrow(etamat)
+ M <- ncol(etamat)
+ hdot <- object at post$hdot
+ Thetamat <- object at post$Thetamat
+
+
+
+
+ vfamily <- object at family@vfamily
+ c.nots <- any(vfamily == "cratio")
+ if (any(vfamily == "sratio")) {
+ cpThetamat <- if (reverse)
+ tapplymat1(1 - Thetamat[, M:1, drop = FALSE],
+ "cumprod")[, M:1, drop = FALSE] else
+ tapplymat1(1 - Thetamat, "cumprod")
+ }
+
+
+
+ A <- array(0, c(i = nnn, vars = ppp, probs = M + 1, etas = M))
+ ansarray <- array(0, c(vars = ppp, i = nnn, probs = M + 1))
+
+
+ choosemat <- if (c.nots) Thetamat else 1 - Thetamat
+ if (min(choosemat) <= 0)
+ warning("division by 0 may occur")
+
+
+
- ii <- ii.save <- subset
+
+
+
+
+
+
+ if (reverse) {
+ for (tlocal in 1:M) {
+ for (jlocal in 1:tlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ if (M > 1)
+ for (jlocal in 2:M) {
+ A[, , jlocal, jlocal-1] <-
+ cratio.derivs(jay = jlocal, tee = jlocal-1,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ }
+
+ if (reverse) {
+ A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , M+1, tlocal] <- if (c.nots) {
+ A[, , M+1, tlocal] - A[, , jlocal, tlocal]
+ } else {
+ -hdot[, tlocal] * cpThetamat[, M] / choosemat[, tlocal]
+ }
+ }
+ }
+ }
+
+ A <- aperm(A, c(2, 1, 3, 4)) # c(ppp, nnn, M+1, M)
+ for (jlocal in 1:(M + 1)) {
+ for (tlocal in 1:M) {
+ ansarray[,, jlocal] <- ansarray[,, jlocal] +
+ A[,, jlocal, tlocal] * Bmat[, tlocal]
+ }
+ }
+ ans.csratio <- aperm(ansarray, c(1, 3, 2)) # c(ppp, M+1, nnn)
+ dimnames(ans.csratio) <- list(rownames(Bmat),
+ colnames(fitmat),
+ rownames(etamat))
+ subsetarray3(ans.csratio, subset = subset) # "cratio" and "sratio"
+ })
+
+
+
+
+
+
+
+
+ margefff <- function(object, subset = NULL) {
+
+
+ ii <- subset
if (!is(object, "vglm"))
stop("'object' is not a vglm() object")
if (!any(temp.logical <-
- is.element(c("multinomial", "cumulative", "acat"),
+ is.element(c("multinomial", "cumulative", "acat", "cratio", "sratio"),
object at family@vfamily)))
- stop("'object' is not a 'multinomial' or 'acat' or 'cumulative' VGLM!")
+ stop("'object' is not a 'multinomial' or 'acat' or 'cumulative' ",
+ " or 'cratio' or 'sratio' VGLM!")
vfamily <- object at family@vfamily
if (is(object, "vgam"))
stop("'object' is a vgam() object")
@@ -2248,42 +2900,42 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
rlev <- object at misc$refLevel
cfit <- coefvlm(object, matrix.out = TRUE)
B <- if (!length(rlev)) {
- cbind(cfit, 0)
+ cbind(cfit, 0)
} else {
- if (rlev == M+1) { # Default
- cbind(cfit, 0)
- } else if (rlev == 1) {
- cbind(0, cfit)
- } else {
- cbind(cfit[, 1:(rlev-1)], 0, cfit[,rlev:M])
- }
+ if (rlev == M+1) { # Default
+ cbind(cfit, 0)
+ } else if (rlev == 1) {
+ cbind(0, cfit)
+ } else {
+ cbind(cfit[, 1:(rlev-1)], 0, cfit[, rlev:M])
+ }
}
ppp <- nrow(B)
- pvec1 <- fitted(object)[ 1,]
+ pvec1 <- fitted(object)[1, ]
colnames(B) <- if (length(names(pvec1))) names(pvec1) else
paste("mu", 1:(M+1), sep = "")
if (is.null(ii)) {
- BB <- array(B, c(ppp, M+1, nnn))
- pvec <- c(t(fitted(object)))
- pvec <- rep(pvec, each=ppp)
- temp1 <- array(BB * pvec, c(ppp, M+1, nnn))
- temp2 <- aperm(temp1, c(2,1,3)) # (M+1) x ppp x nnn
- temp2 <- colSums(temp2) # ppp x nnn
- temp2 <- array(rep(temp2, each=M+1), c(M+1, ppp, nnn))
- temp2 <- aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
- temp3 <- pvec
- ans <- array((BB - temp2) * temp3, c(ppp, M+1, nnn),
- dimnames = list(dimnames(B)[[1]],
- dimnames(B)[[2]], dimnames(fitted(object))[[1]]))
- ans
+ BB <- array(B, c(ppp, M+1, nnn))
+ pvec <- c(t(fitted(object)))
+ pvec <- rep(pvec, each = ppp)
+ temp1 <- array(BB * pvec, c(ppp, M+1, nnn))
+ temp2 <- aperm(temp1, c(2, 1, 3)) # (M+1) x ppp x nnn
+ temp2 <- colSums(temp2) # ppp x nnn
+ temp2 <- array(rep(temp2, each = M+1), c(M+1, ppp, nnn))
+ temp2 <- aperm(temp2, c(2, 1, 3)) # ppp x (M+1) x nnn
+ temp3 <- pvec
+ ans <- array((BB - temp2) * temp3, c(ppp, M+1, nnn),
+ dimnames = list(dimnames(B)[[1]],
+ dimnames(B)[[2]], dimnames(fitted(object))[[1]]))
+ return(ans)
} else
- if (is.numeric(ii) && (length(ii) == 1)) {
- pvec <- fitted(object)[ii,]
+ if (is.numeric(ii) && length(ii) == 1) {
+ pvec <- fitted(object)[ii, ]
temp1 <- B * matrix(pvec, ppp, M+1, byrow = TRUE)
temp2 <- matrix(rowSums(temp1), ppp, M+1)
temp3 <- matrix(pvec, nrow(B), M+1, byrow = TRUE)
- (B - temp2) * temp3
+ return((B - temp2) * temp3)
} else {
if (is.logical(ii))
ii <- (1:nnn)[ii]
@@ -2291,7 +2943,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
ans <- array(0, c(ppp, M+1, length(ii)),
dimnames = list(dimnames(B)[[1]],
dimnames(B)[[2]],
- dimnames(fitted(object)[ii,])[[1]]))
+ dimnames(fitted(object)[ii, ])[[1]]))
for (ilocal in 1:length(ii)) {
pvec <- fitted(object)[ii[ilocal], ]
temp1 <- B * matrix(pvec, ppp, M+1, byrow = TRUE)
@@ -2299,56 +2951,81 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
temp3 <- matrix(pvec, nrow(B), M+1, byrow = TRUE)
ans[ , , ilocal] <- (B - temp2) * temp3
}
- ans
- }
- } else if (any(vfamily == "acat")) {
- stop("currently the 'acat' family is unsupported here")
- reverse <- object at misc$reverse
- linkfunctions <- object at misc$link
- all.eargs <- object at misc$earg
- B <- cfit <- coefvlm(object, matrix.out = TRUE)
- ppp <- nrow(B)
- etamat <- predict(object) # nnn x M
+ return(ans)
+ }
+ } # "multinomial"
+ reverse <- object at misc$reverse
+ linkfunctions <- object at misc$link
+ all.eargs <- object at misc$earg
+ B <- cfit <- coefvlm(object, matrix.out = TRUE)
+ ppp <- nrow(B)
+ etamat <- predict(object) # nnn x M
+ fitmat <- fitted(object) # nnn x (M + 1)
+ nnn <- nrow(etamat)
- } else {
+ hdot <- Thetamat <- etamat
+ for (jlocal in 1:M) {
+ Thetamat[, jlocal] <- eta2theta(etamat[, jlocal],
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ hdot[, jlocal] <- dtheta.deta(Thetamat[, jlocal],
+ link = linkfunctions[jlocal],
+ earg = all.eargs[[jlocal]])
+ } # jlocal
- if (is.logical(is.multivariateY <- object at misc$multiple.responses) &&
- is.multivariateY)
- stop("cannot handle cumulative(multiple.responses = TRUE)")
- reverse <- object at misc$reverse
- linkfunctions <- object at misc$link
- all.eargs <- object at misc$earg
- B <- cfit <- coefvlm(object, matrix.out = TRUE)
- ppp <- nrow(B)
- hdot <- lpmat <- kronecker(predict(object), matrix(1, ppp, 1))
- resmat <- cbind(hdot, 1)
- for (jlocal in 1:M) {
- Cump <- eta2theta(lpmat[,jlocal],
- link = linkfunctions[jlocal],
- earg = all.eargs[[jlocal]])
- hdot[, jlocal] <- dtheta.deta(Cump,
- link = linkfunctions[jlocal],
- earg = all.eargs[[jlocal]])
- }
- resmat[, 1] <- ifelse(reverse, -1, 1) * hdot[, 1] * cfit[, 1]
+ if (any(vfamily == "acat")) {
+ expcs.etamat <- if (reverse)
+ exp(tapplymat1(etamat[, M:1, drop = FALSE],
+ "cumsum")[, M:1, drop = FALSE]) else
+ exp(tapplymat1(etamat, "cumsum"))
+ csexpcs.etavec <- rowSums(expcs.etamat)
+ }
+ if (any(vfamily == "cratio")) {
+ cpThetamat <- if (reverse)
+ tapplymat1( Thetamat[, M:1, drop = FALSE],
+ "cumprod")[, M:1, drop = FALSE] else
+ tapplymat1( Thetamat, "cumprod")
+ }
+ if (any(vfamily == "sratio")) {
+ cpThetamat <- if (reverse)
+ tapplymat1(1 - Thetamat[, M:1, drop = FALSE],
+ "cumprod")[, M:1, drop = FALSE] else
+ tapplymat1(1 - Thetamat, "cumprod")
+ }
+
+
+
+
+
+ if (is.logical(is.multivariateY <- object at misc$multiple.responses) &&
+ is.multivariateY)
+ stop("cannot handle cumulative(multiple.responses = TRUE)")
+
+
+
+
+ if (any(vfamily == "cumulative")) {
+ hdot.big <- kronecker(hdot, matrix(1, ppp, 1)) # Enlarged
+ resmat <- cbind(hdot.big, 1)
+ resmat[, 1] <- ifelse(reverse, -1, 1) * hdot.big[, 1] * cfit[, 1]
if (M > 1) {
for (jlocal in 2:M)
resmat[, jlocal] <- ifelse(reverse, -1, 1) *
- (hdot[, jlocal ] * cfit[, jlocal ] -
- hdot[, jlocal - 1] * cfit[, jlocal - 1])
+ (hdot.big[, jlocal ] * cfit[, jlocal ] -
+ hdot.big[, jlocal - 1] * cfit[, jlocal - 1])
- }
+ } # jlocal
- resmat[, M+1] <- ifelse(reverse, 1, -1) * hdot[, M] * cfit[, M]
+ resmat[, M+1] <- ifelse(reverse, 1, -1) * hdot.big[, M] * cfit[, M]
temp1 <- array(resmat, c(ppp, nnn, M+1),
dimnames = list(dimnames(B)[[1]],
@@ -2364,8 +3041,215 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
} else {
return(temp1[, , ii])
}
+ } # "cumulative"
+
+
+
+
+
+
+
+
+
+ if (any(vfamily == "acat")) {
+ if (!all(object at misc$link == "loge"))
+ stop("currently only the 'loge' link is supported")
+
+
+ acat.derivs <- function(jay, tee,
+ M, expcs.etamat, Thetamat,
+ prob1, probMplus1,
+ reverse = FALSE) {
+
+ if (jay > M+1) stop("argument 'jay' out of range")
+ if (M < tee) stop("argument 'tee' out of range")
+
+ if (reverse) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ dpMplus1.detat <- -(probMplus1^2) *
+ rowSums(expcs.etamat[, 1:tee, drop = FALSE])
+ if (jay == M+1) {
+ return(dpMplus1.detat)
+ }
+ if (jay <= tee) {
+ return((probMplus1 + dpMplus1.detat) * expcs.etamat[, jay])
+ }
+ if (tee < jay) {
+ return(dpMplus1.detat * expcs.etamat[, jay])
+ }
+ } else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ dp1.detat <- -(prob1^2) * rowSums(expcs.etamat[, tee:M, drop = FALSE])
+ if (jay == 1) {
+ return(dp1.detat)
+ }
+ if (jay <= tee) {
+ return(dp1.detat * expcs.etamat[, jay-1])
+ }
+ if (tee < jay) {
+ return((prob1 + dp1.detat) * expcs.etamat[, jay-1])
+ }
+ } # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ } # acat.derivs
+
+
+
+ A <- array(0, c(i = nnn, vars = ppp, probs = M + 1, etas = M))
+ ansarray <- array(0, c(vars = ppp, i = nnn, probs = M + 1))
+ if (reverse) {
+ probMplus1 <- 1 / (1 + csexpcs.etavec) # Last level of Y
+ } else {
+ prob1 <- 1 / (1 + csexpcs.etavec) # First level of Y
}
-}
+
+ for (jlocal in 1:(M+1)) {
+ for (tlocal in 1:M) {
+ A[, , jlocal, tlocal] <-
+ acat.derivs(jay = jlocal, tee = tlocal,
+ M = M, expcs.etamat = expcs.etamat,
+ Thetamat = Thetamat,
+ prob1 = prob1, probMplus1 = probMplus1,
+ reverse = reverse)
+ }
+ }
+
+
+ A <- aperm(A, c(2, 1, 3, 4)) # c(ppp, nnn, M+1, M)
+ for (jlocal in 1:(M + 1)) {
+ for (tlocal in 1:M) {
+ ansarray[,, jlocal] <- ansarray[,, jlocal] +
+ A[,, jlocal, tlocal] * B[, tlocal]
+ }
+ }
+ ans.acat <- aperm(ansarray, c(1, 3, 2)) # c(ppp, M+1, nnn)
+ dimnames(ans.acat) <- list(rownames(B),
+ colnames(fitmat),
+ rownames(etamat))
+ return(ans.acat)
+ } # "acat"
+
+
+
+
+
+
+ c.nots <- any(vfamily == "cratio")
+
+ cratio.derivs <- function(jay, tee,
+ hdot, M, cpThetamat, Thetamat,
+ reverse = FALSE) {
+
+ if (jay >= M+1) stop("argument 'jay' out of range")
+ if (M < tee) stop("argument 'tee' out of range")
+
+ if (reverse) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ if (jay == 1) {
+ return(hdot[, tee] * cpThetamat[, 1] / Thetamat[, tee])
+ }
+
+ if (jay-1 == tee) {
+ return(-hdot[, jay-1] * cpThetamat[, jay])
+ }
+ if (jay <= tee) {
+ return((1 - Thetamat[, jay-1]) *
+ hdot[, tee] * cpThetamat[, jay] / Thetamat[, tee])
+ }
+ return(rep(0, length = nrow(Thetamat))) # Since jay-1 > tee
+ } else { # reverse = FALSE ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+ if (jay == 1 && tee == 1) {
+ return(-hdot[, 1])
+ }
+
+ if (jay == tee) {
+ return(-hdot[, jay] * cpThetamat[, jay-1])
+ }
+ if (tee < jay) {
+ return((1 - Thetamat[, jay]) *
+ hdot[, tee] * cpThetamat[, jay-1] / Thetamat[, tee])
+ }
+ return(rep(0, length = nrow(Thetamat))) # Since jay < tee
+ } # reverse ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ } # cratio.derivs
+
+
+ A <- array(0, c(i = nnn, vars = ppp, probs = M + 1, etas = M))
+ ansarray <- array(0, c(vars = ppp, i = nnn, probs = M + 1))
+
+
+ choosemat <- if (c.nots) Thetamat else 1 - Thetamat
+ if (min(choosemat) <= 0)
+ warning("division by 0 may occur")
+
+
+
+
+ if (any(vfamily == "cratio" | vfamily == "sratio")) {
+
+
+ if (reverse) {
+ for (tlocal in 1:M) {
+ for (jlocal in 1:tlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ if (M > 1)
+ for (jlocal in 2:M) {
+ A[, , jlocal, jlocal-1] <-
+ cratio.derivs(jay = jlocal, tee = jlocal-1,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , jlocal, tlocal] <-
+ cratio.derivs(jay = jlocal, tee = tlocal,
+ hdot = ifelse(c.nots, 1, -1) * hdot,
+ M = M, cpThetamat = cpThetamat,
+ Thetamat = choosemat,
+ reverse = reverse)
+ }
+ }
+ }
+
+ if (reverse) {
+ A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
+ } else {
+ for (jlocal in 1:M) {
+ for (tlocal in 1:jlocal) {
+ A[, , M+1, tlocal] <- if (c.nots) {
+ A[, , M+1, tlocal] - A[, , jlocal, tlocal]
+ } else {
+ -hdot[, tlocal] * cpThetamat[, M] / choosemat[, tlocal]
+ }
+ }
+ }
+ }
+
+ A <- aperm(A, c(2, 1, 3, 4)) # c(ppp, nnn, M+1, M)
+ for (jlocal in 1:(M + 1)) {
+ for (tlocal in 1:M) {
+ ansarray[,, jlocal] <- ansarray[,, jlocal] +
+ A[,, jlocal, tlocal] * B[, tlocal]
+ }
+ }
+ ans.csratio <- aperm(ansarray, c(1, 3, 2)) # c(ppp, M+1, nnn)
+ dimnames(ans.csratio) <- list(rownames(B),
+ colnames(fitmat),
+ rownames(etamat))
+ return(ans.csratio)
+ } # "cratio" and "sratio"
+
+
+} # margefff
@@ -2379,7 +3263,7 @@ prplot <- function(object,
if (!any(slotNames(object) == "family") ||
- !any(object at family@vfamily == "vcategorical"))
+ !any(object at family@vfamily == "VGAMcategorical"))
stop("'object' does not seem to be a VGAM categorical model object")
if (!any(object at family@vfamily == "cumulative"))
@@ -2542,6 +3426,51 @@ setMethod("is.zero", "vglm", function(object, ...)
+setMethod("showvglmS4VGAM",
+ signature(VGAMff = "acat"),
+ function(object,
+ VGAMff,
+ ...) {
+ cat("\nThis is an adjacent categories model with", 1 + object at misc$M, "levels\n")
+ invisible(object)
+ })
+
+
+setMethod("showvgamS4VGAM",
+ signature(VGAMff = "acat"),
+ function(object,
+ VGAMff,
+ ...) {
+ cat("\nThis is an adjacent categories model with", 1 + object at misc$M, "levels\n")
+ invisible(object)
+ })
+
+
+
+setMethod("showvglmS4VGAM",
+ signature(VGAMff = "multinomial"),
+ function(object,
+ VGAMff,
+ ...) {
+ cat("\nThis is a multinomial logit model with", 1 + object at misc$M, "levels\n")
+ invisible(object)
+ })
+
+
+setMethod("showvgamS4VGAM",
+ signature(VGAMff = "multinomial"),
+ function(object,
+ VGAMff,
+ ...) {
+ cat("\nThis is a multinomial logit model with", 1 + object at misc$M, "levels\n")
+ invisible(object)
+ })
+
+
+
+
+
+
diff --git a/R/family.censored.R b/R/family.censored.R
index 47d0bc2..8cc4b65 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -336,8 +336,7 @@ if (FALSE)
cennormal <-
cens.normal <- function(lmu = "identitylink", lsd = "loge",
- imethod = 1, zero = 2) {
-
+ imethod = 1, zero = "sd") {
lmu <- as.list(substitute(lmu))
@@ -349,7 +348,6 @@ if (FALSE)
lsd <- attr(esd, "function.name")
-
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
@@ -362,8 +360,19 @@ if (FALSE)
namesof("sd", lsd, tag = TRUE), "\n",
"Conditional variance: sd^2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ zero = .zero ,
+ multiple.responses = FALSE,
+ parameters.names = c("mu", "sd"),
+ expected = TRUE )
+ }, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
temp5 <-
@@ -383,8 +392,8 @@ if (FALSE)
stop("some observations are both right and left censored!")
predictors.names <-
- c(namesof("mu", .lmu , earg =.emu, tag = FALSE),
- namesof("sd", .lsd, earg =.esd, tag = FALSE))
+ c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("sd", .lsd , earg = .esd , tag = FALSE))
if (!length(etastart)) {
anyc <- extra$leftcensored | extra$rightcensored
@@ -404,9 +413,9 @@ if (FALSE)
eta2theta(eta[, 1], .lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link <- c("mu" = .lmu , "sd" = .lsd)
+ misc$link <- c("mu" = .lmu , "sd" = .lsd )
- misc$earg <- list("mu" = .emu ,"sd" = .esd )
+ misc$earg <- list("mu" = .emu , "sd" = .esd )
misc$expected <- TRUE
misc$multipleResponses <- FALSE
@@ -419,7 +428,7 @@ if (FALSE)
cen0 <- !cenL & !cenU # uncensored obsns
mum <- eta2theta(eta[, 1], .lmu , earg = .emu )
- sdv <- eta2theta(eta[, 2], .lsd, earg = .esd )
+ sdv <- eta2theta(eta[, 2], .lsd , earg = .esd )
Lower <- ifelse(cenL, y, -Inf)
Upper <- ifelse(cenU, y, Inf)
@@ -428,7 +437,9 @@ if (FALSE)
ell3 <- log1p(-pnorm(( Upper[cenU] - mum[cenU]) / sdv[cenU]))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
+ sum(w[cen0] * ell1) +
+ sum(w[cenL] * ell2) +
+ sum(w[cenU] * ell3)
}, list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd ))),
vfamily = c("cens.normal"),
@@ -446,7 +457,7 @@ if (FALSE)
dl.dsd <- (((y-mum)/sdv)^2 - 1) / sdv
dmu.deta <- dtheta.deta(mum, .lmu , earg = .emu )
- dsd.deta <- dtheta.deta(sdv, .lsd, earg = .esd )
+ dsd.deta <- dtheta.deta(sdv, .lsd , earg = .esd )
if (any(cenL)) {
mumL <- mum - Lower
@@ -477,8 +488,8 @@ if (FALSE)
A3 <- 1 - pnorm((Upper - mum) / sdv) # Upper
A2 <- 1 - A1 - A3 # Middle; uncensored
wz <- matrix(0, n, 3)
- wz[,iam(1, 1,M)] <- A2 * 1 / sdv^2 # ed2l.dmu2
- wz[,iam(2, 2,M)] <- A2 * 2 / sdv^2 # ed2l.dsd2
+ wz[, iam(1, 1,M)] <- A2 * 1 / sdv^2 # ed2l.dmu2
+ wz[, iam(2, 2,M)] <- A2 * 2 / sdv^2 # ed2l.dsd2
mumL <- mum - Lower
temp21L <- mumL / sdv
PhiL <- pnorm(temp21L)
@@ -486,15 +497,15 @@ if (FALSE)
temp31L <- ((1-PhiL) * sdv)^2
wz.cenL11 <- phiL * (phiL - (1-PhiL)*temp21L) / temp31L
wz.cenL22 <- mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
- mumL * phiL / sdv) / (sdv * temp31L)
+ mumL * phiL / sdv) / (sdv * temp31L)
wz.cenL12 <- phiL * ((1-PhiL)*(temp21L^2 - 1) -
- temp21L*phiL) / temp31L
+ temp21L*phiL) / temp31L
wz.cenL11[!is.finite(wz.cenL11)] <- 0
wz.cenL22[!is.finite(wz.cenL22)] <- 0
wz.cenL12[!is.finite(wz.cenL12)] <- 0
- wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] + A1 * wz.cenL11
- wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] + A1 * wz.cenL22
- wz[,iam(1, 2,M)] <- A1 * wz.cenL12
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + A1 * wz.cenL11
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + A1 * wz.cenL22
+ wz[, iam(1, 2, M)] <- A1 * wz.cenL12
mumU <- Upper - mum # often Inf
temp21U <- mumU / sdv # often Inf
PhiU <- pnorm(temp21U) # often 1
@@ -505,16 +516,16 @@ if (FALSE)
tmp9 <- (1-PhiU) * (2 - temp21U^2)
wzcenU22 <- mumU * phiU * (tmp9 + mumU * phiU / sdv) / (sdv * temp31U)
wzcenU12 <- -phiU * ((1-PhiU)*(temp21U^2 - 1) -
- temp21U*phiU) / temp31U
+ temp21U*phiU) / temp31U
wzcenU11[!is.finite(wzcenU11)] <- 0 # Needed when Upper==Inf
wzcenU22[!is.finite(wzcenU22)] <- 0 # Needed when Upper==Inf
wzcenU12[!is.finite(wzcenU12)] <- 0 # Needed when Upper==Inf
- wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] + A3 * wzcenU11
- wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] + A3 * wzcenU22
- wz[,iam(1, 2,M)] <- wz[,iam(1, 2,M)] + A3 * wzcenU12
- wz[,iam(1, 1,M)] <- wz[,iam(1, 1,M)] * dmu.deta^2
- wz[,iam(2, 2,M)] <- wz[,iam(2, 2,M)] * dsd.deta^2
- wz[,iam(1, 2,M)] <- wz[,iam(1, 2,M)] * dmu.deta * dsd.deta
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] + A3 * wzcenU11
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] + A3 * wzcenU22
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] + A3 * wzcenU12
+ wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
+ wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsd.deta^2
+ wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsd.deta
c(w) * wz
}), list( .lmu = lmu, .lsd = lsd ))))
}
@@ -631,7 +642,7 @@ if (FALSE)
function(lmean = "loge", lshape = "loge",
imean = NULL, ishape = NULL,
probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = -2) {
+ imethod = 1, zero = "shape") {
@@ -648,9 +659,6 @@ if (FALSE)
lmeann <- attr(emeann, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -683,9 +691,9 @@ if (FALSE)
"Variance: mean^2 * (gamma(1 + 2/shape) / ",
"gamma(1 + 1/shape)^2 - 1)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.lmeann = lmeann ))),
@@ -694,8 +702,12 @@ if (FALSE)
Q1 = 1,
expected = TRUE,
multipleResponses = TRUE,
+ parameters.names = c("mean", "shape"),
+ lmean = .lmeann ,
+ lshape = .lshape ,
zero = .zero )
- }, list( .zero = zero ))),
+ }, list( .zero = zero,
+ .lmeann = lmeann, .lshape = lshape ))),
initialize = eval(substitute(expression({
@@ -727,7 +739,7 @@ if (FALSE)
predictors.names <-
c(namesof(mynames1, .lmeann , earg = .emeann , tag = FALSE),
namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
Meann.init <- matrix(if (length( .imeann )) .imeann else 0.5 * colMeans(y),
@@ -756,7 +768,7 @@ if (FALSE)
etastart <-
cbind(theta2eta(Meann.init, .lmeann , earg = .emeann ),
theta2eta(Shape.init, .lshape , earg = .eshape ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}
}), list( .lmeann = lmeann, .lshape = lshape,
@@ -792,8 +804,8 @@ if (FALSE)
M1 <- extra$M1
avector <- c(rep( .lmeann , length = ncoly),
rep( .lshape , length = ncoly))
- misc$link <- avector[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ misc$link <- avector[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -851,7 +863,7 @@ if (FALSE)
myderiv <- c(w) * cbind(dl.dmeann * dmeann.deta,
dl.dshape * dshape.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmeann = lmeann, .lshape = lshape,
.emeann = emeann, .eshape = eshape ) )),
weight = eval(substitute(expression({
@@ -886,7 +898,7 @@ if (FALSE)
lss = TRUE,
nrfs = 1,
probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = ifelse(lss, -2, -1)) {
+ imethod = 1, zero = "shape") {
@@ -899,9 +911,6 @@ if (FALSE)
lscale <- attr(escale, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -943,9 +952,9 @@ if (FALSE)
"Variance: scale^2 * (gamma(1 + 2/shape) - ",
"gamma(1 + 1/shape)^2)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ))),
@@ -954,8 +963,17 @@ if (FALSE)
Q1 = 1,
expected = TRUE,
multipleResponses = TRUE,
+ parameters.names = if ( .lss )
+ c("scale", "shape") else
+ c("shape", "scale"),
+ lss = .lss ,
+ lscale = .lscale ,
+ lshape = .lshape ,
zero = .zero )
- }, list( .zero = zero, .scale.12 = scale.12, .scale.TF = scale.TF
+ }, list( .zero = zero, .scale.12 = scale.12, .scale.TF = scale.TF,
+ .lscale = lscale ,
+ .lshape = lshape ,
+ .lss = lss
))),
initialize = eval(substitute(expression({
@@ -984,21 +1002,21 @@ if (FALSE)
if ( .lss ) {
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
} else {
- mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("shape", ncoly)
+ mynames2 <- param.names("scale", ncoly)
predictors.names <-
c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
namesof(mynames2, .lscale , earg = .escale , tag = FALSE))
}
predictors.names <- predictors.names[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
@@ -1029,10 +1047,10 @@ if (FALSE)
etastart <- if ( .lss )
cbind(theta2eta(Scale.init, .lscale , earg = .escale ),
theta2eta(Shape.init, .lshape , earg = .eshape ))[,
- interleave.VGAM(M, M = M1)] else
+ interleave.VGAM(M, M1 = M1)] else
cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
theta2eta(Scale.init, .lscale , earg = .escale ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}
}), list( .lscale = lscale, .lshape = lshape,
@@ -1073,8 +1091,8 @@ if (FALSE)
rep( .lshape , length = ncoly)) else
c(rep( .lshape , length = ncoly),
rep( .lscale , length = ncoly))
- misc$link <- avector[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ misc$link <- avector[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1127,7 +1145,7 @@ if (FALSE)
dl.dshape * dshape.deta) else
c(w) * cbind(dl.dshape * dshape.deta,
dl.dscale * dscale.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
@@ -1379,7 +1397,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
iAlpha = NULL, iBetaa = NULL,
nrfs = 1,
probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = -2) {
+ imethod = 1,
+ zero = "Betaa") {
@@ -1398,9 +1417,6 @@ pgamma.deriv.unscaled <- function(q, shape) {
lBetaa <- attr(eBetaa, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(imethod, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -1438,17 +1454,24 @@ pgamma.deriv.unscaled <- function(q, shape) {
lower.limit, sep = ", ") else
""),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("Alpha", "Betaa"),
lower.limit = .lower.limit ,
+ lAlpha = .lAlpha ,
+ lBetaa = .lBetaa ,
zero = .zero )
}, list( .zero = zero,
+ .lAlpha = lAlpha ,
+ .lBetaa = lBetaa ,
.lower.limit = lower.limit
))),
@@ -1484,12 +1507,12 @@ pgamma.deriv.unscaled <- function(q, shape) {
"don't use SurvS4()")
- mynames1 <- paste("Alpha", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("Betaa", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("Alpha", ncoly)
+ mynames2 <- param.names("Betaa", ncoly)
predictors.names <-
c(namesof(mynames1, .lAlpha , earg = .eAlpha , tag = FALSE),
namesof(mynames2, .lBetaa , earg = .eBetaa , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
Alpha.init <- matrix(if (length( .iAlpha )) .iAlpha else 0 + NA,
@@ -1525,7 +1548,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
etastart <-
cbind(theta2eta(Alpha.init, .lAlpha , earg = .eAlpha ),
theta2eta(Betaa.init, .lBetaa , earg = .eBetaa ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
.eBetaa = eBetaa, .eAlpha = eAlpha,
@@ -1572,8 +1595,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
M1 <- extra$M1
misc$link <-
c(rep( .lAlpha , length = ncoly),
- rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lBetaa , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1639,7 +1662,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
myderiv <- c(w) * cbind(dl.dAlpha * dAlpha.deta,
dl.dBetaa * dBetaa.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lBetaa = lBetaa, .lAlpha = lAlpha,
.eBetaa = eBetaa, .eAlpha = eAlpha,
.lower.limit = lower.limit ) )),
diff --git a/R/family.circular.R b/R/family.circular.R
index 26c0531..1394023 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -96,8 +96,12 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
if (any(index)) {
ans[index] <- runif (sum(index), 0, 2*pi)
}
- if (max(abs(ans - oldans)) < tolerance) break;
- if (its == maxits) {warning("did not converge"); break}
+ if (max(abs(ans - oldans)) < tolerance)
+ break
+ if (its == maxits) {
+ warning("did not converge")
+ break
+ }
oldans <- ans
}
} else {
@@ -109,8 +113,12 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
if (any(index)) {
ans[index] <- runif(sum(index), 0, 2*pi)
}
- if (max(abs(ans - oldans)) < tolerance) break;
- if (its == maxits) {warning("did not converge"); break}
+ if (max(abs(ans - oldans)) < tolerance)
+ break
+ if (its == maxits) {
+ warning("did not converge")
+ break
+ }
oldans <- ans
}
}
@@ -125,8 +133,12 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
if (any(index)) {
ans[index] <- runif (sum(index), 0, 2*pi)
}
- if (max(abs(ans - oldans)) < tolerance) break;
- if (its == maxits) {warning("did not converge"); break}
+ if (max(abs(ans - oldans)) < tolerance)
+ break
+ if (its == maxits) {
+ warning("did not converge")
+ break
+ }
oldans <- ans
}
} else {
@@ -138,8 +150,12 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
if (any(index)) {
ans[index] <- runif (sum(index), 0, 2*pi)
}
- if (max(abs(ans - oldans)) < tolerance) break;
- if (its == maxits) {warning("did not converge"); break}
+ if (max(abs(ans - oldans)) < tolerance)
+ break
+ if (its == maxits) {
+ warning("did not converge")
+ break
+ }
oldans <- ans
}
}
@@ -215,8 +231,26 @@ cardioid.control <- function(save.weights = TRUE, ...) {
"pi + (rho/pi) *",
"((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu", "rho"),
+ nsimEIM = .nsimEIM ,
+ lmu = .lmu ,
+ lrho = .lrho ,
+ zero = .zero )
+ }, list( .zero = zero, .lmu = lmu, .lrho = lrho,
+ .nsimEIM = nsimEIM ))),
+
+
initialize = eval(substitute(expression({
@@ -234,40 +268,40 @@ cardioid.control <- function(save.weights = TRUE, ...) {
stop("the response must be in (0, 2*pi)")
predictors.names <- c(
- namesof("mu", .lmu, earg = .emu , tag = FALSE),
- namesof("rho", .lrho, earg = .erho, tag = FALSE))
+ namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("rho", .lrho , earg = .erho , tag = FALSE))
if (!length(etastart)) {
- rho.init <- rep(if (length(.irho)) .irho else 0.3, length=n)
+ rho.init <- rep(if (length( .irho )) .irho else 0.3, length = n)
cardioid.Loglikfun <- function(mu, y, x, w, extraargs) {
rho <- extraargs$irho
sum(w * (-log(2*pi) + log1p(2*rho*cos(y-mu))))
}
- mu.grid <- seq(0.1, 6.0, len=19)
+ mu.grid <- seq(0.1, 6.0, len = 19)
mu.init <- if (length( .imu )) .imu else
grid.search(mu.grid, objfun = cardioid.Loglikfun,
y = y, x = x, w = w,
extraargs = list(irho = rho.init))
mu.init <- rep(mu.init, length=length(y))
etastart <-
- cbind(theta2eta( mu.init, .lmu, earg = .emu),
- theta2eta(rho.init, .lrho, earg = .erho))
+ cbind(theta2eta( mu.init, .lmu , earg = .emu ),
+ theta2eta(rho.init, .lrho , earg = .erho ))
}
}), list( .lmu = lmu, .lrho = lrho,
.imu = imu, .irho = irho,
.emu = emu, .erho = erho ))),
linkinv = eval(substitute(function(eta, extra = NULL){
- mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ rho <- eta2theta(eta[, 2], link = .lrho , earg = .erho )
pi + (rho/pi) *
((2*pi-mu)*sin(2*pi-mu) + cos(2*pi-mu) - mu*sin(mu) - cos(mu))
}, list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho ))),
last = eval(substitute(expression({
- misc$link <- c("mu" = .lmu, "rho" = .lrho)
+ misc$link <- c("mu" = .lmu , "rho" = .lrho )
- misc$earg <- list("mu" = .emu, "rho" = .erho)
+ misc$earg <- list("mu" = .emu , "rho" = .erho )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -276,8 +310,8 @@ cardioid.control <- function(save.weights = TRUE, ...) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ rho <- eta2theta(eta[, 2], link = .lrho , earg = .erho )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -298,28 +332,19 @@ cardioid.control <- function(save.weights = TRUE, ...) {
-
-
-
-
-
-
-
-
-
deriv = eval(substitute(expression({
- mu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- rho <- eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ mu <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+ rho <- eta2theta(eta[, 2], link = .lrho , earg = .erho )
- dmu.deta <- dtheta.deta(mu, link = .lmu, earg = .emu)
- drho.deta <- dtheta.deta(rho, link = .lrho, earg = .erho)
+ dmu.deta <- dtheta.deta(mu, link = .lmu , earg = .emu )
+ drho.deta <- dtheta.deta(rho, link = .lrho , earg = .erho )
dl.dmu <- 2 * rho * sin(y-mu) / (1 + 2 * rho * cos(y-mu))
dl.drho <- 2 * cos(y-mu) / (1 + 2 * rho * cos(y-mu))
c(w) * cbind(dl.dmu * dmu.deta,
dl.drho * drho.deta)
- }), list( .lmu = lmu, .lrho=lrho,
- .emu = emu, .erho=erho, .nsimEIM=nsimEIM ))),
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho, .nsimEIM = nsimEIM ))),
weight = eval(substitute(expression({
run.varcov <- 0
ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
@@ -331,14 +356,16 @@ cardioid.control <- function(save.weights = TRUE, ...) {
rm(ysim)
temp3 <- cbind(dl.dmu, dl.drho)
run.varcov <- ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
}
wz <- if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
dtheta.detas <- cbind(dmu.deta, drho.deta)
- wz <- wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ wz <- wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
c(w) * wz
}), list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho, .nsimEIM = nsimEIM ))))
@@ -367,9 +394,6 @@ cardioid.control <- function(save.weights = TRUE, ...) {
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -380,13 +404,19 @@ cardioid.control <- function(save.weights = TRUE, ...) {
namesof("scale", lscale, earg = escale),
"\n", "\n",
"Mean: location"),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(M1 = 2,
- zero = .zero ,
- parameterNames = c("location", "scale"))
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -395,8 +425,8 @@ cardioid.control <- function(save.weights = TRUE, ...) {
predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
if (!length(etastart)) {
if ( .imethod == 1) {
@@ -409,13 +439,13 @@ cardioid.control <- function(save.weights = TRUE, ...) {
}
locat.init <- if (length( .ilocat ))
- rep( .ilocat , len=n) else
- rep(locat.init, len=n)
+ rep( .ilocat , len = n) else
+ rep(locat.init, len = n)
scale.init <- if (length( .iscale ))
rep( .iscale , len = n) else rep(1, len = n)
etastart <- cbind(
- theta2eta(locat.init, .llocat, earg = .elocat),
- theta2eta(scale.init, .lscale, earg = .escale))
+ theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
}
y <- y %% (2*pi) # Coerce after initial values have been computed
}), list( .imethod = imethod, .ilocat = ilocat,
@@ -423,7 +453,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
.lscale = lscale, .llocat = llocat,
.iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocat, earg = .elocat) %% (2*pi)
+ eta2theta(eta[, 1], .llocat , earg = .elocat ) %% (2*pi)
}, list( .escale = escale, .lscale = lscale,
.llocat = llocat, .elocat = elocat ))),
last = eval(substitute(expression({
@@ -437,13 +467,14 @@ cardioid.control <- function(save.weights = TRUE, ...) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) * (Scale * cos(y - locat) - log(mbesselI0(x = Scale)))
+ ll.elts <- c(w) * (Scale * cos(y - locat) -
+ log(mbesselI0(x = Scale)))
if (summation) {
sum(ll.elts)
} else {
@@ -454,8 +485,8 @@ cardioid.control <- function(save.weights = TRUE, ...) {
.llocat = llocat, .elocat = elocat ))),
vfamily = c("vonmises"),
deriv = eval(substitute(expression({
- locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
tmp6 <- mbesselI0(x = Scale, deriv = 2)
dl.dlocat <- Scale * sin(y - locat)
@@ -463,19 +494,19 @@ cardioid.control <- function(save.weights = TRUE, ...) {
dlocat.deta <- dtheta.deta(locat, .llocat ,
earg = .elocat )
- dscale.deta <- dtheta.deta(Scale, .lscale, earg = .escale)
+ dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)
+ dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
.llocat = llocat, .elocat = elocat ))),
weight = eval(substitute(expression({
ned2l.dlocat2 <- Scale * tmp6[, 2] / tmp6[, 1]
ned2l.dscale2 <- tmp6[, 3] / tmp6[, 1] - (tmp6[, 2] / tmp6[, 1])^2
- wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
- wz[,iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
- wz[,iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
+ wz <- matrix(0, nrow = n, ncol = 2) # diagonal
+ wz[, iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .escale = escale, .elocat = elocat,
.lscale = lscale, .llocat = llocat ))))
diff --git a/R/family.exp.R b/R/family.exp.R
index 1fc1bde..bc4b1d7 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -527,7 +527,7 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
- zero = 2) {
+ zero = "scale") {
@@ -566,8 +566,24 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
"Mean: location\n",
"Variance: infinite"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocation ,
+ lscale = .lscale ,
+ zero = .zero )
+ }, list( .zero = zero, .llocation = llocation, .lscale = lscale ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 1db2f36..576277b 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -222,7 +222,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
tolshape0 = 0.001,
type.fitted = c("percentiles", "mean"),
giveWarning = TRUE,
- zero = 2:3) {
+ zero = c("scale", "shape")) {
@@ -272,9 +272,6 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
if (!is.Numeric(gshape, length.arg = 2) ||
gshape[1] >= gshape[2])
stop("bad input for argument 'gshape'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -284,14 +281,24 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
namesof("scale", lscale, escale), ", ",
namesof("shape", lshape, eshape)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "shape"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ lshape = .lshape ,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .llocat = llocation, .lscale = lscale, .lshape = lshape,
.type.fitted = type.fitted ))),
@@ -398,8 +405,8 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
bad <- ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
if (fred <- sum(bad)) {
- warning(paste(fred, "observations violating boundary",
- "constraints while initializing. Taking corrective action."))
+ warning(fred, "observations violating boundary constraints ",
+ "while initializing. Taking corrective action")
init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
}
@@ -424,11 +431,13 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
shape <- eta2theta(eta[, 3], .lshape , .eshape )
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
- warning("cannot find 'type.fitted'. ",
- "Returning 'percentiles'.")
- "percentiles"
- }
+ type.fitted <-
+ if (length(extra$type.fitted)) {
+ extra$type.fitted
+ } else {
+ warning("cannot find 'type.fitted'. Returning 'percentiles'.")
+ "percentiles"
+ }
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
@@ -441,7 +450,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
LP <- length(cent)
if (type.fitted == "percentiles" && # Upward compatibility:
LP > 0) {
- fv <- matrix(as.numeric(NA), nrow(eta), LP)
+ fv <- matrix(NA_real_, nrow(eta), LP)
for (ii in 1:LP) {
yp <- -log(cent[ii] / 100)
fv[!is.zero, ii] <- Locat[!is.zero] - sigma[!is.zero] *
@@ -511,9 +520,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
mytolerance <- 0 # .Machine$double.eps
if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
if ( .giveWarning )
- warning("There are", sum(bad),
- "range violations in @loglikelihood")
-
+ warning("There are", sum(bad), "range violations in @loglikelihood")
cat("There are", sum(bad),
"range violations in @loglikelihood\n")
flush.console()
@@ -565,7 +572,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
dsi.deta <- dtheta.deta(sigma, .lscale , .escale )
dxi.deta <- dtheta.deta(shape, .lshape , .eshape )
- is.zero <- (abs(shape) < .tolshape0)
+ is.zero <- (abs(shape) < .tolshape0 )
ii <- 1:nrow(eta)
zedd <- (y-Locat) / sigma
A <- 1 + shape * zedd
@@ -622,7 +629,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
k2 <- k1 * kay
k3 <- k2 * kay # kay^3 * (1-2*kay)
- wz <- matrix(as.numeric(NA), n, 6)
+ wz <- matrix(NA_real_, n, 6)
wz[, iam(1, 1, M)] <- tmp2 / (sigma^2 * k0)
wz[, iam(1, 2, M)] <- (tmp2 - tmp1) / (sigma^2 * k1)
wz[, iam(1, 3, M)] <- (tmp1 * temp13 - tmp2) / (sigma * k2)
@@ -707,7 +714,7 @@ dgammadx <- function(x, deriv.arg = 1) {
tolshape0 = 0.001,
type.fitted = c("percentiles", "mean"),
giveWarning = TRUE,
- zero = 2:3) {
+ zero = c("scale", "shape")) {
if (!is.logical(giveWarning) || length(giveWarning) != 1)
stop("bad input for argument 'giveWarning'")
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
@@ -749,9 +756,6 @@ dgammadx <- function(x, deriv.arg = 1) {
positive = TRUE) ||
tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -761,16 +765,28 @@ dgammadx <- function(x, deriv.arg = 1) {
namesof("scale", link = lscale, earg = escale), ", ",
namesof("shape", link = lshape, earg = eshape)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
infos = eval(substitute(function(...) {
list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "shape"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ lshape = .lshape ,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .llocat = llocation, .lscale = lscale, .lshape = lshape,
.type.fitted = type.fitted ))),
+
initialize = eval(substitute(expression({
M1 <- extra$M1 <- 3
ncoly <- ncol(y)
@@ -845,8 +861,8 @@ dgammadx <- function(x, deriv.arg = 1) {
}
bad <- (1 + init.xi * (y - init.mu) / init.sig <= 0)
if (fred <- sum(bad, na.rm = TRUE)) {
- warning(paste(fred, "observations violating boundary",
- "constraints while initializing. Taking corrective action."))
+ warning(fred, "observations violating boundary constraints ",
+ "while initializing. Taking corrective action")
init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
}
@@ -867,11 +883,13 @@ dgammadx <- function(x, deriv.arg = 1) {
loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
- warning("cannot find 'type.fitted'. ",
- "Returning 'percentiles'.")
- "percentiles"
- }
+ type.fitted <-
+ if (length(extra$type.fitted)) {
+ extra$type.fitted
+ } else {
+ warning("cannot find 'type.fitted'. Returning 'percentiles'.")
+ "percentiles"
+ }
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
@@ -883,7 +901,7 @@ dgammadx <- function(x, deriv.arg = 1) {
LP <- length(cent)
if (type.fitted == "percentiles" && # Upward compatibility:
LP > 0) {
- fv <- matrix(as.numeric(NA), nrow(eta), LP)
+ fv <- matrix(NA_real_, nrow(eta), LP)
for (ii in 1:LP) {
yp <- -log(cent[ii] / 100)
fv[!is.zero, ii] <- loc[!is.zero] - sigma[!is.zero] *
@@ -992,7 +1010,7 @@ dgammadx <- function(x, deriv.arg = 1) {
temp100 <- gamma(2-kay)
pp <- (1-kay)^2 * gamma(1-2*kay) # gamma(0) is undefined so kay != 0.5
qq <- temp100 * (digamma(1-kay) - (1-kay)/kay)
- wz <- matrix(as.numeric(NA), n, 6)
+ wz <- matrix(NA_real_, n, 6)
wz[, iam(1, 1, M)] <- pp / sigma^2
wz[, iam(2, 2, M)] <- (1 - 2*temp100 + pp) / (sigma * kay)^2
EulerM <- -digamma(1)
@@ -1139,9 +1157,6 @@ pgumbel <- function(q, location = 0, scale = 1,
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -1154,8 +1169,27 @@ pgumbel <- function(q, location = 0, scale = 1,
namesof("location", llocat, earg = elocat ), ", ",
namesof("scale", lscale, earg = escale )),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ mpv = .mpv ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocation, .lscale = lscale,
+ .mpv = mpv ))),
+
+
initialize = eval(substitute(expression({
predictors.names <-
@@ -1213,7 +1247,7 @@ pgumbel <- function(q, location = 0, scale = 1,
LP <- length(Percentiles) # may be 0
if (LP > 0) {
mpv <- extra$mpv
- mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
+ mu <- matrix(NA_real_, nrow(eta), LP + mpv) # LP may be 0
Rvec <- extra$R
for (ii in 1:LP) {
ci <- if (is.Numeric(Rvec))
@@ -1307,7 +1341,7 @@ pgumbel <- function(q, location = 0, scale = 1,
temp5[col(temp5) > r.vec] <- 0
temp5 <- temp5 %*% rep(1, ncol(temp5))
- wz <- matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
+ wz <- matrix(NA_real_, n, dimm(M = 2)) # 3=dimm(M = 2)
wz[, iam(1, 1, M)] <- r.vec / sigma^2
wz[, iam(2, 1, M)] <- -(1 + r.vec * temp6) / sigma^2
wz[, iam(2, 2, M)] <- (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
@@ -1538,7 +1572,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
type.fitted = c("percentiles", "mean"),
giveWarning = TRUE,
imethod = 1,
- zero = -2) {
+ zero = "shape") {
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
@@ -1569,9 +1603,6 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -1580,20 +1611,27 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
namesof("scale", link = lscale, earg = escale ), ", ",
namesof("shape", link = lshape, earg = eshape )),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("scale", "shape"),
+ lscale = .lscale ,
+ lshape = .lshape ,
type.fitted = .type.fitted ,
zero = .zero )
- }, list( .zero = zero, .type.fitted = type.fitted
+ }, list( .zero = zero, .type.fitted = type.fitted,
+ .lscale = lscale, .lshape = lshape
))),
+
initialize = eval(substitute(expression({
@@ -1637,12 +1675,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
extra$threshold <- Threshold
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -1679,7 +1717,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
etastart <-
cbind(theta2eta(init.sig, .lscale , earg = .escale ),
theta2eta(init.xii, .lshape , earg = .eshape ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
@@ -1699,11 +1737,13 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
shape <- as.matrix(shape)
- type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
- warning("cannot find 'type.fitted'. ",
- "Returning 'percentiles'.")
- "percentiles"
- }
+ type.fitted <-
+ if (length(extra$type.fitted)) {
+ extra$type.fitted
+ } else {
+ warning("cannot find 'type.fitted'. Returning 'percentiles'.")
+ "percentiles"
+ }
type.fitted <- match.arg(type.fitted,
c("percentiles", "mean"))[1]
@@ -1736,7 +1776,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
is.zero <- (abs(shape) < tolshape0 ) # A matrix
LP <- length(percentiles)
- fv <- matrix(as.numeric(NA), length(shape), LP)
+ fv <- matrix(NA_real_, length(shape), LP)
is.zero <- (abs(shape) < tolshape0)
for (ii in 1:LP) {
temp <- 1 - percentiles[ii] / 100
@@ -1794,8 +1834,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
M1 <- extra$M1
misc$link <-
c(rep( .lscale , length = ncoly),
- rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -1879,7 +1919,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
myderiv <-
c(w) * cbind(dl.dsigma * dsigma.deta,
dl.dShape * dShape.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .tolshape0 = tolshape0,
.lscale = lscale, .escale = escale,
.lshape = lshape, .eshape = eshape ))),
@@ -2038,9 +2078,6 @@ setMethod("guplot", "vlm",
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -2048,14 +2085,34 @@ setMethod("guplot", "vlm",
new("vglmff",
blurb = c("Gumbel distribution (univariate response)\n\n",
"Links: ",
- namesof("location", llocat,
- earg = elocat, tag = TRUE), ", ",
- namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
+ namesof("location", llocat, earg = elocat, tag = TRUE), ", ",
+ namesof("scale", lscale, earg = escale, tag = TRUE), "\n",
"Mean: location + scale*0.5772..\n",
"Variance: pi^2 * scale^2 / 6"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ mpv = .mpv ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocation, .lscale = lscale,
+ .mpv = mpv ))),
+
+
+
initialize = eval(substitute(expression({
y <- cbind(y)
if (ncol(y) > 1)
@@ -2099,7 +2156,7 @@ setMethod("guplot", "vlm",
mpv <- extra$mpv
LP <- length(Percentiles) # may be 0
if (!LP) return(locat + sigma * EulerM)
- mu <- matrix(as.numeric(NA), nrow(eta), LP + mpv)
+ mu <- matrix(NA_real_, nrow(eta), LP + mpv)
Rvec <- extra$R
if (1 <= LP)
for (ii in 1:LP) {
@@ -2110,7 +2167,7 @@ setMethod("guplot", "vlm",
if (mpv)
mu[, ncol(mu)] <- locat - sigma * log(log(2))
dmn2 <- if (LP >= 1) paste(as.character(Percentiles), "%",
- sep = "") else NULL
+ sep = "") else NULL
if (mpv)
dmn2 <- c(dmn2, "MPV")
dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
@@ -2165,7 +2222,7 @@ setMethod("guplot", "vlm",
ned2l.dloc2 <- 1 / sca^2
ned2l.dscaloc <- -(1 + digamma1) / sca^2
- wz = matrix(as.numeric(NA), n, dimm(M = 2))
+ wz = matrix(NA_real_, n, dimm(M = 2))
wz[, iam(1, 1, M)] <- ned2l.dloc2 * dloc.deta^2
wz[, iam(2, 2, M)] <- ned2l.dsca2 * dsca.deta^2
wz[, iam(1, 2, M)] <- ned2l.dscaloc * dloc.deta * dsca.deta
@@ -2180,7 +2237,8 @@ setMethod("guplot", "vlm",
cens.gumbel <- function(llocation = "identitylink",
lscale = "loge",
iscale = NULL,
- mean = TRUE, percentiles = NULL, zero = 2) {
+ mean = TRUE, percentiles = NULL,
+ zero = "scale") {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
llocat <- attr(elocat, "function.name")
@@ -2196,23 +2254,36 @@ setMethod("guplot", "vlm",
any(percentiles >= 100)))
stop("valid percentiles values must be given when mean = FALSE")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
blurb = c("Censored Gumbel distribution\n\n",
"Links: ",
- namesof("location", llocat, earg = elocat, tag = TRUE),
- ", ",
- namesof("scale", lscale, earg = escale, tag = TRUE),
- "\n",
+ namesof("location", llocat, earg = elocat, tag = TRUE), ", ",
+ namesof("scale", lscale, earg = escale, tag = TRUE), "\n",
"Mean: location + scale*0.5772..\n",
"Variance: pi^2 * scale^2 / 6"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ percentiles = .percentiles ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocation, .lscale = lscale,
+ .percentiles = percentiles ))),
+
initialize = eval(substitute(expression({
y <- cbind(y)
if (ncol(y) > 1)
@@ -2222,8 +2293,6 @@ setMethod("guplot", "vlm",
-
-
if (!length(extra$leftcensored))
extra$leftcensored <- rep(FALSE, length.out = n)
if (!length(extra$rightcensored))
@@ -2255,7 +2324,7 @@ setMethod("guplot", "vlm",
EulerM <- -digamma(1)
if (.mean) loc + sc * EulerM else {
LP <- length(.percentiles) # 0 if NULL
- mu <- matrix(as.numeric(NA), nrow(eta), LP)
+ mu <- matrix(NA_real_, nrow(eta), LP)
for (ii in 1:LP) {
ci <- -log( .percentiles[ii] / 100)
mu[, ii] <- loc - sc * log(ci)
@@ -2337,7 +2406,7 @@ setMethod("guplot", "vlm",
ed2l.dsc2 <- ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
ed2l.dloc2 <- 1 / sc^2
ed2l.dlocsc <- -(1 + digamma1) / sc^2
- wz <- matrix(as.numeric(NA), n, dimm(M = 2))
+ wz <- matrix(NA_real_, n, dimm(M = 2))
wz[, iam(1, 1, M)] <- A2 * ed2l.dloc2 * dloc.deta^2
wz[, iam(2, 2, M)] <- A2 * ed2l.dsc2 * dsc.deta^2
wz[, iam(1, 2, M)] <- A2 * ed2l.dlocsc * dloc.deta * dsc.deta
@@ -2509,8 +2578,27 @@ frechet.control <- function(save.weights = TRUE, ...) {
namesof("scale", link = lscale, earg = escale ), ", ",
namesof("shape", link = lshape, earg = eshape )),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape"),
+ lscale = .lscale ,
+ lshape = .lshape ,
+ nsimEIM = .nsimEIM ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lscale = lscale,
+ .lshape = lshape,
+ .nsimEIM = nsimEIM ))),
+
initialize = eval(substitute(expression({
@@ -2587,7 +2675,7 @@ frechet.control <- function(save.weights = TRUE, ...) {
Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
- ans <- rep(as.numeric(NA), length.out = length(shape))
+ ans <- rep(NA_real_, length.out = length(shape))
ok <- shape > 1
ans[ok] <- loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
@@ -2692,8 +2780,8 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
rec.normal <- function(lmean = "identitylink", lsd = "loge",
- imean = NULL, isd = NULL, imethod = 1,
- zero = NULL) {
+ imean = NULL, isd = NULL, imethod = 1,
+ zero = NULL) {
lmean <- as.list(substitute(lmean))
emean <- link2list(lmean)
lmean <- attr(emean, "function.name")
@@ -2720,8 +2808,29 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
"\n",
"Variance: sd^2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mean", "sd"),
+ lmean = .lmean ,
+ lsd = .lsd ,
+ imethod = .imethod ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lmean = lmean,
+ .lsd = lsd,
+ .imethod = imethod ))),
+
+
+
initialize = eval(substitute(expression({
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 4d57e30..1cb92e7 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -46,6 +46,8 @@
M1 = ifelse( .inbreeding , 3, 2),
expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = c("p1", "p2",
+ if ( .inbreeding ) "f" else NULL),
link = if ( .inbreeding )
c("p1" = .link , "p2" = .link , "f" = .link ) else
c("p1" = .link , "p2" = .link ))
@@ -183,7 +185,7 @@
weight = eval(substitute(expression({
if ( .inbreeding ) {
dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M)==6 because M==3
for (i1 in 1:M)
for (i2 in i1:M) {
index <- iam(i1, i2, M)
@@ -193,7 +195,7 @@
}
} else {
qq <- 1-p1-p2
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M)==3 because M==2
ned2l.dp12 <- 2 * (1/p1 + 1/qq)
ned2l.dp22 <- 2 * (1/p2 + 1/qq)
ned2l.dp1dp2 <- 2 / qq
@@ -309,7 +311,7 @@
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
dPP <- array(c(dP1,dP2,dP3), c(n,6, 3))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M)==6 because M==3
for (i1 in 1:M)
for (i2 in i1:M) {
index <- iam(i1,i2, M)
@@ -344,10 +346,18 @@
namesof("pA", link.pA, earg = earg.pA, tag = FALSE), ", ",
namesof("pB", link.pB, earg = earg.pB, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
+
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
+ }), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 4,
multipleResponses = FALSE,
+ parameters.names = c("pA", "pB"),
expected = TRUE,
zero = .zero ,
link = c("pA" = .link.pA , "pB" = .link.pB ),
@@ -357,11 +367,6 @@
.earg.pA = earg.pA, .earg.pB = earg.pB,
.zero = zero ))),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
- }), list( .zero = zero ))),
-
-
initialize = eval(substitute(expression({
mustart.orig <- mustart
@@ -455,7 +460,7 @@
.earg.pA = earg.pA, .earg.pB = earg.pB ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M)==3 because M==2
ned2l.dp2 <- (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
ned2l.dq2 <- (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
@@ -607,10 +612,12 @@
list(M1 = ifelse( .inbreeding , 2, 1),
Q1 = 3,
multipleResponses = FALSE,
+ parameters.names = c("pA",
+ if ( .inbreeding ) "f" else NULL),
expected = TRUE,
zero = .zero ,
link = if ( .inbreeding ) c("pA" = .linkp , "f" = .linkf ) else
- c("pA" = .linkp ))
+ c("pA" = .linkp ))
}, list( .linkp = linkp,
.linkf = linkf, .inbreeding = inbreeding,
.zero = zero ))),
@@ -725,7 +732,7 @@
dPP <- array(c(dP1, dP2), c(n, 3, 2))
dPP.deta <- cbind(dtheta.deta(pA, link = .linkp , earg = .eargp ),
dtheta.deta(fp, link = .linkf , earg = .eargf ))
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M)==3 because M==2
for (i1 in 1:M)
for (i2 in i1:M) {
index <- iam(i1, i2, M)
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index c639c6e..61f2859 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -52,19 +52,25 @@
c("Binomial model\n\n",
"Link: ", namesof("prob", link, earg = earg), "\n",
"Variance: mu * (1 - mu)"),
+
constraints = eval(substitute(expression({
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints,
apply.int = .apply.parint )
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
+
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
bred = .bred ,
+ expected = TRUE,
+ parameters.names = c("prob"), # new.name
zero = .zero )
}, list( .zero = zero,
.bred = bred ))),
@@ -211,7 +217,7 @@
dtheta.deta(mu, link = .link ,
earg = .earg )^2) # w cancel
if (.multiple.responses && ! .onedpar ) {
- dpar <- rep(as.numeric(NA), len = M)
+ dpar <- rep(NA_real_, len = M)
temp87 <- cbind(temp87)
nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
for (ii in 1:M)
@@ -282,7 +288,7 @@
}
}, list( .multiple.responses = multiple.responses ))),
- vfamily = c("binomialff", "vcategorical"),
+ vfamily = c("binomialff", "VGAMcategorical"),
@@ -453,6 +459,7 @@
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ parameters.names = c("mu"),
dispersion = .dispersion )
}, list( .dispersion = dispersion ))),
initialize = eval(substitute(expression({
@@ -507,7 +514,7 @@
misc$estimated.dispersion <- .estimated.dispersion
misc$link <- rep( .link , length = M)
- names(misc$link) <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ names(misc$link) <- param.names("mu", M)
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -579,6 +586,7 @@
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ parameters.names = c("mu"),
dispersion = .dispersion )
}, list( .earg = earg , .dispersion = dispersion ))),
initialize = eval(substitute(expression({
@@ -625,7 +633,7 @@
misc$estimated.dispersion <- .estimated.dispersion
misc$link <- rep( .link , length = M)
- names(misc$link) <- if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ names(misc$link) <- param.names("mu", M)
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -758,9 +766,6 @@ rinv.gaussian <- function(n, mu, lambda) {
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (is.logical(parallel) && parallel && length(zero))
@@ -783,12 +788,16 @@ rinv.gaussian <- function(n, mu, lambda) {
constraints = constraints,
apply.int = .apply.parint )
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ parameters.names = c("mu", "lambda"),
+ expected = TRUE,
zero = .zero )
}, list( .zero = zero ))),
@@ -813,12 +822,12 @@ rinv.gaussian <- function(n, mu, lambda) {
- mynames1 <- paste("mu", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("mu", ncoly)
+ mynames2 <- param.names("lambda", ncoly)
predictors.names <-
c(namesof(mynames1, .lmu , earg = .emu , short = TRUE),
namesof(mynames2, .llambda , earg = .elambda , short = TRUE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -844,7 +853,7 @@ rinv.gaussian <- function(n, mu, lambda) {
etastart <- cbind(
theta2eta(init.mu, link = .lmu , earg = .emu ),
theta2eta(init.la, link = .llambda , earg = .elambda ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
@@ -859,8 +868,8 @@ rinv.gaussian <- function(n, mu, lambda) {
M1 <- extra$M1
misc$link <-
c(rep( .lmu , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -921,7 +930,7 @@ rinv.gaussian <- function(n, mu, lambda) {
dl.dlambda <- 0.5 / lambda - (y - mymu)^2 / (2 * mymu^2 * y)
myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dlambda * dlambda.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
@@ -932,7 +941,7 @@ rinv.gaussian <- function(n, mu, lambda) {
wz <- cbind(dmu.deta^2 * ned2l.dmu2,
dlambda.deta^2 * ned2l.dlambda2)[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
}), list( .lmu = lmu, .llambda = llambda,
@@ -985,9 +994,23 @@ rinv.gaussian <- function(n, mu, lambda) {
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel = parallel, .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 1,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("lambda"),
+ bred = .bred ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .bred = bred ))),
+
+
deviance =
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
@@ -1006,14 +1029,6 @@ rinv.gaussian <- function(n, mu, lambda) {
}
},
- infos = eval(substitute(function(...) {
- list(M1 = 1,
- Q1 = 1,
- bred = .bred ,
- zero = .zero )
- }, list( .zero = zero,
- .bred = bred ))),
-
initialize = eval(substitute(expression({
temp5 <-
@@ -1085,7 +1100,7 @@ rinv.gaussian <- function(n, mu, lambda) {
temp87 <- (y-mu)^2 *
wz / (dtheta.deta(mu, link = .link , earg = .earg )^2) # w cancel
if (M > 1 && ! .onedpar ) {
- dpar <- rep(as.numeric(NA), length = M)
+ dpar <- rep(NA_real_, length = M)
temp87 <- cbind(temp87)
nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
for (ii in 1:M)
@@ -1223,8 +1238,11 @@ rinv.gaussian <- function(n, mu, lambda) {
ans at infos <- eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ multipleResponses = .multiple.responses ,
+ parameters.names = c("prob"),
zero = .zero )
- }, list( .zero = zero )))
+ }, list( .zero = zero,
+ .multiple.responses = multiple.responses )))
ans
}
@@ -1251,6 +1269,8 @@ rinv.gaussian <- function(n, mu, lambda) {
ans at infos <- eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ multipleResponses = TRUE,
+ parameters.names = c("lambda"),
zero = .zero )
}, list( .zero = zero )))
@@ -1261,10 +1281,11 @@ rinv.gaussian <- function(n, mu, lambda) {
- double.exppoisson <- function(lmean = "loge",
- ldispersion = "logit",
- idispersion = 0.8,
- zero = NULL) {
+ double.exppoisson <-
+ function(lmean = "loge",
+ ldispersion = "logit",
+ idispersion = 0.8,
+ zero = NULL) {
if (!is.Numeric(idispersion, positive = TRUE))
stop("bad input for 'idispersion'")
@@ -1289,14 +1310,20 @@ rinv.gaussian <- function(n, mu, lambda) {
"Mean: ", "mean\n",
"Variance: mean / dispersion"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
- lmean = .lmean ,
+ parameters.names = c("mean", "dispersion"),
+ lmean = .lmean ,
+ ldispersion = .ldispersion ,
zero = .zero )
- }, list( .lmean = lmean ))),
+ }, list( .lmean = lmean,
+ .ldispersion = ldispersion,
+ .zero = zero ))),
initialize = eval(substitute(expression({
@@ -1379,7 +1406,7 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz <- matrix(NA_real_, nrow = n, ncol = 2) # diagonal
usethis.lambda <- pmax(lambda, .Machine$double.eps / 10000)
wz[, iam(1, 1, M)] <- (Disper / usethis.lambda) * dlambda.deta^2
wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
@@ -1393,7 +1420,7 @@ rinv.gaussian <- function(n, mu, lambda) {
double.expbinomial <-
function(lmean = "logit", ldispersion = "logit",
- idispersion = 0.25, zero = 2) {
+ idispersion = 0.25, zero = "dispersion") {
lmean <- as.list(substitute(lmean))
emean <- link2list(lmean)
@@ -1416,8 +1443,24 @@ rinv.gaussian <- function(n, mu, lambda) {
namesof("dispersion", ldisp, earg = edisp), "\n",
"Mean: ", "mean\n"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = NA,
+ parameters.names = c("mean", "dispersion"),
+ lmean = .lmean ,
+ ldisp = .ldisp ,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .lmean = lmean,
+ .zero = zero,
+ .ldisp = ldisp ))),
+
initialize = eval(substitute(expression({
if (!all(w == 1))
extra$orig.w <- w
@@ -1539,7 +1582,7 @@ rinv.gaussian <- function(n, mu, lambda) {
}), list( .lmean = lmean, .emean = emean,
.ldisp = ldisp, .edisp = edisp ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz <- matrix(NA_real_, nrow = n, ncol = 2) # diagonal
wz[, iam(1, 1, M)] <- w * (Disper / temp3) * dprob.deta^2
wz[, iam(2, 2, M)] <- (0.5 / Disper^2) * dDisper.deta^2
wz
@@ -1592,6 +1635,7 @@ rinv.gaussian <- function(n, mu, lambda) {
},
infos = eval(substitute(function(...) {
list(M1 = 2,
+ parameters.names = c("mu.1[,j]", "mu.2[,j]"),
parallel = .parallel)
}, list( .parallel = parallel ))),
initialize = eval(substitute(expression({
@@ -1616,7 +1660,7 @@ rinv.gaussian <- function(n, mu, lambda) {
"mu.2", .link , earg = .earg , short = TRUE))
NOS = M / M1
predictors.names <-
- predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
+ predictors.names[interleave.VGAM(M1 * NOS, M1 = M1)]
if (!length(mustart) && !length(etastart))
@@ -1727,7 +1771,7 @@ rinv.gaussian <- function(n, mu, lambda) {
}
}
},
- vfamily = c("augbinomial", "vcategorical"),
+ vfamily = c("augbinomial", "VGAMcategorical"),
deriv = eval(substitute(expression({
M1 <- 2
Mdiv2 <- M / 2
@@ -1753,8 +1797,7 @@ rinv.gaussian <- function(n, mu, lambda) {
}
myderiv = (cbind(deriv1,
- deriv2))[, interleave.VGAM(M1 * NOS,
- M = M1)]
+ deriv2))[, interleave.VGAM(M1 * NOS, M1 = M1)]
myderiv
}), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
@@ -1773,7 +1816,7 @@ rinv.gaussian <- function(n, mu, lambda) {
my.wk.wt <- cbind(wk.wt1, wk.wt2)
- my.wk.wt <- my.wk.wt[, interleave.VGAM(M1 * NOS, M = M1)]
+ my.wk.wt <- my.wk.wt[, interleave.VGAM(M1 * NOS, M1 = M1)]
my.wk.wt
}), list( .link = link, .earg = earg))))
}
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 2b93cea..14da3d7 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -6,8 +6,7 @@
- loglinb2 <- function(exchangeable = FALSE, zero = 3) {
-
+ loglinb2 <- function(exchangeable = FALSE, zero = "u12") {
if (!is.logical(exchangeable))
@@ -28,8 +27,24 @@
apply.int = TRUE,
cm.default = cm.intercept.default,
cm.intercept.default = cm.intercept.default)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .exchangeable = exchangeable, .zero = zero ))),
+
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 4, # ncol(fitted(object))
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("u1", "u2", "u12"),
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
+
initialize = expression({
@@ -50,7 +65,7 @@
predictors.names <- c("u1", "u2", "u12")
if (length(mustart) + length(etastart) == 0) {
- mustart <- matrix(as.numeric(NA), nrow(y), 4)
+ mustart <- matrix(NA_real_, nrow(y), 4)
mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2]), w)
mustart[,2] <- weighted.mean((1-y[,1])*y[,2], w)
mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
@@ -126,7 +141,7 @@
d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2
d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[,iam(1,1,M)] <- -d2u0.du1.2
wz[,iam(2,2,M)] <- -d2u0.du22
wz[,iam(3,3,M)] <- -d2u0.du122
@@ -140,7 +155,8 @@
- loglinb3 <- function(exchangeable = FALSE, zero = 4:6) {
+ loglinb3 <- function(exchangeable = FALSE,
+ zero = c("u12", "u13", "u23")) {
if (!is.logical(exchangeable))
@@ -161,8 +177,23 @@
apply.int = TRUE,
cm.default = cm.intercept.default,
cm.intercept.default = cm.intercept.default)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 6)
}), list( .exchangeable = exchangeable, .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 6,
+ Q1 = 8, # ncol(fitted(object))
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("u1", "u2", "u3", "u12", "u13", "u23"),
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
+
initialize = expression({
predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
@@ -201,7 +232,7 @@
if (length(mustart) + length(etastart) == 0) {
- mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
+ mustart <- matrix(NA_real_, nrow(y), 2^3)
mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
mustart[,2] <- weighted.mean((1-y[,1])*(1-y[,2])*y[,3], w)
mustart[,3] <- weighted.mean((1-y[,1])*y[,2]*(1-y[,3]), w)
@@ -323,7 +354,7 @@
dA3.du1 <- exp(u1 + u3 + u13) + allterms
dA3.du2 <- exp(u2 + u3 + u23) + allterms
- wz <- matrix(as.numeric(NA), n, dimm(6))
+ wz <- matrix(NA_real_, n, dimm(6))
expu0 <- exp(u0)
wz[,iam(1,1,M)] <- A1 * (1 - expu0 * A1)
diff --git a/R/family.math.R b/R/family.math.R
index 7d7370d..4fe2699 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -12,6 +12,7 @@
+if (FALSE)
log1pexp <- function(x) {
ans <- log1p(exp(x))
@@ -153,6 +154,99 @@ lambertW <- function(x, tolerance = 1.0e-10, maxit = 50) {
+
+
+
+expint <- function (x, deriv = 0) {
+ if (deriv == 0) {
+ LLL <- length(x)
+ answer <- .C("sf_C_expint", x = as.double(x), size = as.integer(LLL),
+ ans = double(LLL))$ans
+ answer[x < 0] <- NA
+ answer[x == 0] <- NA
+ answer
+ } else {
+ if (!is.Numeric(deriv, integer.valued = TRUE, positive = TRUE) ||
+ deriv > 3)
+ stop("Bad input for argument 'deriv'")
+ answer <- rep(0, length(x))
+ if (deriv == 1) {
+ answer <- exp(x) / x
+ }
+ if (deriv == 2) {
+ answer <- exp(x) / x - exp(x) / x^2
+ }
+ if (deriv == 3) {
+ answer <- exp(x) / x - 2 * exp(x) / x^2 +
+ 2 * exp(x) / x^3
+ }
+ answer
+ }
+}
+
+
+expexpint <- function (x, deriv = 0) {
+ LLL <- length(x)
+ answer <- .C("sf_C_expexpint", x = as.double(x), size = as.integer(LLL),
+ ans = double(LLL))$ans
+ answer[x < 0] <- NA
+ answer[x == 0] <- NA
+ if (deriv > 0) {
+ if (!is.Numeric(deriv, integer.valued = TRUE, positive = TRUE) ||
+ deriv > 3)
+ stop("Bad input for argument 'deriv'")
+ if (deriv >= 1) {
+ answer <- -answer + 1 / x
+ }
+ if (deriv >= 2) {
+ answer <- -answer - 1 / x^2
+ }
+ if (deriv == 3) {
+ answer <- -answer + 2 / x^3
+ }
+ }
+ answer
+}
+
+
+expint.E1 <- function (x, deriv = 0) {
+ if (deriv == 0) {
+ LLL <- length(x)
+ answer <- .C("sf_C_expint_e1", x = as.double(x), size = as.integer(LLL),
+ ans = double(LLL))$ans
+ answer[x < 0] <- NA
+ answer[x == 0] <- NA
+ } else {
+ if (!is.Numeric(deriv, integer.valued = TRUE, positive = TRUE) ||
+ deriv > 3)
+ stop("Bad input for argument 'deriv'")
+ answer <- rep(0, length(x))
+ if (deriv == 1) {
+ answer <- exp(-x) / x
+ }
+ if (deriv == 2) {
+ answer <- exp(-x) / x + exp(-x) / x^2
+ }
+ if (deriv == 3) {
+ answer <- exp(-x) / x + 2 * exp(-x) / x^2 +
+ 2 * exp(-x) / x^3
+ }
+ answer <- (-1)^deriv * answer
+ }
+ answer
+}
+
+
+
+
+
+
+
+
+
+
+
+if (FALSE)
expint <- function(x) {
@@ -170,6 +264,7 @@ expint <- function(x) {
+if (FALSE)
expexpint <- function(x) {
@@ -192,6 +287,7 @@ expexpint <- function(x) {
+if (FALSE)
expint.E1 <- function(x) {
diff --git a/R/family.mixture.R b/R/family.mixture.R
index c43b162..2faf74b 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -28,7 +28,7 @@ mix2normal.control <- function(trace = TRUE, ...) {
qmu = c(0.2, 0.8),
eq.sd = TRUE,
nsimEIM = 100,
- zero = 1) {
+ zero = "phi") {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
lphi <- attr(ephi, "function.name")
@@ -91,8 +91,32 @@ mix2normal.control <- function(trace = TRUE, ...) {
bool = .eq.sd ,
constraints = constraints,
apply.int = TRUE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 5)
}), list( .zero = zero, .eq.sd = eq.sd ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 5,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("phi", "mu1", "sd1", "mu2", "sd2"),
+ nsimEIM = .nsimEIM ,
+ lphi = .lphi ,
+ lmu1 = .lmu ,
+ lsd1 = .lsd ,
+ lmu2 = .lmu ,
+ lsd2 = .lsd ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .nsimEIM = nsimEIM,
+ .lphi = lphi,
+ .lmu = lmu , .lsd = lsd
+ ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -107,19 +131,19 @@ mix2normal.control <- function(trace = TRUE, ...) {
predictors.names <- c(
- namesof("phi", .lphi, tag = FALSE),
- namesof("mu1", .lmu, earg = .emu1, tag = FALSE),
- namesof("sd1", .lsd, earg = .esd1, tag = FALSE),
- namesof("mu2", .lmu, earg = .emu2, tag = FALSE),
- namesof("sd2", .lsd, earg = .esd2, tag = FALSE))
+ namesof("phi", .lphi , earg = .ephi , tag = FALSE),
+ namesof("mu1", .lmu , earg = .emu1 , tag = FALSE),
+ namesof("sd1", .lsd , earg = .esd1 , tag = FALSE),
+ namesof("mu2", .lmu , earg = .emu2 , tag = FALSE),
+ namesof("sd2", .lsd , earg = .esd2 , tag = FALSE))
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu )
- init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
- init.mu1 <- rep(if (length(.imu1)) .imu1 else qy[1], length = n)
- init.mu2 <- rep(if (length(.imu2)) .imu2 else qy[2], length = n)
+ init.phi <- rep(if (length( .iphi )) .iphi else 0.5, length = n)
+ init.mu1 <- rep(if (length( .imu1 )) .imu1 else qy[1], length = n)
+ init.mu2 <- rep(if (length( .imu2 )) .imu2 else qy[2], length = n)
ind.1 <- if (init.mu1[1] < init.mu2[1])
1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
@@ -138,11 +162,11 @@ mix2normal.control <- function(trace = TRUE, ...) {
stop("'esd1' and 'esd2' must be equal if 'eq.sd = TRUE'")
}
etastart <- cbind(
- theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.mu1, .lmu, earg = .emu1),
- theta2eta(init.sd1, .lsd, earg = .esd1),
- theta2eta(init.mu2, .lmu, earg = .emu2),
- theta2eta(init.sd2, .lsd, earg = .esd2))
+ theta2eta(init.phi, .lphi , earg = .ephi ),
+ theta2eta(init.mu1, .lmu , earg = .emu1 ),
+ theta2eta(init.sd1, .lsd , earg = .esd1 ),
+ theta2eta(init.mu2, .lmu , earg = .emu2 ),
+ theta2eta(init.sd2, .lsd , earg = .esd2 ))
}
}), list(.lphi = lphi, .lmu = lmu,
.iphi = iphi, .imu1 = imu1, .imu2 = imu2,
@@ -150,19 +174,19 @@ mix2normal.control <- function(trace = TRUE, ...) {
.esd1 = esd1, .esd2 = esd2, .eq.sd = eq.sd,
.lsd = lsd, .isd1 = isd1, .isd2 = isd2, .qmu = qmu))),
linkinv = eval(substitute(function(eta, extra = NULL){
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ mu1 <- eta2theta(eta[, 2], link = .lmu , earg = .emu1 )
+ mu2 <- eta2theta(eta[, 4], link = .lmu , earg = .emu2 )
phi * mu1 + (1 - phi) * mu2
}, list( .lphi = lphi, .lmu = lmu,
.ephi = ephi, .emu1 = emu1, .emu2 = emu2,
.esd1 = esd1, .esd2 = esd2 ))),
last = eval(substitute(expression({
- misc$link <- c("phi" = .lphi, "mu1" = .lmu,
- "sd1" = .lsd , "mu2" = .lmu, "sd2" = .lsd)
+ misc$link <- c("phi" = .lphi , "mu1" = .lmu ,
+ "sd1" = .lsd , "mu2" = .lmu , "sd2" = .lsd )
- misc$earg <- list("phi" = .ephi, "mu1" = .emu1,
- "sd1" = .esd1, "mu2" = .emu2, "sd2" = .esd2)
+ misc$earg <- list("phi" = .ephi , "mu1" = .emu1 ,
+ "sd1" = .esd1 , "mu2" = .emu2 , "sd2" = .esd2 )
misc$expected <- TRUE
misc$eq.sd <- .eq.sd
@@ -176,13 +200,13 @@ mix2normal.control <- function(trace = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- sd1 <- eta2theta(eta[, 3], link = .lsd, earg = .esd1)
- mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
- sd2 <- eta2theta(eta[, 5], link = .lsd, earg = .esd2)
- f1 <- dnorm(y, mean=mu1, sd=sd1)
- f2 <- dnorm(y, mean=mu2, sd=sd2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ mu1 <- eta2theta(eta[, 2], link = .lmu , earg = .emu1 )
+ sd1 <- eta2theta(eta[, 3], link = .lsd , earg = .esd1 )
+ mu2 <- eta2theta(eta[, 4], link = .lmu , earg = .emu2 )
+ sd2 <- eta2theta(eta[, 5], link = .lsd , earg = .esd2 )
+ f1 <- dnorm(y, mean = mu1, sd = sd1)
+ f2 <- dnorm(y, mean = mu2, sd = sd2)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -199,16 +223,16 @@ mix2normal.control <- function(trace = TRUE, ...) {
.lsd = lsd ))),
vfamily = c("mix2normal"),
deriv = eval(substitute(expression({
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- mu1 <- eta2theta(eta[, 2], link = .lmu, earg = .emu1)
- sd1 <- eta2theta(eta[, 3], link = .lsd, earg = .esd1)
- mu2 <- eta2theta(eta[, 4], link = .lmu, earg = .emu2)
- sd2 <- eta2theta(eta[, 5], link = .lsd, earg = .esd2)
- dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
- dmu1.deta <- dtheta.deta(mu1, link = .lmu, earg = .emu1)
- dmu2.deta <- dtheta.deta(mu2, link = .lmu, earg = .emu2)
- dsd1.deta <- dtheta.deta(sd1, link = .lsd, earg = .esd1)
- dsd2.deta <- dtheta.deta(sd2, link = .lsd, earg = .esd2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ mu1 <- eta2theta(eta[, 2], link = .lmu , earg = .emu1 )
+ sd1 <- eta2theta(eta[, 3], link = .lsd , earg = .esd1 )
+ mu2 <- eta2theta(eta[, 4], link = .lmu , earg = .emu2 )
+ sd2 <- eta2theta(eta[, 5], link = .lsd , earg = .esd2 )
+ dphi.deta <- dtheta.deta(phi, link = .lphi , earg = .ephi )
+ dmu1.deta <- dtheta.deta(mu1, link = .lmu , earg = .emu1 )
+ dmu2.deta <- dtheta.deta(mu2, link = .lmu , earg = .emu2 )
+ dsd1.deta <- dtheta.deta(sd1, link = .lsd , earg = .esd1 )
+ dsd2.deta <- dtheta.deta(sd2, link = .lsd , earg = .esd2 )
f1 <- dnorm(y, mean = mu1, sd = sd1)
f2 <- dnorm(y, mean = mu2, sd = sd2)
pdf <- phi*f1 + (1 - phi)*f2
@@ -279,7 +303,8 @@ mix2poisson.control <- function(trace = TRUE, ...) {
mix2poisson <- function(lphi = "logit", llambda = "loge",
iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1) {
+ qmu = c(0.2, 0.8), nsimEIM = 100,
+ zero = "phi") {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
@@ -320,8 +345,30 @@ mix2poisson.control <- function(trace = TRUE, ...) {
namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
"Mean: phi*lambda1 + (1 - phi)*lambda2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("phi", "lambda1", "lambda2"),
+ nsimEIM = .nsimEIM ,
+ lphi = .lphi ,
+ llambda1 = .llambda ,
+ llambda2 = .llambda ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .nsimEIM = nsimEIM,
+ .lphi = lphi,
+ .llambda = llambda
+ ))),
+
+
initialize = eval(substitute(expression({
@@ -347,9 +394,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
if (!length(etastart)) {
qy <- quantile(y, prob = .qmu)
- init.phi <- rep(if (length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 <- rep(if (length(.il1)) .il1 else qy[1], length = n)
- init.lambda2 <- rep(if (length(.il2)) .il2 else qy[2], length = n)
+ init.phi <- rep(if (length( .iphi )) .iphi else 0.5, length = n)
+ init.lambda1 <- rep(if (length( .il1 )) .il1 else qy[1], length = n)
+ init.lambda2 <- rep(if (length( .il2 )) .il2 else qy[2], length = n)
if (!length(etastart))
etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
@@ -369,10 +416,10 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
last = eval(substitute(expression({
misc$link <-
- c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda )
+ c("phi" = .lphi , "lambda1" = .llambda , "lambda2" = .llambda )
misc$earg <-
- list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2 )
+ list("phi" = .ephi , "lambda1" = .el1 , "lambda2" = .el2 )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -384,9 +431,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
f1 <- dpois(y, lam = lambda1)
f2 <- dpois(y, lam = lambda2)
if (residuals) {
@@ -403,13 +450,13 @@ mix2poisson.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2poisson"),
deriv = eval(substitute(expression({
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
- dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta <- dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta <- dtheta.deta(lambda2, link = .llambda, earg = .el2)
+ dphi.deta <- dtheta.deta(phi, link = .lphi , earg = .ephi )
+ dlambda1.deta <- dtheta.deta(lambda1, link = .llambda , earg = .el1 )
+ dlambda2.deta <- dtheta.deta(lambda2, link = .llambda , earg = .el2 )
f1 <- dpois(x = y, lam = lambda1)
f2 <- dpois(x = y, lam = lambda2)
@@ -430,7 +477,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
run.mean <- 0
for (ii in 1:( .nsimEIM )) {
ysim <- ifelse(runif(n) < phi, rpois(n, lambda1),
- rpois(n, lambda2))
+ rpois(n, lambda2))
f1 <- dpois(x = ysim, lam = lambda1)
f2 <- dpois(x = ysim, lam = lambda2)
pdf <- phi*f1 + (1 - phi)*f2
@@ -450,22 +497,22 @@ mix2poisson.control <- function(trace = TRUE, ...) {
dpois(ysim, lambda2)
d2l.dphi2 <- dl.dphi^2
d2l.dlambda12 <- phi * (phi * df1.dlambda1^2 / pdf -
- d2f1.dlambda12) / pdf
+ d2f1.dlambda12) / pdf
d2l.dlambda22 <- (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
- d2f2.dlambda22) / pdf
+ d2f2.dlambda22) / pdf
d2l.dlambda1lambda2 <- phi * (1 - phi) *
- df1.dlambda1 * df2.dlambda2 / pdf^2
+ df1.dlambda1 * df2.dlambda2 / pdf^2
d2l.dphilambda1 <- df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
d2l.dphilambda2 <- df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
rm(ysim)
temp3 <- matrix(0, n, dimm(M))
- temp3[,iam(1, 1, M = 3)] <- d2l.dphi2
- temp3[,iam(2, 2, M = 3)] <- d2l.dlambda12
- temp3[,iam(3, 3, M = 3)] <- d2l.dlambda22
- temp3[,iam(1, 2, M = 3)] <- d2l.dphilambda1
- temp3[,iam(1, 3, M = 3)] <- d2l.dphilambda2
- temp3[,iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
+ temp3[, iam(1, 1, M = 3)] <- d2l.dphi2
+ temp3[, iam(2, 2, M = 3)] <- d2l.dlambda12
+ temp3[, iam(3, 3, M = 3)] <- d2l.dlambda22
+ temp3[, iam(1, 2, M = 3)] <- d2l.dphilambda1
+ temp3[, iam(1, 3, M = 3)] <- d2l.dphilambda2
+ temp3[, iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
run.mean <- ((ii-1) * run.mean + temp3) / ii
}
@@ -496,7 +543,8 @@ mix2exp.control <- function(trace = TRUE, ...) {
mix2exp <- function(lphi = "logit", llambda = "loge",
iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1) {
+ qmu = c(0.8, 0.2), nsimEIM = 100,
+ zero = "phi") {
lphi <- as.list(substitute(lphi))
ephi <- link2list(lphi)
lphi <- attr(ephi, "function.name")
@@ -537,9 +585,30 @@ mix2exp.control <- function(trace = TRUE, ...) {
"Mean: phi / lambda1 + (1 - phi) / lambda2\n"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("phi", "lambda1", "lambda2"),
+ nsimEIM = .nsimEIM ,
+ lphi = .lphi ,
+ llambda1 = .llambda ,
+ llambda2 = .llambda ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .nsimEIM = nsimEIM,
+ .lphi = lphi,
+ .llambda = llambda
+ ))),
+
initialize = eval(substitute(expression({
temp5 <-
@@ -567,27 +636,27 @@ mix2exp.control <- function(trace = TRUE, ...) {
init.lambda1 <- rep(if (length(.il1)) .il1 else 1/qy[1], length = n)
init.lambda2 <- rep(if (length(.il2)) .il2 else 1/qy[2], length = n)
if (!length(etastart))
- etastart <- cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.lambda1, .llambda, earg = .el1),
- theta2eta(init.lambda2, .llambda, earg = .el2))
+ etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
+ theta2eta(init.lambda1, .llambda , earg = .el1 ),
+ theta2eta(init.lambda2, .llambda , earg = .el2 ))
}
}), list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2,
.iphi = iphi, .il1 = il1, .il2 = il2,
.qmu = qmu))),
linkinv = eval(substitute(function(eta, extra = NULL){
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
phi / lambda1 + (1 - phi) / lambda2
}, list(.lphi = lphi, .llambda = llambda,
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
last = eval(substitute(expression({
misc$link <-
- c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda)
+ c("phi" = .lphi , "lambda1" = .llambda , "lambda2" = .llambda )
misc$earg <-
- list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2)
+ list("phi" = .ephi , "lambda1" = .el1 , "lambda2" = .el2 )
misc$expected <- TRUE
misc$nsimEIM <- .nsimEIM
@@ -598,9 +667,9 @@ mix2exp.control <- function(trace = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
f1 <- dexp(y, rate=lambda1)
f2 <- dexp(y, rate=lambda2)
@@ -618,19 +687,19 @@ mix2exp.control <- function(trace = TRUE, ...) {
.ephi = ephi, .el1 = el1, .el2 = el2 ))),
vfamily = c("mix2exp"),
deriv = eval(substitute(expression({
- phi <- eta2theta(eta[, 1], link = .lphi, earg = .ephi)
- lambda1 <- eta2theta(eta[, 2], link = .llambda, earg = .el1)
- lambda2 <- eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2 )
- dphi.deta <- dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta <- dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta <- dtheta.deta(lambda2, link = .llambda, earg = .el2)
+ dphi.deta <- dtheta.deta(phi, link = .lphi , earg = .ephi )
+ dlambda1.deta <- dtheta.deta(lambda1, link = .llambda , earg = .el1 )
+ dlambda2.deta <- dtheta.deta(lambda2, link = .llambda , earg = .el2 )
- f1 <- dexp(x = y, rate=lambda1)
- f2 <- dexp(x = y, rate=lambda2)
+ f1 <- dexp(x = y, rate = lambda1)
+ f2 <- dexp(x = y, rate = lambda2)
pdf <- phi*f1 + (1 - phi)*f2
- df1.dlambda1 <- exp(-lambda1*y) - y * dexp(y, rate=lambda1)
- df2.dlambda2 <- exp(-lambda2*y) - y * dexp(y, rate=lambda2)
+ df1.dlambda1 <- exp(-lambda1*y) - y * dexp(y, rate = lambda1)
+ df2.dlambda2 <- exp(-lambda2*y) - y * dexp(y, rate = lambda2)
dl.dphi <- (f1-f2) / pdf
dl.dlambda1 <- phi * df1.dlambda1 / pdf
dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
@@ -649,8 +718,8 @@ mix2exp.control <- function(trace = TRUE, ...) {
f2 <- dexp(x = ysim, rate=lambda2)
pdf <- phi*f1 + (1 - phi)*f2
- df1.dlambda1 <- exp(-lambda1*ysim) - ysim * dexp(ysim, rate=lambda1)
- df2.dlambda2 <- exp(-lambda2*ysim) - ysim * dexp(ysim, rate=lambda2)
+ df1.dlambda1 <- exp(-lambda1*ysim) - ysim * dexp(ysim, rate = lambda1)
+ df2.dlambda2 <- exp(-lambda2*ysim) - ysim * dexp(ysim, rate = lambda2)
dl.dphi <- (f1-f2) / pdf
dl.dlambda1 <- phi * df1.dlambda1 / pdf
dl.dlambda2 <- (1 - phi) * df2.dlambda2 / pdf
@@ -668,12 +737,12 @@ mix2exp.control <- function(trace = TRUE, ...) {
rm(ysim)
temp3 <- matrix(0, n, dimm(M))
- temp3[,iam(1, 1, M = 3)] <- d2l.dphi2
- temp3[,iam(2, 2, M = 3)] <- d2l.dlambda12
- temp3[,iam(3, 3, M = 3)] <- d2l.dlambda22
- temp3[,iam(1, 2, M = 3)] <- d2l.dphilambda1
- temp3[,iam(1, 3, M = 3)] <- d2l.dphilambda2
- temp3[,iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
+ temp3[, iam(1, 1, M = 3)] <- d2l.dphi2
+ temp3[, iam(2, 2, M = 3)] <- d2l.dlambda12
+ temp3[, iam(3, 3, M = 3)] <- d2l.dlambda22
+ temp3[, iam(1, 2, M = 3)] <- d2l.dphilambda1
+ temp3[, iam(1, 3, M = 3)] <- d2l.dphilambda2
+ temp3[, iam(2, 3, M = 3)] <- d2l.dlambda1lambda2
run.mean <- ((ii-1) * run.mean + temp3) / ii
}
wz <- if (intercept.only)
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index bdef4f1..ab3287b 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -131,9 +131,12 @@ micmen.control <- function(save.weights = TRUE, ...) {
"Variance: constant"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = 2)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero))),
+
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- if (is.matrix(y)) ncol(y) else 1
if (residuals) {
@@ -143,6 +146,20 @@ micmen.control <- function(save.weights = TRUE, ...) {
}
},
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("theta1", "theta2"),
+ link1 = .link1 ,
+ link2 = .link2 ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .link1 = link1, .link2 = link2
+ ))),
+
initialize = eval(substitute(expression({
@@ -409,8 +426,11 @@ skira.control <- function(save.weights = TRUE, ...) {
namesof("theta1", link1, earg = earg1), ", ",
namesof("theta2", link2, earg = earg2)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = 2)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- if (is.matrix(y))
ncol(y) else 1
@@ -420,6 +440,20 @@ skira.control <- function(save.weights = TRUE, ...) {
ResSS.vgam(y - mu, w, M = M)
}
},
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("theta1", "theta2"),
+ link1 = .link1 ,
+ link2 = .link2 ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .link1 = link1, .link2 = link2
+ ))),
+
initialize = eval(substitute(expression({
warning("20101105; need to fix a bug in the signs of initial vals")
diff --git a/R/family.normal.R b/R/family.normal.R
index c171898..7812083 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -52,12 +52,16 @@ VGAM.weights.function <- function(w, M, n) {
new("vglmff",
blurb = c("Vector linear/additive model\n",
"Links: identitylink for Y1,...,YM"),
+
constraints = eval(substitute(expression({
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel ,
constraints = constraints)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel = parallel, .zero = zero ))),
+
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- if (is.matrix(y)) ncol(y) else 1
n <- if (is.matrix(y)) nrow(y) else length(y)
@@ -77,6 +81,7 @@ VGAM.weights.function <- function(w, M, n) {
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
multipleResponses = TRUE,
zero = .zero )
}, list( .zero = zero ))),
@@ -303,7 +308,7 @@ if (FALSE)
gmean = exp((-5:5)/2), gsd = exp((-1:5)/2),
imean = NULL, isd = NULL, probs.y = 0.10,
imethod = 1,
- nsimEIM = NULL, zero = -2) {
+ nsimEIM = NULL, zero = "sd") {
@@ -327,9 +332,6 @@ if (FALSE)
if (!is.logical(eq.sd ) || length(eq.sd ) != 1)
stop("bad input for argument 'eq.sd'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (length(isd) &&
!is.Numeric(isd, positive = TRUE))
stop("bad input for argument 'isd'")
@@ -388,9 +390,9 @@ if (FALSE)
}
constraints <- con.use
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
@@ -399,19 +401,13 @@ if (FALSE)
-
-
-
-
-
-
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
eq.mean = .eq.mean ,
eq.sd = .eq.sd ,
multipleResponses = TRUE,
- par.names = c("mean", "sd"),
+ parameters.names = c("mean", "sd"),
zero = .zero )
}, list( .zero = zero,
.eq.mean = eq.mean,
@@ -419,8 +415,6 @@ if (FALSE)
))),
-
-
initialize = eval(substitute(expression({
M1 <- 2
temp5 <-
@@ -442,12 +436,12 @@ if (FALSE)
predictors.names <-
c(namesof(mean.names , .lmean , earg = .emean , tag = FALSE),
namesof(sdev.names , .lsd , earg = .esd , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
- init.me <- matrix( if (length( .imean )) .imean else as.numeric(NA),
+ init.me <- matrix( if (length( .imean )) .imean else NA_real_,
n, NOS, byrow = TRUE)
- init.sd <- matrix( if (length( .isd )) .isd else as.numeric(NA),
+ init.sd <- matrix( if (length( .isd )) .isd else NA_real_,
n, NOS, byrow = TRUE)
mean.grid.orig <- .gmean
@@ -483,7 +477,7 @@ if (FALSE)
mean.grid <- sort(c(-mean.grid,
mean.grid))
allmat1 <- expand.grid(Mean = mean.grid)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
for (iloc in 1:nrow(allmat1)) {
allmat2[iloc, ] <-
@@ -505,7 +499,7 @@ if (FALSE)
etastart <- cbind(theta2eta(init.me, .lmean , earg = .emean ),
theta2eta(init.sd, .lsd , earg = .esd ))
- etastart <- etastart[, interleave.VGAM(M, M = M1)]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1)]
}
}), list( .lmean = lmean, .lsd = lsd,
@@ -523,9 +517,9 @@ if (FALSE)
))),
last = eval(substitute(expression({
misc$link <- c(rep( .lmean , length = NOS),
- rep( .lsd , length = NOS))[interleave.VGAM(M, M = M1)]
+ rep( .lsd , length = NOS))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mean.names, sdev.names)
- names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
+ names(misc$link) <- temp.names[interleave.VGAM(M, M1 = M1)]
misc$earg <- vector("list", M)
names(misc$earg) <- temp.names
@@ -598,7 +592,7 @@ if (FALSE)
dsd.deta <- dtheta.deta(mysd, .lsd , earg = .esd )
dthetas.detas <- cbind(dmu.deta, dsd.deta)
myderiv <- c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
- myderiv <- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
myderiv
}), list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd ))),
@@ -610,7 +604,7 @@ if (FALSE)
NOS <- M / M1
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
@@ -899,7 +893,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
tikuv <- function(d, lmean = "identitylink", lsigma = "loge",
- isigma = NULL, zero = 2) {
+ isigma = NULL, zero = "sigma") {
lmean <- as.list(substitute(lmean))
@@ -912,10 +906,6 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
- if (length(zero) &&
- (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- max(zero) > 2))
- stop("bad input for argument 'zero'")
if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
@@ -930,12 +920,17 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
"\n", "\n",
"Mean: mean"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
multipleResponses = FALSE,
+ parameters.names = c("mean", "sigma"),
zero = .zero )
}, list( .zero = zero ))),
@@ -945,12 +940,12 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
predictors.names <-
- c(namesof("mean", .lmean , earg = .emean, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+ c(namesof("mean", .lmean , earg = .emean , tag = FALSE),
+ namesof("sigma", .lsigma , earg = .esigma , tag = FALSE))
if (!length(etastart)) {
- sigma.init <- if (length(.isigma)) rep(.isigma, length = n) else {
+ sigma.init <- if (length( .isigma )) rep( .isigma , length = n) else {
hh <- 2 - .d
KK <- 1 / (1 + 1/hh + 0.75/hh^2)
K2 <- 1 + 3/hh + 15/(4*hh^2)
@@ -958,8 +953,8 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}
mean.init <- rep(weighted.mean(y, w), len = n)
etastart <-
- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ cbind(theta2eta(mean.init, .lmean , earg = .emean ),
+ theta2eta(sigma.init, .lsigma , earg = .esigma ))
}
}),list( .lmean = lmean, .lsigma = lsigma,
.isigma = isigma, .d = d,
@@ -969,9 +964,9 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}, list( .lmean = lmean,
.emean = emean, .esigma = esigma ))),
last = eval(substitute(expression({
- misc$link <- c("mean"= .lmean , "sigma"= .lsigma )
+ misc$link <- c("mean" = .lmean , "sigma"= .lsigma )
- misc$earg <- list("mean"= .emean , "sigma"= .esigma )
+ misc$earg <- list("mean" = .emean , "sigma"= .esigma )
misc$expected <- TRUE
misc$d <- .d
@@ -1038,7 +1033,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
ned2l.dmymu2 <- Dnos / sigma^2
ned2l.dnu2 <- Dstar / sigma^2
- wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz <- matrix(NA_real_, n, M) # diagonal matrix
wz[, iam(1, 1, M)] <- ned2l.dmymu2 * dmu.deta^2
wz[, iam(2, 2, M)] <- ned2l.dnu2 * dsigma.deta^2
c(w) * wz
@@ -1195,9 +1190,6 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2 = 1) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
integer.valued = TRUE) ||
@@ -1217,8 +1209,11 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2 = 1) {
namesof("sd", lsd, earg = esd, tag = TRUE)),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = 1,
a1 = .a1 ,
a2 = .a2 ,
+ multiple.responses = FALSE,
+ parameters.names = c("mean", "sd"),
zero = .zero ,
nsimEIM = .nsimEIM )
}, list( .zero = zero,
@@ -1644,7 +1639,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
byrow.arg = FALSE,
- imethod = 1, zero = -2) {
+ imethod = 1, zero = "sd") {
@@ -1675,9 +1670,6 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
stop("arguments 'Lower' and 'Upper' must be numeric and ",
"satisfy Lower < Upper")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (mode(type.fitted) != "character" && mode(type.fitted) != "name")
type.fitted <- as.character(substitute(type.fitted))
@@ -1699,9 +1691,9 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
"Conditional variance: sd^2"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
@@ -1710,7 +1702,8 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
Q1 = 1,
type.fitted = .type.fitted ,
zero = .zero ,
- expected = TRUE,
+ multiple.responses = TRUE,
+ parameters.names = c("mu", "sd"),
byrow.arg = .byrow.arg ,
stdTobit = .stdTobit ,
expected = TRUE )
@@ -1757,7 +1750,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
predictors.names <-
c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
namesof(temp2.names, .lsd , earg = .esd , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
@@ -1803,7 +1796,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
etastart <- cbind(theta2eta(mu.init, .lmu , earg = .emu ),
theta2eta(sd.init, .lsd , earg = .esd ))
- etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
} # if (!length(etastart))
}), list( .Lower = Lower, .Upper = Upper,
.lmu = lmu, .lsd = lsd,
@@ -1866,10 +1859,9 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
temp0303 <- c(rep( .lmu , length = ncoly),
rep( .lsd , length = ncoly))
- names(temp0303) <-
- c(param.names("mu", ncoly),
- param.names("sd", ncoly))
- temp0303 <- temp0303[interleave.VGAM(M, M = M1)]
+ names(temp0303) <- c(param.names("mu", ncoly),
+ param.names("sd", ncoly))
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
misc$link <- temp0303 # Already named
misc$earg <- vector("list", M)
@@ -2027,11 +2019,11 @@ moment.millsratio2 <- function(zedd) {
}
dthetas.detas <- cbind(dmu.deta, dsd.deta)
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+ dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
myderiv <- cbind(c(w) * dl.dmu,
c(w) * dl.dsd) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.byrow.arg = byrow.arg,
@@ -2172,7 +2164,7 @@ moment.millsratio2 <- function(zedd) {
isd = NULL,
parallel = FALSE,
smallno = 1.0e-5,
- zero = -2) {
+ zero = "sd") {
@@ -2197,9 +2189,6 @@ moment.millsratio2 <- function(zedd) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(smallno, length.arg = 1,
@@ -2247,9 +2236,9 @@ moment.millsratio2 <- function(zedd) {
constraints = constraints,
apply.int = .apply.parint )
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
@@ -2259,8 +2248,13 @@ moment.millsratio2 <- function(zedd) {
Q1 = 1,
expected = TRUE,
multipleResponses = TRUE,
+ parameters.names = c("mean", if ( .var.arg ) "var" else "sd"),
+ var.arg = .var.arg ,
+ parallel = .parallel ,
zero = .zero )
- }, list( .zero = zero ))),
+ }, list( .zero = zero ,
+ .parallel = parallel ,
+ .var.arg = var.arg ))),
initialize = eval(substitute(expression({
orig.y <- y
@@ -2311,16 +2305,14 @@ moment.millsratio2 <- function(zedd) {
- mynames1 <- paste("mean",
- if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste(if ( .var.arg ) "var" else "sd",
- if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("mean", ncoly)
+ mynames2 <- param.names(if ( .var.arg ) "var" else "sd", ncoly)
predictors.names <-
c(namesof(mynames1, .lmean , earg = .emean , tag = FALSE),
if ( .var.arg )
namesof(mynames2, .lvare , earg = .evare , tag = FALSE) else
namesof(mynames2, .lsdev , earg = .esdev , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
extra$predictors.names <- predictors.names
@@ -2369,7 +2361,7 @@ moment.millsratio2 <- function(zedd) {
theta2eta(sdev.init^2, .lvare , earg = .evare ) else
theta2eta(sdev.init , .lsdev , earg = .esdev ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
colnames(etastart) <- predictors.names
}
@@ -2402,7 +2394,7 @@ moment.millsratio2 <- function(zedd) {
M1 <- extra$M1
temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
misc$link <- rep( .lmean , length = M1 * ncoly)
misc$earg <- vector("list", M1 * ncoly)
names(misc$link) <- names(misc$earg) <- temp.names
@@ -2533,7 +2525,7 @@ moment.millsratio2 <- function(zedd) {
cbind(dl.dmu * dmu.deta,
if ( .var.arg ) dl.dva * dva.deta else
dl.dsd * dsd.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
@@ -2546,7 +2538,7 @@ moment.millsratio2 <- function(zedd) {
.smallno = smallno,
.var.arg = var.arg ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # Diagonal matrix
+ wz <- matrix(NA_real_, n, M) # Diagonal matrix
ned2l.dmu2 <- 1 / sdev^2
@@ -2585,7 +2577,7 @@ moment.millsratio2 <- function(zedd) {
imethod = 1,
icoefficients = NULL,
isd = NULL,
- zero = "M") {
+ zero = "sd") {
@@ -2605,8 +2597,6 @@ moment.millsratio2 <- function(zedd) {
- if (is.character(zero) && zero != "M")
- stop("bad input for argument 'zero'")
@@ -2635,18 +2625,29 @@ moment.millsratio2 <- function(zedd) {
constraints = eval(substitute(expression({
+
+
+ M1 <- NA
+ if (FALSE) {
dotzero <- .zero
if (is.character(dotzero) && dotzero == "M")
dotzero <- M
M1 <- NA
eval(negzero.expression.VGAM)
+ } else {
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = M) # 20151222; Okay for one response?
+ }
}), list( .zero = zero
))),
infos = eval(substitute(function(...) {
list(M1 = NA,
Q1 = 1,
+ multipleResponses = FALSE, # zz unsure
+ parameters.names = as.character(NA), # zz unsure
zero = .zero )
}, list( .zero = zero ))),
@@ -2767,9 +2768,7 @@ moment.millsratio2 <- function(zedd) {
mynames1 <- mynames1[-max(extra$col.index.is.multilogit)]
}
- mynames2 <- paste(if ( .var.arg ) "var" else "sd",
- if (ncoly > 1) 1:ncoly else "", sep = "")
-
+ mynames2 <- param.names(if ( .var.arg ) "var" else "sd", ncoly)
predictors.names <-
c(mynames1,
if ( .var.arg )
@@ -3167,7 +3166,7 @@ moment.millsratio2 <- function(zedd) {
lognormal <- function(lmeanlog = "identitylink", lsdlog = "loge",
- zero = 2) {
+ zero = "sdlog") {
@@ -3183,10 +3182,6 @@ moment.millsratio2 <- function(zedd) {
- if (length(zero) &&
- (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- zero > 2))
- stop("bad input for argument argument 'zero'")
new("vglmff",
@@ -3195,8 +3190,27 @@ moment.millsratio2 <- function(zedd) {
namesof("meanlog", lmulog, earg = emulog, tag = TRUE), ", ",
namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ lmeanlog = .lmeanlog ,
+ lsdlog = .lsdlog ,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("meanlog", "sdlog"),
+ zero = .zero )
+ }, list( .zero = zero,
+ .lmeanlog = lmeanlog,
+ .lsdlog = lsdlog
+ ))),
+
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -3205,8 +3219,8 @@ moment.millsratio2 <- function(zedd) {
predictors.names <-
- c(namesof("meanlog", .lmulog , earg = .emulog, tag = FALSE),
- namesof("sdlog", .lsdlog , earg = .esdlog, tag = FALSE))
+ c(namesof("meanlog", .lmulog , earg = .emulog , tag = FALSE),
+ namesof("sdlog", .lsdlog , earg = .esdlog , tag = FALSE))
if (!length(etastart)) {
mylm <- lm.wfit(x = x, y = c(log(y)), w = c(w))
@@ -3242,7 +3256,8 @@ moment.millsratio2 <- function(zedd) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE)
+ ll.elts <- c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog,
+ log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -3288,7 +3303,7 @@ moment.millsratio2 <- function(zedd) {
}), list( .lmulog = lmulog, .lsdlog = lsdlog,
.emulog = emulog, .esdlog = esdlog ))),
weight = expression({
- wz <- matrix(as.numeric(NA), n, 2) # Diagonal!
+ wz <- matrix(NA_real_, n, 2) # Diagonal!
ned2l.dmulog2 <- 1 / sdlog^2
ned2l.dsdlog2 <- 2 * ned2l.dmulog2
@@ -3376,11 +3391,14 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
new("vglmff",
blurb = c("1-parameter skew-normal distribution\n\n",
"Link: ",
- namesof("shape", lshape , earg = eshape ), "\n",
+ namesof("shape", lshape , earg = eshape), "\n",
"Mean: shape * sqrt(2 / (pi * (1 + shape^2 )))\n",
"Variance: 1-mu^2"),
infos = eval(substitute(function(...) {
list(M1 = 1,
+ Q1 = 1,
+ multipleResponses = FALSE,
+ parameters.names = c("shape"),
nsimEIM = .nsimEIM)
}, list( .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
diff --git a/R/family.others.R b/R/family.others.R
index 75bca8d..daa2e8b 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -19,8 +19,6 @@
-
-
dexppois <- function(x, rate = 1, shape, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
@@ -165,9 +163,6 @@ rexppois <- function(n, rate = 1, shape) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(ishape) &&
!is.Numeric(ishape, positive = TRUE))
@@ -189,9 +184,23 @@ rexppois <- function(n, rate = 1, shape) {
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("rate", "shape"),
+ lrate = .lratee ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero, .lratee = lratee, .lshape = lshape ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -203,8 +212,8 @@ rexppois <- function(n, rate = 1, shape) {
predictors.names <- c(
- namesof("rate", .lratee, earg = .eratee, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE))
+ namesof("rate", .lratee , earg = .eratee , short = TRUE),
+ namesof("shape", .lshape , earg = .eshape , short = TRUE))
if (!length(etastart)) {
ratee.init <- if (length( .iratee ))
@@ -460,9 +469,6 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
integer.valued = TRUE) ||
nsimEIM <= 50)
@@ -476,9 +482,25 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
namesof("scale", lscale, earg = escale), ", ",
namesof("shape", lshape, earg = eshape), "\n"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape"),
+ nsimEIM = .nsimEIM ,
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero, .lscale = lscale, .lshape = lshape,
+ .nsimEIM = nsimEIM ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -722,9 +744,6 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -743,9 +762,23 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
"shape) / (shape / scale)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
-
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape"),
+ nsimEIM = .nsimEIM ,
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero, .lscale = lscale, .lshape = lshape,
+ .nsimEIM = nsimEIM ))),
initialize = eval(substitute(expression({
@@ -999,10 +1032,6 @@ explogff.control <- function(save.weights = TRUE, ...) {
if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE,
- positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -1020,9 +1049,25 @@ explogff.control <- function(save.weights = TRUE, ...) {
"Mean: ", "(-polylog(2, 1 - p) * scale) / log(shape)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape"),
+ nsimEIM = .nsimEIM ,
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero, .lscale = lscale, .lshape = lshape,
+ .nsimEIM = nsimEIM ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1296,7 +1341,7 @@ qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
if (length(scale) != LLL) scale <- rep(scale, length = LLL)
if (length(skewpar) != LLL) skewpar <- rep(skewpar, length = LLL)
- qtpn <- rep(as.numeric(NA), length(LLL))
+  qtpn <- rep(NA_real_, LLL)  # LLL is already a length; length(LLL) is always 1
qtpn <- qnorm(pp / (2 * skewpar), sd = 2 * skewpar)
qtpn[pp > skewpar] <- sqrt(8 * ( 1 - skewpar)^2 *
qgamma(pos( pp - skewpar) / (
@@ -1322,8 +1367,7 @@ rtpn <- function(n, location = 0, scale = 1, skewpar = 0.5) {
tpnff <- function(llocation = "identitylink", lscale = "loge",
- pp = 0.5, method.init = 1, zero = 2)
-{
+ pp = 0.5, method.init = 1, zero = 2) {
if (!is.Numeric(method.init, length.arg = 1,
integer.valued = TRUE, positive = TRUE) ||
method.init > 4)
@@ -1343,12 +1387,6 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
-
-
new("vglmff",
blurb = c("Two-piece normal distribution \n\n",
"Links: ",
@@ -1356,8 +1394,24 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
namesof("scale", lscale, earg = escale), "\n\n",
"Mean: "),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat,
+ .lscale = lscale ))),
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1370,8 +1424,8 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
@@ -1399,7 +1453,7 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
}
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale,
- .method.init=method.init ))),
+ .method.init = method.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta[, 1], .llocat , earg = .elocat )
}, list( .llocat = llocat,
@@ -1469,17 +1523,16 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
.elocat = elocat, .escale = escale,
.pp = pp ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-col too
+ wz <- matrix(0, n, M) # diag matrix; y is one-col too
temp10 <- mypp * (1 - mypp)
ned2l.dlocat2 <- 1 / ((4 * temp10) * myscale^2)
ned2l.dscale2 <- 2 / myscale^2
- wz[, iam(1, 1,M)] <- ned2l.dlocat2 * dlocat.deta^2
- wz[, iam(2, 2,M)] <- ned2l.dscale2 * dscale.deta^2
- # wz[, iam(3, 3,M)] <- ned2l.dskewpar2 * dskewpa.deta^2
- # wz[, iam(1, 3,M)] <- ned2l.dlocatdskewpar * dskewpar.deta * dlocat.deta
- ans
+ wz[, iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
+ # wz[, iam(3, 3, M)] <- ned2l.dskewpar2 * dskewpa.deta^2
+ # wz[, iam(1, 3, M)] <- ned2l.dlocatdskewpar * dskewpar.deta * dlocat.deta
c(w) * wz
}))))
}
@@ -1515,9 +1568,6 @@ tpnff3 <- function(llocation = "identitylink",
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -1526,11 +1576,30 @@ tpnff3 <- function(llocation = "identitylink",
"Links: ",
namesof("location", llocat, earg = elocat), ", ",
namesof("scale", lscale, earg = escale), ", ",
- namesof("skewpar", lscale, earg = eskewp), "\n\n",
+ namesof("skewpar", lskewp, earg = eskewp), "\n\n",
"Mean: "),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "skewpar"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ lskewpar = .lskewp ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat,
+ .lscale = lscale,
+ .lskewp = lskewp ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -1647,7 +1716,7 @@ tpnff3 <- function(llocation = "identitylink",
.elocat = elocat, .escale = escale, .eskewp = eskewp
))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M)) # diag matrix; y is one-col too
+ wz <- matrix(NA_real_, n, dimm(M)) # diag matrix; y is one-col too
temp10 <- myskew * (1 - myskew)
@@ -1671,5 +1740,367 @@ tpnff3 <- function(llocation = "identitylink",
+dozibeta <- function(x, shape1, shape2, pobs0 = 0,
+ pobs1 = 0, log = FALSE, tol = .Machine$double.eps) {
+ log.arg <- log
+ rm(log)
+ LLL <- max(length(x), length(shape1),
+ length(shape2), length(pobs0), length(pobs1))
+ if (LLL != length(x))
+ x <- rep(x, length = LLL)
+ if (LLL != length(shape1))
+ shape1 <- rep(shape1, length = LLL)
+ if (LLL != length(shape2))
+ shape2 <- rep(shape2, length = LLL)
+ if (LLL != length(pobs0))
+ pobs0 <- rep(pobs0, length = LLL)
+ if (LLL != length(pobs1))
+ pobs1 <- rep(pobs1, length = LLL)
+ ans <- rep(NA, length = LLL)
+ k1 <- (pobs0 < -tol | pobs1 < -tol |
+ (pobs0 + pobs1) > (1 + tol))
+ k4 <- is.na(pobs0) | is.na(pobs1)
+ ans[!k4 & !k1] <- dbeta(x[!k4 & !k1],
+ shape1[!k4 & !k1],
+ shape2[!k4 & !k1], log = TRUE) +
+ log1p(-(pobs0[!k4 & !k1] + pobs1[!k4 & !k1]))
+ k2 <- x == 0 & pobs0 > 0 & !is.na(x)
+ k3 <- x == 1 & pobs1 > 0 & !is.na(x)
+ ans[k2 & !k4 & !k1] <- log(pobs0[k2 & !k4 & !k1])
+ ans[k3 & !k4 & !k1] <- log(pobs1[k3 & !k4 & !k1])
+ if (!log.arg) ans <- exp(ans)
+ if (any(k1 & !k4)) {
+ ans[k1 & !k4] <- NaN
+ warning("NaNs produced")
+ }
+ ans
+}
+
+
+rozibeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ tol = .Machine$double.eps) {
+ use.n <- if ((length.n <- length(n)) > 1) {
+ length.n
+ } else {
+ if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1,
+ positive = TRUE)) {
+ stop("bad input for argument 'n'")
+ } else {
+ n
+ }
+ }
+ shape1 <- rep(shape1, length.out = use.n)
+ shape2 <- rep(shape2, length.out = use.n)
+ pobs0 <- rep(pobs0, length.out = use.n)
+ pobs1 <- rep(pobs1, length.out = use.n)
+ random.number <- runif(use.n)
+ ans <- rep(NA, length = use.n)
+ k5 <- (pobs0 < -tol | pobs1 < -tol |
+ (pobs0 + pobs1) > (1 + tol))
+ k4 <- is.na(pobs0) | is.na(pobs1)
+  ans[!k4] <- qozibeta(random.number[!k4], shape1 = shape1[!k4],
+                       shape2 = shape2[!k4], pobs0 = pobs0[!k4],
+                       pobs1 = pobs1[!k4])  # subset all args so lengths align
+ if (any(k5 & !k4)) {
+ ans[k5 & !k4] <- NaN
+ warning("NaNs produced")
+ }
+ ans
+}
+
+
+pozibeta <- function(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ lower.tail = TRUE, log.p = FALSE,
+ tol = .Machine$double.eps) {
+ LLL <- max(length(q), length(shape1),
+ length(shape2), length(pobs0), length(pobs1))
+ if (LLL != length(q))
+ q <- rep(q, length = LLL)
+ if (LLL != length(shape1))
+ shape1 <- rep(shape1, length = LLL)
+ if (LLL != length(shape2))
+ shape2 <- rep(shape2, length = LLL)
+ if (LLL != length(pobs0))
+ pobs0 <- rep(pobs0, length = LLL)
+ if (LLL != length(pobs1))
+ pobs1 <- rep(pobs1, length = LLL)
+ k3 <- (pobs0 < -tol | pobs1 < -tol |
+ (pobs0 + pobs1) > (1 + tol))
+ k4 <- is.na(pobs0) | is.na(pobs1)
+ ans <- rep(NA, length = LLL)
+ ans[!k3 & !k4] <- pbeta(q[!k3 & !k4],
+ shape1[!k3 & !k4],
+ shape2[!k3 & !k4], log.p = TRUE) +
+ log1p(-(pobs0[!k3 & !k4] + pobs1[!k3 & !k4]))
+ ans <- exp(ans)
+ k1 <- q >= 0 & !is.na(q)
+ k2 <- q >= 1 & !is.na(q)
+ ans[k1 & !k3 & !k4] <- ans[k1 & !k3 & !k4] +
+ pobs0[k1 & !k3 & !k4]
+ ans[k2 & !k3 & !k4] <- ans[k2 & !k3 & !k4] +
+ pobs1[k2 & !k3 & !k4]
+ if (!lower.tail & log.p) {
+ ans <- log1p(-ans)
+ } else {
+ if (!lower.tail)
+ ans <- 1 - ans
+ if (log.p)
+ ans <- log(ans)
+ }
+ if (any(k3 & !k4)) {
+ ans[k3 & !k4] <- NaN
+ warning("NaNs produced")
+ }
+ ans
+}
+
+
+qozibeta <- function(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ lower.tail = TRUE, log.p = FALSE,
+ tol = .Machine$double.eps) {
+ LLL <- max(length(p), length(shape1),
+ length(shape2), length(pobs0), length(pobs1))
+ if (LLL != length(p))
+ p <- rep(p, length = LLL)
+ if (LLL != length(shape1))
+ shape1 <- rep(shape1, length = LLL)
+ if (LLL != length(shape2))
+ shape2 <- rep(shape2, length = LLL)
+ if (LLL != length(pobs0))
+ pobs0 <- rep(pobs0, length = LLL)
+ if (LLL != length(pobs1))
+ pobs1 <- rep(pobs1, length = LLL)
+ k0 <- (pobs0 < -tol | pobs1 < -tol |
+ (pobs0 + pobs1) > (1 + tol))
+ k4 <- is.na(pobs0) | is.na(pobs1)
+ ans <- rep(NA, length = LLL)
+ if (!lower.tail & log.p) {
+ p <- -expm1(p)
+ } else{
+ if (!lower.tail)
+ p <- 1 - p
+ if (log.p) {
+ p <- exp(p)
+ }
+ }
+ k1 <- p >= 0 & p <= pobs0 & !is.na(p)
+ k2 <- p > pobs0 & p < (1 - pobs1) & !is.na(p)
+ k3 <- p >= (1 - pobs1) & p <= 1 & !is.na(p)
+ ans[k1 & !k0 & !k4] <- 0
+ ans[k2 & !k0 & !k4] <-
+ qbeta((p[k2 & !k0 & !k4] -
+ pobs0[k2 & !k0 & !k4]) / (1 - pobs0[k2 & !k0 & !k4] -
+ pobs1[k2 & !k0 & !k4]),
+ shape1 = shape1[k2 & !k0 & !k4],
+ shape2 = shape2[k2 & !k0 & !k4])
+ ans[k3 & !k0 & !k4] <- 1
+ if (any(k0 & !k4)) {
+    ans[k0 & !k4] <- NaN  # was k3: flag the invalid-probability cases (k0)
+ warning("NaNs produced")
+ }
+ ans
+}
+
+
+
+
+
+log1mexp <- function(x) {
+ if (any(x < 0 & !is.na(x)))
+ stop("Inputs need to be non-negative!")
+ ifelse(x <= log(2), log(-expm1(-x)), log1p(-exp(-x)))
+}
+
+
+log1pexp <- function(x){
+
+ ifelse(x <= -37, exp(x),
+ ifelse(x <= 18, log1p(exp(x)),
+ ifelse(x <= 33, x + exp(-x), x)))
+}
+
+
+
+
+
+
+dozibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
+ pstrsize = 0, log = FALSE) {
+ log.arg <- log
+ rm(log)
+ LLL <- max(length(x), length(size), length(shape1),
+ length(shape2), length(pstr0), length(pstrsize))
+ if (LLL != length(x))
+ x <- rep(x, length = LLL)
+ if (LLL != length(size))
+ size <- rep(size, length = LLL)
+ if (LLL != length(shape1))
+ shape1 <- rep(shape1, length = LLL)
+ if (LLL != length(shape2))
+ shape2 <- rep(shape2, length = LLL)
+ if (LLL != length(pstr0))
+ pstr0 <- rep(pstr0, length = LLL)
+ if (LLL != length(pstrsize))
+ pstrsize <- rep(pstrsize, length = LLL)
+ ans <- rep(NA, length = LLL)
+ k1 <- pstr0 < 0 | pstrsize < 0 |
+ (pstr0 + pstrsize) > 1
+ k <- is.na(size) | is.na(shape1) | is.na(shape2) |
+ is.na(pstr0) | is.na(pstrsize) | is.na(x)
+  if (sum(!k & !k1) > 0) {
+    ans[!k & !k1] <-  # (1 - pstr0 - pstrsize) * dbetabinom.ab(), in prob space
+      exp(dbetabinom.ab(x[!k & !k1], size[!k & !k1], shape1[!k & !k1],
+                        shape2[!k & !k1], log = TRUE) +
+          log1p(-(pstr0[!k & !k1] + pstrsize[!k & !k1])))
+  }
+  k2 <- x == 0 & pstr0 > 0
+  k3 <- x == size & pstrsize > 0
+  if (sum(k2 & !k & !k1) > 0)
+    ans[k2 & !k & !k1] <- pstr0[k2 & !k & !k1] +
+                          ans[k2 & !k & !k1]
+  if (sum(k3 & !k & !k1) > 0)
+    ans[k3 & !k & !k1] <- pstrsize[k3 & !k & !k1] +
+                          ans[k3 & !k & !k1]
+  if (log.arg) ans <- log(ans)  # log taken after mixing, so log = TRUE is correct
+  if (any(k1 & !k)) {
+    ans[k1 & !k] <- NaN
+    warning("NaNs produced")
+  }
+  ans
+}
+
+
+
+dozibetabinom <- function(x, size, prob, rho = 0, pstr0 = 0,
+ pstrsize = 0, log = FALSE) {
+ dozibetabinom.ab(x, size, shape1 = prob * (1 - rho) / rho,
+ shape2 = (1 - prob) * (1 - rho) / rho,
+ pstr0 = pstr0, pstrsize = pstrsize, log = log)
+}
+
+
+
+rozibetabinom.ab <- function(n, size, shape1, shape2,
+ pstr0 = 0, pstrsize = 0) {
+ use.n <- if ((length.n <- length(n)) > 1) {
+ length.n
+ } else {
+ if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1,
+ positive = TRUE)) {
+ stop("bad input for argument 'n'")
+ } else {
+ n
+ }
+ }
+ size <- rep(size, length.out = use.n)
+ shape1 <- rep(shape1, length.out = use.n)
+ shape2 <- rep(shape2, length.out = use.n)
+ pstr0 <- rep(pstr0, length.out = use.n)
+ pstrsize <- rep(pstrsize, length.out = use.n)
+ k <- is.na(size) | is.na(shape1) | is.na(shape2) |
+ is.na(pstr0) | is.na(pstrsize)
+ ans <- rep(NA, length = use.n)
+ k1 <- pstr0 < 0 | pstrsize < 0 |
+ (pstr0 + pstrsize) > 1
+ random.number <- runif(use.n)
+  k2 <- random.number < pstr0      # full-length masks; NA rows removed via !k below
+  k3 <- pstr0 <= random.number &
+        random.number <= (1 - pstrsize)
+  k4 <- (1 - pstrsize) < random.number
+ if (sum(k2 & !k1 & !k) > 0)
+ ans[k2 & !k1 & !k] <- 0
+ if (sum(k3 & !k1 & !k) > 0)
+ ans[k3 & !k1 & !k] <- rbetabinom.ab(sum(k3 & !k1 & !k),
+ size = size[k3 & !k1 & !k],
+ shape1 = shape1[k3 & !k1 & !k],
+ shape2 = shape2[k3 & !k1 & !k])
+ if (sum(k4 & !k1 & !k) > 0)
+ ans[k4 & !k1 & !k] <- size[k4 & !k1 & !k]
+ ans
+}
+
+
+
+rozibetabinom <- function(n, size, prob, rho = 0, pstr0 = 0,
+ pstrsize = 0) {
+ rozibetabinom.ab(n, size, shape1 = prob * (1 - rho) / rho,
+ shape2 = (1 - prob) * (1 - rho) / rho,
+ pstr0 = pstr0,
+ pstrsize = pstrsize)
+}
+
+
+
+pozibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
+ pstrsize = 0, lower.tail = TRUE,
+ log.p = FALSE) {
+ LLL <- max(length(q), length(size), length(shape1),
+ length(shape2), length(pstr0), length(pstrsize))
+ if (LLL != length(q))
+ q <- rep(q, length = LLL)
+ if (LLL != length(size))
+ size <- rep(size, length = LLL)
+ if (LLL != length(shape1))
+ shape1 <- rep(shape1, length = LLL)
+ if (LLL != length(shape2))
+ shape2 <- rep(shape2, length = LLL)
+ if (LLL != length(pstr0))
+ pstr0 <- rep(pstr0, length = LLL)
+ if (LLL != length(pstrsize))
+ pstrsize <- rep(pstrsize, length = LLL)
+ k <- is.na(size) | is.na(shape1) | is.na(shape2) |
+ is.na(pstr0) | is.na(pstrsize) | is.na(q)
+ ans <- rep(NA, length = LLL)
+ k1 <- pstr0 < 0 | pstrsize < 0 |
+ (pstr0 + pstrsize) > 1
+ if (sum(!k1 & !k) > 0)
+ ans[!k & !k1] <-
+ pbetabinom.ab(q[!k & !k1], size[!k & !k1],
+ shape1[!k & !k1], shape2[!k & !k1], log.p = TRUE) +
+ log1p(-(pstr0[!k & !k1] + pstrsize[!k & !k1]))
+ ans <- exp(ans)
+ k2 <- q >= 0
+ k3 <- q >= size
+ if (sum(k2 & !k1 & !k) > 0)
+ ans[k2 & !k & !k1] <- ans[k2 & !k & !k1] +
+ pstr0[k2 & !k & !k1]
+ if (sum(k3 & !k1 & !k) > 0)
+ ans[k3 & !k & !k1] <- ans[k3 & !k & !k1] +
+ pstrsize[k3 & !k & !k1]
+ if (!lower.tail & log.p) {
+ ans <- log1p(-ans)
+ } else {
+ if (!lower.tail)
+ ans <- 1 - ans
+ if (log.p)
+ ans <- log(ans)
+ }
+ if (any(!k & k1)) {
+ ans[!k & k1] <- NaN
+ warning("NaNs produced")
+ }
+ ans
+}
+
+
+pozibetabinom <- function(q, size, prob, rho,
+ pstr0 = 0, pstrsize = 0,
+ lower.tail = TRUE, log.p = FALSE) {
+ pozibetabinom.ab(q, size, shape1 = prob * (1 - rho) / rho,
+ shape2 = (1 - prob) * (1 - rho) / rho,
+ pstr0 = pstr0, pstrsize = pstrsize,
+ lower.tail = lower.tail, log.p = log.p)
+}
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.positive.R b/R/family.positive.R
index 7349555..0f49c8e 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -335,6 +335,7 @@ dposbern <- function(x, prob, prob0 = prob, log = FALSE) {
+
dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
if (length(munb)) {
if (length(prob))
@@ -369,6 +370,7 @@ dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
}
+
pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
if (length(munb)) {
@@ -391,6 +393,7 @@ pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
}
+
qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
@@ -416,6 +419,116 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
+ EIM.posNB.specialp <- function(munb, size,
+ y.max = NULL, # Must be an integer
+ cutoff.prob = 0.995,
+ prob0, df0.dkmat, df02.dkmat2,
+ intercept.only = FALSE,
+ second.deriv = TRUE) {
+
+
+ if (intercept.only) {
+ munb <- munb[1]
+ size <- size[1]
+ prob0 <- prob0[1]
+ df0.dkmat <- df0.dkmat[1]
+ df02.dkmat2 <- df02.dkmat2[1]
+ }
+
+ y.min <- 0 # Same as negbinomial() actually. A fixed constant really
+
+ if (!is.numeric(y.max)) {
+ eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+ y.max <- max(qposnegbin(p = eff.p[2], munb = munb, size = size)) + 10
+ }
+
+ Y.mat <- if (intercept.only) y.min:y.max else
+ matrix(y.min:y.max, length(munb), y.max-y.min+1, byrow = TRUE)
+ neff.row <- ifelse(intercept.only, 1, nrow(Y.mat))
+ neff.col <- ifelse(intercept.only, length(Y.mat), ncol(Y.mat))
+
+ if (FALSE) {
+ Y.mat2 <- Y.mat + 1
+ trigg.term0 <- if (intercept.only) {
+ dposnegbin(Y.mat2, size=size, munb=munb) %*% trigamma(Y.mat2+size)
+ } else {
+ rowSums(dposnegbin(Y.mat2, size = size, munb = munb) *
+ trigamma(Y.mat2 + size))
+ }
+ }
+
+
+ trigg.term <-
+ if (TRUE) {
+ answerC <- .C("eimpnbinomspecialp",
+ as.integer(intercept.only),
+ as.double(neff.row), as.double(neff.col),
+ as.double(size),
+ as.double(1 - pposnegbin(Y.mat, size = size, munb = munb)),
+ rowsums = double(neff.row))
+ answerC$rowsums
+ }
+
+
+
+ mymu <- munb / (1 - prob0) # E(Y)
+ ned2l.dk2 <- trigg.term -
+ munb / (size * (size + munb)) - (mymu - munb) / (munb + size)^2
+
+ if (second.deriv)
+ ned2l.dk2 <- ned2l.dk2 - df02.dkmat2 / (1 - prob0) -
+ (df0.dkmat / (1 - prob0))^2
+ ned2l.dk2
+ } # end of EIM.posNB.specialp()
+
+
+
+
+
+
+
+ EIM.posNB.speciald <- function(munb, size,
+ y.min = 1, # 20160201; must be an integer
+ y.max = NULL, # Must be an integer
+ cutoff.prob = 0.995,
+ prob0, df0.dkmat, df02.dkmat2,
+ intercept.only = FALSE,
+ second.deriv = TRUE) {
+
+
+ if (intercept.only) {
+ munb <- munb[1]
+ size <- size[1]
+ prob0 <- prob0[1]
+ df0.dkmat <- df0.dkmat[1]
+ df02.dkmat2 <- df02.dkmat2[1]
+ }
+
+ if (!is.numeric(y.max)) {
+ eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+ y.max <- max(qposnegbin(p = eff.p[2], munb = munb, size = size)) + 10
+ }
+
+ Y.mat <- if (intercept.only) y.min:y.max else
+ matrix(y.min:y.max, length(munb), y.max-y.min+1, byrow = TRUE)
+ trigg.term <- if (intercept.only) {
+ dposnegbin(Y.mat, size = size, munb = munb) %*% trigamma(Y.mat + size)
+ } else {
+ rowSums(dposnegbin(Y.mat, size = size, munb = munb) *
+ trigamma(Y.mat + size))
+ }
+
+ mymu <- munb / (1 - prob0) # E(Y)
+ ned2l.dk2 <- trigamma(size) - munb / (size * (size + munb)) -
+ (mymu - munb) / (munb + size)^2 - trigg.term
+ if (second.deriv)
+ ned2l.dk2 <- ned2l.dk2 - df02.dkmat2 / (1 - prob0) -
+ (df0.dkmat / (1 - prob0))^2
+ ned2l.dk2
+ } # end of EIM.posNB.speciald()
+
+
+
posnegbinomial.control <- function(save.weights = TRUE, ...) {
@@ -424,22 +537,27 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
- posnegbinomial <- function(lmunb = "loge", lsize = "loge",
- isize = NULL, zero = -2,
- nsimEIM = 250,
- ishrinkage = 0.95, imethod = 1) {
+ posnegbinomial <-
+ function(
+ zero = "size",
+ type.fitted = c("mean", "munb", "prob0"),
+ nsimEIM = 500,
+ cutoff.prob = 0.999, # higher is better for large 'size'
+ eps.trig = 1e-7,
+ max.support = 4000, # 20160201; I have changed this
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lmunb = "loge", lsize = "loge",
+ imethod = 1,
+ imunb = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ isize = NULL,
+ gsize.mux = exp((-12:6)/2)) {
+
+
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
lmunb <- as.list(substitute(lmunb))
emunb <- link2list(lmunb)
@@ -449,6 +567,13 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
esize <- link2list(lsize)
lsize <- attr(esize, "function.name")
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "munb", "prob0"))[1]
+
+
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
if (!is.Numeric(nsimEIM, length.arg = 1,
positive = TRUE, integer.valued = TRUE))
@@ -465,36 +590,40 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
"Mean: munb / (1 - (size / (size + munb))^size)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("munb", "size"),
+ nsimEIM = .nsimEIM ,
+ eps.trig = .eps.trig ,
lmunb = .lmunb ,
emunb = .emunb ,
+ type.fitted = .type.fitted ,
+ zero = .zero ,
lsize = .lsize ,
esize = .esize )
}, list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
- .emunb = emunb, .esize = esize,
- .ishrinkage = ishrinkage,
- .imethod = imethod ))),
+ .emunb = emunb, .esize = esize,
+ .zero = zero, .nsimEIM = nsimEIM,
+ .ishrinkage = ishrinkage, .eps.trig = eps.trig,
+ .imethod = imethod,
+ .type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
M1 <- 2
- if (any(y == 0))
- stop("there are zero values in the response")
- y <- as.matrix(y)
-
-
temp5 <-
w.y.check(w = w, y = y,
- Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ Is.positive.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -503,79 +632,106 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
-
-
M <- M1 * ncol(y)
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
predictors.names <- c(
- namesof(if (NOS == 1) "munb" else
- paste("munb", 1:NOS, sep = ""),
- .lmunb, earg = .emunb, tag = FALSE),
- namesof(if (NOS == 1) "size" else
- paste("size", 1:NOS, sep = ""),
- .lsize, earg = .esize, tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ namesof(param.names("munb", NOS), .lmunb , earg = .emunb , tag = FALSE),
+ namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+
if (!length(etastart)) {
- mu.init <- y
- for (iii in 1:ncol(y)) {
- use.this <- if ( .imethod == 1) {
- weighted.mean(y[, iii], w[, iii])
- } else {
- median(y[,iii])
- }
- mu.init[, iii] <- (1 - .ishrinkage ) * y[, iii] + .ishrinkage * use.this
- }
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ probs.y = .probs.y )
+
if ( is.Numeric( .isize )) {
- kmat0 <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
+ size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
} else {
- posnegbinomial.Loglikfun =
- function(kmat, y, x, w, extraargs) {
- munb <- extraargs
- sum(w * dposnegbin(x = y, size = kmat, munb = munb,
- log = TRUE))
- }
- k.grid <- 2^((-6):6)
- kmat0 <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kmat0[, spp.] <-
- grid.search(k.grid,
- objfun = posnegbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w[, spp.],
- extraargs = mu.init[, spp.])
- }
+ posnegbinomial.Loglikfun <- function(kval, y, x, w, extraargs) {
+ munb <- extraargs
+ sum(c(w) * dposnegbin(x = y, mu = munb, size = kval, log = TRUE))
+ }
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ size.init[, jay] <-
+ grid.search(size.grid,
+ objfun = posnegbinomial.Loglikfun,
+ y = y[, jay], # x = x,
+ w = w[, jay],
+ extraargs = munb.init[, jay])
+ }
}
- p00 <- (kmat0 / (kmat0 + mu.init))^kmat0
+
+
+
etastart <-
cbind(
- theta2eta(mu.init * (1 - p00), .lmunb, earg = .emunb ),
- theta2eta(kmat0, .lsize, earg = .esize ))
- etastart <- etastart[,interleave.VGAM(M, M = M1), drop = FALSE]
+ theta2eta(munb.init , .lmunb , earg = .emunb ),
+ theta2eta(size.init, .lsize , earg = .esize ))
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
- }), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
- .emunb = emunb, .esize = esize,
- .ishrinkage = ishrinkage,
- .imethod = imethod ))),
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .imunb = imunb, .isize = isize,
+ .emunb = emunb, .esize = esize, .gsize.mux = gsize.mux,
+ .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .imethod = imethod,
+ .type.fitted = type.fitted ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat <- eta2theta(eta[, M1*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- po0 <- (kmat / (kmat + munb))^kmat
- munb / (1 - po0)
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "munb", "prob0"))[1]
+
+ TF <- c(TRUE, FALSE)
+ munb <- eta2theta(eta[, TF, drop = FALSE], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, !TF, drop = FALSE], .lsize , earg = .esize )
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ prob0 <- tempk^kmat
+ oneminusf0 <- 1 - prob0
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ oneminusf0[big.size] <- -expm1(-munb[big.size])
+ }
+
+ ans <- switch(type.fitted,
+ "mean" = munb / oneminusf0,
+ "munb" = munb,
+ "prob0" = prob0) # P(Y=0)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
}, list( .lsize = lsize, .lmunb = lmunb,
.esize = esize, .emunb = emunb ))),
last = eval(substitute(expression({
temp0303 <- c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS))
- names(temp0303) =
- c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
- if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 <- temp0303[interleave.VGAM(M, M = M1)]
+ names(temp0303) <- c(param.names("munb", NOS),
+ param.names("size", NOS))
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
misc$link <- temp0303 # Already named
misc$earg <- vector("list", M1*NOS)
@@ -585,21 +741,26 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
misc$earg[[M1*ii ]] <- .esize
}
+ misc$max.chunk.MB <- .max.chunk.MB
+ misc$cutoff.prob <- .cutoff.prob
+ misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$imethod <- .imethod
- }), list( .lmunb = lmunb, .lsize = lsize,
- .emunb = emunb, .esize = esize,
- .nsimEIM = nsimEIM, .imethod = imethod ))),
+ misc$expected <- TRUE
+ misc$ishrinkage <- .ishrinkage
+ misc$multipleResponses <- TRUE
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
+ .cutoff.prob = cutoff.prob,
+ .max.chunk.MB = max.chunk.MB,
+ .ishrinkage = ishrinkage,
+ .nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize, earg = .esize )
+ TFvec <- c(TRUE, FALSE)
+ munb <- eta2theta(eta[, TFvec, drop = FALSE], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, !TFvec, drop = FALSE], .lsize , earg = .esize )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -636,6 +797,24 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
.emunb = emunb, .esize = esize ))),
+ validparams = eval(substitute(function(eta, extra = NULL) {
+ munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lsize , earg = .esize )
+
+ smallval <- 1e-2
+ ans <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ (overdispersion <- all(munb / size > smallval))
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "replacing them by an arbitrary large value within ",
+ "the parameter space. Try fitting a positive-Poisson ",
+ "model instead.")
+ ans
+ }, list( .lmunb = lmunb, .emunb = emunb,
+ .lsize = lsize, .esize = esize))),
@@ -643,108 +822,222 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
M1 <- 2
NOS <- extra$NOS
- munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+ TFvec <- c(TRUE, FALSE)
+ munb <- eta2theta(eta[, TFvec, drop = FALSE], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, !TFvec, drop = FALSE], .lsize , earg = .esize )
+
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "try fitting a positive-Poisson ",
+ "model instead")
+ kmat[big.size] <- munb[big.size] / smallval
+ }
- dmunb.deta <- dtheta.deta(munb, .lmunb, earg = .emunb )
- dsize.deta <- dtheta.deta(kmat, .lsize, earg = .esize )
- NOS <- ncol(eta) / M1
+ dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
- tempk <- kmat / (kmat + munb)
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
tempm <- munb / (kmat + munb)
prob0 <- tempk^kmat
oneminusf0 <- 1 - prob0
+ AA16 <- tempm + log(tempk)
df0.dmunb <- -tempk * prob0
- df0.dkmat <- prob0 * (tempm + log(tempk))
- df02.dmunb2 <- prob0 * tempk / (kmat + munb) - tempk * df0.dmunb
- df02.dkmat2 <- (prob0 / kmat) * tempm^2
- df02.dkmat.dmunb <- prob0 * (-tempk) * (tempm + log(tempk)) -
- tempm * prob0 / (kmat + munb)
+ df0.dkmat <- prob0 * AA16
+ df02.dmunb2 <- prob0 * tempk * (1 + 1/kmat) / (1 + munb/kmat)
+ df02.dkmat2 <- prob0 * ((tempm^2) / kmat + AA16^2)
+ df02.dkmat.dmunb <- -prob0 * (tempm/kmat + AA16) / (1 + munb/kmat)
+
+
+
+ if (any(big.size)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ oneminusf0[big.size] <- -expm1(-munb[big.size])
+ df0.dmunb[big.size] <- -tempk[big.size] * prob0[big.size]
+ df0.dkmat[big.size] <- prob0[big.size] * AA16[big.size]
+ df02.dmunb2[big.size] <- prob0[big.size] * tempk[big.size] *
+ (1 + 1/kmat[big.size]) / (1 + smallval)
+ df02.dkmat2[big.size] <- prob0[big.size] *
+ ((tempm[big.size])^2 / kmat[big.size] + AA16[big.size]^2)
+ df02.dkmat.dmunb[big.size] <- -prob0[big.size] *
+ (tempm[big.size]/kmat[big.size] + AA16[big.size]) / (1 + smallval)
+ }
+
+
+
+
+ smallno <- 1e-6
+ if (FALSE && all(near.boundary <- oneminusf0 < smallno)) {
+ warning("solution near the boundary; either there is no need ",
+ "to fit a positive NBD or the distribution is centred ",
+ "on the value 1")
+ oneminusf0[near.boundary] <- smallno
+ prob0[near.boundary] <- 1 - oneminusf0[near.boundary]
+ }
+
+
- dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
+ dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat) +
df0.dmunb / oneminusf0
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
- (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ (y - munb) / (munb + kmat) + log(tempk) +
df0.dkmat / oneminusf0
+
+ if (any(big.size)) {
+ dl.dsize[big.size] <- 1e-8 # A small number
+ }
+
+
+
myderiv <- c(w) * cbind(dl.dmunb * dmunb.deta,
dl.dsize * dsize.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmunb = lmunb, .lsize = lsize,
.emunb = emunb, .esize = esize ))),
+
+
weight = eval(substitute(expression({
- run.varcov =
- wz <- matrix(0.0, n, 2 * M1 * NOS - 1)
+ wz <- matrix(0, n, M+M-1)
+ mymu <- munb / oneminusf0 # Is the same as 'mu', == E(Y)
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
- if (FALSE) {
- usualmeanY <- munb
- meanY <- usualmeanY / oneminusf0
- ed2l.dmu2 <- meanY / munb^2 -
- (meanY + kmat) / (munb + kmat)^2 -
- df02.dmunb2 / oneminusf0 -
- (df0.dmunb / oneminusf0)^2
- }
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 1
+ Q.maxs <- qposnegbin(p = eff.p[2] ,
+ munb = munb[, jay],
+ size = kmat[, jay]) + 10
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig))) #
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
- {
- ind2 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
- for (ii in 1:( .nsimEIM )) {
- ysim <- rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
- dim(ysim) <- c(n, NOS)
- dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
- df0.dmunb / oneminusf0
- dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat) / (munb + kmat) + 1 + log(tempk) +
- df0.dkmat / oneminusf0
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+
+
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ wz[sind2, M1*jay] <-
+ EIM.posNB.specialp(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only)
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0 |
+ is.na(wz[sind2, M1*jay]))) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
+
+ lwr.ptr <- upr.ptr + 1
+ } # while
+
+ } # if
+ } # end of for (jay in 1:NOS)
- for (kk in 1:NOS) {
- temp2 <- cbind(dl.dmunb[, kk],
- dl.dsize[, kk]) *
- cbind(dmunb.deta[, kk],
- dsize.deta[, kk])
- small.varcov <- temp2[, ind2$row.index] *
- temp2[, ind2$col.index]
- run.varcov[, ((kk-1)*M1+1):(kk*M1)] =
- run.varcov[, ((kk-1)*M1+1):(kk*M1)] +
- c(small.varcov[, 1:M1])
- run.varcov[, M + (kk-1)*M1 + 1] =
- run.varcov[, M + (kk-1)*M1 + 1] +
- c(small.varcov[, M1 + 1])
- }
- } # ii
- run.varcov <- cbind(run.varcov / .nsimEIM )
- wz <- if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
- }
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
- }), list( .nsimEIM = nsimEIM ))))
+
+
+ for (jay in 1:NOS) {
+ run.varcov <- 0
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- munb[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rposnegbin(sum(ii.TF), munb = muvec, size = kkvec)
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p(-muvec / (kkvec + muvec)) +
+ df0.dkmat[ii.TF, jay] / oneminusf0[ii.TF, jay]
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
+
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+ wz[ii.TF, M1*jay] <- ned2l.dk2 # * (dsize.deta[ii.TF, jay])^2
+ }
+ } # jay
+
+
+
+ wz[, M1*(1:NOS) ] <- wz[, M1*(1:NOS) ] * dsize.deta^2
+
+
+
+
+
+
+ save.weights <- !all(ind2)
+
+
+ ned2l.dmunb2 <- mymu / munb^2 -
+ ((1 + mymu/kmat) / kmat) / (1 + munb/kmat)^2 -
+ df02.dmunb2 / oneminusf0 -
+ (df0.dmunb / oneminusf0)^2
+ wz[, M1*(1:NOS) - 1] <- ned2l.dmunb2 * dmunb.deta^2
+
+
+ ned2l.dmunbsize <- (munb - mymu) / (munb + kmat)^2 -
+ df02.dkmat.dmunb / oneminusf0 -
+ df0.dmunb * df0.dkmat / oneminusf0^2
+ wz[, M + M1*(1:NOS) - 1] <- ned2l.dmunbsize * dmunb.deta * dsize.deta
+
+
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+ }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB,
+ .nsimEIM = nsimEIM ))))
+
}
+
dposgeom <- function(x, prob, log = FALSE) {
dgeom(x - 1, prob = prob, log = log)
}
+
pposgeom <- function(q, prob) {
if (!is.Numeric(prob, positive = TRUE))
stop("bad input for argument 'prob'")
@@ -759,6 +1052,7 @@ pposgeom <- function(q, prob) {
}
+
qposgeom <- function(p, prob) {
@@ -775,7 +1069,6 @@ qposgeom <- function(p, prob) {
-
rposgeom <- function(n, prob) {
qgeom(p = runif(n, min = dgeom(0, prob)), prob)
}
@@ -863,7 +1156,9 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
- pospoisson <- function(link = "loge", expected = TRUE,
+ pospoisson <- function(link = "loge",
+ type.fitted = c("mean", "lambda", "prob0"),
+ expected = TRUE,
ilambda = NULL, imethod = 1, zero = NULL) {
link <- as.list(substitute(link))
@@ -876,14 +1171,9 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
if (length( ilambda) && !is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "lambda", "prob0"))[1]
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
@@ -892,26 +1182,32 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
"Links: ",
namesof("lambda", link, earg = earg, tag = FALSE)),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("lambda"),
link = .link ,
+ type.fitted = .type.fitted ,
+ expected = .expected ,
earg = .earg)
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg,
+ .expected = expected,
+ .type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
-
temp5 <-
w.y.check(w = w, y = y,
Is.positive.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -923,33 +1219,49 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
extra$ncoly <- ncoly
extra$M1 <- M1
M <- M1 * ncoly
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
+ mynames1 <- param.names("lambda", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg, tag = FALSE)
- mynames1 <- paste("lambda",
- if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg, tag = FALSE)
-
- if ( .imethod == 1) {
- lambda.init <- apply(y, 2, median) + 1/8
- lambda.init <- matrix(lambda.init, n, ncoly, byrow = TRUE)
- } else if ( .imethod == 2) {
- lambda.init <- apply(y, 2, weighted.mean, w = w) + 1/8
- lambda.init <- matrix(lambda.init, n, ncoly, byrow = TRUE)
- } else {
- lambda.init <- -y / expm1(-y)
- }
- if (length( .ilambda))
- lambda.init <- lambda.init * 0 + .ilambda
+ if (!length(etastart)) {
+ lambda.init <- Init.mu(y = y, w = w, imethod = .imethod ,
+ imu = .ilambda )
- if (!length(etastart))
etastart <- theta2eta(lambda.init, .link , earg = .earg)
+ }
}), list( .link = link, .earg = earg,
- .ilambda = ilambda, .imethod = imethod ))),
+ .ilambda = ilambda, .imethod = imethod,
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "lambda", "prob0"))[1]
+
lambda <- eta2theta(eta, .link , earg = .earg )
- -lambda / expm1(-lambda)
+ ans <- switch(type.fitted,
+ "mean" = -lambda / expm1(-lambda),
+ "lambda" = lambda,
+ "prob0" = exp(-lambda)) # P(Y=0)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- rep( .link , len = M)
@@ -1010,10 +1322,10 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
if ( .expected ) {
- ned2l.dlambda2 <- (temp6 + 1) * (1/lambda - 1/temp6) / temp6
+ ned2l.dlambda2 <- (1 + 1 / temp6) * (1/lambda - 1/temp6)
wz <- ned2l.dlambda2 * dlambda.deta^2
} else {
- d2l.dlambda2 <- y / lambda^2 - (temp6 + 1) / temp6^2
+ d2l.dlambda2 <- y / lambda^2 - (1 + 1 / temp6 + 1) / temp6
d2lambda.deta2 <- d2theta.deta2(lambda, .link , earg = .earg)
wz <- (dlambda.deta^2) * d2l.dlambda2 - dl.dlambda * d2lambda.deta2
}
@@ -1128,9 +1440,6 @@ dposbinom <- function(x, size, prob, log = FALSE) {
if (!is.logical(omit.constant) || length(omit.constant) != 1)
stop("bad input for argument 'omit.constant'")
- if (multiple.responses && length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
@@ -1151,18 +1460,22 @@ dposbinom <- function(x, size, prob, log = FALSE) {
bool = .parallel ,
constraints = constraints)
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = .multiple.responses ,
+ parameters.names = c("prob"),
p.small = .p.small ,
no.warning = .no.warning ,
zero = .zero )
}, list( .zero = zero,
.p.small = p.small,
+ .multiple.responses = multiple.responses,
.no.warning = no.warning ))),
initialize = eval(substitute(expression({
@@ -1474,7 +1787,10 @@ dposbinom <- function(x, size, prob, log = FALSE) {
.apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
+ Q1 = NA,
+ expected = TRUE,
multipleResponses = TRUE,
+ parameters.names = c("prob"),
p.small = .p.small ,
no.warning = .no.warning ,
apply.parint = .apply.parint ,
@@ -1645,7 +1961,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
ned2l.dprobs2 <- 1 / (probs * AAA) + 1 / temp2 -
probs / (AAA * temp2) - (B.s / AAA)^2
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, 1:M] <- ned2l.dprobs2 * (dprobs.deta^2)
for (slocal in 1:(M-1))
@@ -1739,11 +2055,13 @@ dposbinom <- function(x, size, prob, log = FALSE) {
infos = eval(substitute(function(...) {
list(M1 = 2,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("pcapture", "precapture"),
p.small = .p.small ,
no.warning = .no.warning ,
type.fitted = .type.fitted ,
- apply.parint.b = .apply.parint.b ,
- multipleResponses = FALSE)
+ apply.parint.b = .apply.parint.b )
}, list(
.apply.parint.b = apply.parint.b,
.p.small = p.small,
@@ -1787,11 +2105,11 @@ dposbinom <- function(x, size, prob, log = FALSE) {
temp5 <-
w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
Is.nonnegative.y = TRUE,
ncol.w.max = 1,
ncol.y.min = 2,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = ncol(y),
maximize = TRUE)
@@ -2159,7 +2477,9 @@ dposbinom <- function(x, size, prob, log = FALSE) {
.apply.parint.t = apply.parint.t ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ expected = TRUE,
multipleResponses = TRUE,
+ parameters.names = as.character(NA),
ridge.constant = .ridge.constant ,
ridge.power = .ridge.power ,
drop.b = .drop.b,
@@ -2492,6 +2812,59 @@ dposbinom <- function(x, size, prob, log = FALSE) {
+setClass("posbernoulli.tb", contains = "vglmff")
+setClass("posbernoulli.t", contains = "posbernoulli.tb")
+setClass("posbernoulli.b", contains = "posbernoulli.tb")
+
+ setClass("posbinomial", contains = "posbernoulli.b")
+
+
+
+setMethod("summaryvglmS4VGAM", signature(VGAMff = "posbernoulli.tb"),
+ function(object,
+ VGAMff,
+ ...) {
+ object at post
+})
+
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "posbernoulli.tb"),
+ function(object,
+ VGAMff,
+ ...) {
+ if (length(object at extra$N.hat) == 1 &&
+ is.numeric(object at extra$N.hat)) {
+ cat("\nEstimate of N: ", round(object at extra$N.hat, digits = 3), "\n")
+ cat("\nStd. Error of N: ", round(object at extra$SE.N.hat, digits = 3), "\n")
+
+ confint.N <- object at extra$N.hat + c(Lower = -1, Upper = 1) *
+ qnorm(0.975) * object at extra$SE.N.hat
+ cat("\nApproximate 95 percent confidence interval for N:\n")
+ print(round(confint.N, digits = 2))
+ }
+})
+
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "posbernoulli.b"),
+ function(object,
+ VGAMff,
+ ...) {
+ callNextMethod(VGAMff = VGAMff, object = object, ...)
+})
+
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "posbernoulli.t"),
+ function(object,
+ VGAMff,
+ ...) {
+ callNextMethod(VGAMff = VGAMff, object = object, ...)
+})
+
+
+
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 7be8ef0..1ce841f 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -65,7 +65,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
lms.bcn <- function(percentiles = c(25, 50, 75),
- zero = c(1, 3),
+ zero = c("lambda", "sigma"),
llambda = "identitylink",
lmu = "identitylink",
lsigma = "loge",
@@ -108,8 +108,24 @@ lms.yjn.control <- function(trace = TRUE, ...)
namesof("mu", link = lmu, earg = emu), ", ",
namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("lambda", "mu", "sigma"),
+ llambda = .llambda ,
+ lmu = .lmu ,
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -227,7 +243,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
}), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
.elambda = elambda, .emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, 6)
+ wz <- matrix(NA_real_, n, 6)
wz[,iam(1, 1, M)] <- (7 * sigma^2 / 4) * dlambda.deta^2
wz[,iam(2, 2, M)] <- (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 *
dmu.deta^2
@@ -247,7 +263,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
lms.bcg <- function(percentiles = c(25, 50, 75),
- zero = c(1, 3),
+ zero = c("lambda", "sigma"),
llambda = "identitylink",
lmu = "identitylink",
lsigma = "loge",
@@ -278,11 +294,27 @@ lms.yjn.control <- function(trace = TRUE, ...)
"(Box-Cox transformation to a Gamma distribution)\n",
"Links: ",
namesof("lambda", link = llambda, earg = elambda), ", ",
- namesof("mu", link = lmu, earg = emu), ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ namesof("mu", link = lmu, earg = emu), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list(.zero = zero))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("lambda", "mu", "sigma"),
+ llambda = .llambda ,
+ lmu = .lmu ,
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -522,7 +554,7 @@ dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
if (length(mymu) != L) mymu <- rep(mymu, length.out = L)
if (length(sigma) != L) sigma <- rep(sigma, length.out = L)
- answer <- matrix(as.numeric(NA), L, derivative+1)
+ answer <- matrix(NA_real_, L, derivative+1)
CC <- psi >= 0
BB <- ifelse(CC, lambda, -2+lambda)
AA <- psi * BB
@@ -681,7 +713,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
}
lms.yjn2 <- function(percentiles = c(25, 50, 75),
- zero = c(1, 3),
+ zero = c("lambda", "sigma"),
llambda = "identitylink",
lmu = "identitylink",
lsigma = "loge",
@@ -716,14 +748,28 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
blurb = c("LMS Quantile Regression (Yeo-Johnson transformation",
" to normality)\n",
"Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", ",
- namesof("mu", link = lmu, earg = emu),
- ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = lmu, earg = emu ), ", ",
+ namesof("sigma", link = lsigma, earg = esigma )),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
- }), list(.zero = zero))),
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("lambda", "mu", "sigma"),
+ llambda = .llambda ,
+ lmu = .lmu ,
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -888,7 +934,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
lms.yjn <- function(percentiles = c(25, 50, 75),
- zero = c(1, 3),
+ zero = c("lambda", "sigma"),
llambda = "identitylink",
lsigma = "loge",
idf.mu = 4,
@@ -920,12 +966,28 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
blurb = c("LMS Quantile Regression ",
"(Yeo-Johnson transformation to normality)\n",
"Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", mu, ",
- namesof("sigma", link = lsigma, earg = esigma)),
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = "identitylink", earg = list()), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list(.zero = zero))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("lambda", "mu", "sigma"),
+ llambda = .llambda ,
+ lmu = "identitylink",
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llambda = llambda, .lsigma = lsigma ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -2559,7 +2621,7 @@ alaplace2.control <- function(maxit = 100, ...) {
digt = 4,
idf.mu = 3,
imethod = 1,
- zero = -2) {
+ zero = "scale") {
@@ -2594,10 +2656,6 @@ alaplace2.control <- function(maxit = 100, ...) {
ishrinkage < 0 ||
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
if (length(tau) &&
max(abs(kappa - sqrt(tau / (1 - tau)))) > 1.0e-6)
@@ -2668,10 +2726,9 @@ alaplace2.control <- function(maxit = 100, ...) {
constraints <- con.use
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
- constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .parallel.locat = parallel.locat,
.parallel.scale = parallel.scale,
.zero = zero,
@@ -2679,11 +2736,12 @@ alaplace2.control <- function(maxit = 100, ...) {
.apply.parint.locat = apply.parint.locat ))),
-
-
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = 1,
summary.pvalues = FALSE,
+ multipleResponses = FALSE,
+ parameters.names = c("location1", "scale1", "location2", "scale2"),
zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -2728,13 +2786,13 @@ alaplace2.control <- function(maxit = 100, ...) {
extra$individual <- FALSE
- mynames1 <- paste("location", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
- mynames2 <- paste("scale", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
+ mynames1 <- param.names("location", Mdiv2)
+ mynames2 <- param.names("scale", Mdiv2)
predictors.names <-
c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE),
namesof(mynames2, .lscale , earg = .escale, tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names[interleave.VGAM(M, M1 = M1)]
@@ -2780,7 +2838,7 @@ alaplace2.control <- function(maxit = 100, ...) {
etastart <-
cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))
- etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .imethod = imethod,
.idf.mu = idf.mu,
@@ -2814,7 +2872,7 @@ alaplace2.control <- function(maxit = 100, ...) {
tmp34 <- c(rep( .llocat , length = Mdiv2),
rep( .lscale , length = Mdiv2))
names(tmp34) <- c(mynames1, mynames2)
- tmp34 <- tmp34[interleave.VGAM(M, M = M1)]
+ tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
misc$link <- tmp34 # Already named
misc$earg <- vector("list", M)
@@ -2924,13 +2982,13 @@ alaplace2.control <- function(maxit = 100, ...) {
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .escale = escale, .lscale = lscale,
.elocat = elocat, .llocat = llocat,
.kappa = kappa ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M)
+ wz <- matrix(NA_real_, n, M)
d2l.dlocat2 <- 2 / Scale^2
d2l.dscale2 <- 1 / Scale^2
@@ -3012,10 +3070,6 @@ alaplace1.control <- function(maxit = 100, ...) {
if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
@@ -3058,7 +3112,9 @@ alaplace1.control <- function(maxit = 100, ...) {
constraints <- con.locat
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel.locat = parallel.locat,
.zero = zero,
.apply.parint.locat = apply.parint.locat ))),
@@ -3067,8 +3123,11 @@ alaplace1.control <- function(maxit = 100, ...) {
infos = eval(substitute(function(...) {
list(M1 = 1,
+ Q1 = 1,
summary.pvalues = FALSE,
- tau = .tau,
+ tau = .tau ,
+ multipleResponses = FALSE,
+ parameters.names = c("location"),
kappa = .kappa)
}, list( .kappa = kappa,
.tau = tau ))),
@@ -3118,7 +3177,7 @@ alaplace1.control <- function(maxit = 100, ...) {
extra$individual <- FALSE
- mynames1 <- paste("location", if (M > 1) 1:M else "", sep = "")
+ mynames1 <- param.names("location", M)
predictors.names <-
c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE))
@@ -3299,7 +3358,7 @@ alaplace3.control <- function(maxit = 100, ...) {
alaplace3 <-
function(llocation = "identitylink", lscale = "loge", lkappa = "loge",
ilocation = NULL, iscale = NULL, ikappa = 1.0,
- imethod = 1, zero = 2:3) {
+ imethod = 1, zero = c("scale", "kappa")) {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -3319,9 +3378,6 @@ alaplace3.control <- function(maxit = 100, ...) {
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) &&
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -3338,10 +3394,16 @@ alaplace3.control <- function(maxit = 100, ...) {
"\n",
"Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(M1 = 3,
+ Q1 = 1,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "kappa"),
summary.pvalues = FALSE,
zero = .zero )
}, list( .zero = zero ))),
@@ -3590,7 +3652,8 @@ rlaplace <- function(n, location = 0, scale = 1) {
laplace <- function(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
- imethod = 1, zero = 2) {
+ imethod = 1,
+ zero = "scale") {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -3607,9 +3670,6 @@ rlaplace <- function(n, location = 0, scale = 1) {
integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) &&
@@ -3621,13 +3681,25 @@ rlaplace <- function(n, location = 0, scale = 1) {
blurb = c("Two-parameter Laplace distribution\n\n",
"Links: ",
namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale),
+ namesof("scale", lscale, earg = escale),
"\n", "\n",
"Mean: location", "\n",
"Variance: 2*scale^2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ summary.pvalues = FALSE,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -3745,9 +3817,6 @@ fff.control <- function(save.weights = TRUE, ...) {
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(nsimEIM, length.arg = 1,
integer.valued = TRUE) ||
@@ -3771,8 +3840,19 @@ fff.control <- function(save.weights = TRUE, ...) {
"2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
"provided df2>4 and ncp = 0"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ multipleResponses = FALSE,
+ parameters.names = c("df1", "df2"),
+ zero = .zero )
+ }, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -3798,14 +3878,14 @@ fff.control <- function(save.weights = TRUE, ...) {
var.est <- summy[5] - summy[2]
df1.init <- 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
}
- df1.init <- if (length( .idf1))
- rep( .idf1, length.out = n) else
+ df1.init <- if (length( .idf1 ))
+ rep( .idf1 , length.out = n) else
rep(df1.init, length.out = n)
- df2.init <- if (length( .idf2))
- rep( .idf2, length.out = n) else
+ df2.init <- if (length( .idf2 ))
+ rep( .idf2 , length.out = n) else
rep(1, length.out = n)
etastart <- cbind(theta2eta(df1.init, .link , earg = .earg ),
- theta2eta(df2.init, .link , earg = .earg ))
+ theta2eta(df2.init, .link , earg = .earg ))
}
}), list( .imethod = imethod, .idf1 = idf1, .earg = earg,
.idf2 = idf2, .link = link ))),
@@ -4169,9 +4249,6 @@ rbenini <- function(n, y0, shape) {
if (!is.Numeric(y0, positive = TRUE))
stop("bad input for argument 'y0'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -4182,17 +4259,19 @@ rbenini <- function(n, y0, shape) {
"\n", "\n",
"Median: qbenini(p = 0.5, y0, shape)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
+ Q1 = 1,
+ parameters.names = c("shape"),
lshape = .lshape ,
- eshape = .eshape)
+ eshape = .eshape )
}, list( .eshape = eshape,
- .lshape = lshape ))),
+ .lshape = lshape))),
initialize = eval(substitute(expression({
@@ -4467,7 +4546,7 @@ qtriangle <- function(p, theta, lower = 0, upper = 1,
if (length(lower) != N) lower <- rep(lower, length.out = N)
if (length(upper) != N) upper <- rep(upper, length.out = N)
- ans <- as.numeric(NA) * p
+ ans <- NA_real_ * p
if (lower.tail) {
if (log.p) {
Neg <- (exp(ln.p) <= (theta - lower) / (upper - lower))
@@ -4633,6 +4712,8 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
namesof("theta", link, earg = earg)),
infos = eval(substitute(function(...) {
list(M1 = 1,
+ Q1 = 1,
+ parameters.names = c("theta"),
link = .link )
}, list( .link = link ))),
@@ -4811,10 +4892,6 @@ loglaplace1.control <- function(maxit = 300, ...) {
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
if (!is.logical(parallel.locat) ||
@@ -4846,13 +4923,24 @@ loglaplace1.control <- function(maxit = 300, ...) {
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel.locat ,
constraints = constraints, apply.int = FALSE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel.locat = parallel.locat,
.Scale.arg = Scale.arg, .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 1,
+ Q1 = 1,
+ parameters.names = c("location"),
+ llocation = .llocat )
+ }, list( .llocat = llocat,
+ .zero = zero ))),
+
initialize = eval(substitute(expression({
extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale <- rep( .Scale.arg, length = M)
- extra$kappa <- rep( .kappa, length = M)
+ extra$Scale <- rep( .Scale.arg , length = M)
+ extra$kappa <- rep( .kappa , length = M)
extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
@@ -5091,10 +5179,6 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
ishrinkage < 0 ||
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
if (!is.logical(eq.scale) || length(eq.scale) != 1)
stop("bad input for argument 'eq.scale'")
if (!is.logical(parallel.locat) ||
@@ -5120,19 +5204,22 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
"Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
constraints = eval(substitute(expression({
.ZERO <- .zero
- if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
+ if (is.character( .ZERO ))
+ .ZERO <- eval(parse(text = .ZERO ))
.PARALLEL <- .parallel.locat
parelHmat <- if (is.logical( .PARALLEL ) && .PARALLEL )
- matrix(1, M/2, 1) else diag(M/2)
+ matrix(1, M/2, 1) else diag(M/2)
scaleHmat <- if (is.logical( .eq.scale ) && .eq.scale )
- matrix(1, M/2, 1) else diag(M/2)
+ matrix(1, M/2, 1) else diag(M/2)
mycmatrix <- cbind(rbind( parelHmat, 0*parelHmat),
- rbind(0*scaleHmat, scaleHmat))
+ rbind(0*scaleHmat, scaleHmat))
constraints <- cm.VGAM(mycmatrix, x = x,
bool = .PARALLEL ,
constraints = constraints,
apply.int = FALSE)
- constraints <- cm.zero.VGAM(constraints, x = x, .ZERO , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .ZERO , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
parelHmat <- diag(M/2)
@@ -5373,15 +5460,16 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
- logitlaplace1 <- function(tau = NULL,
- llocation = "logit",
- ilocation = NULL,
- kappa = sqrt(tau/(1-tau)),
- Scale.arg = 1,
- ishrinkage = 0.95, parallel.locat = FALSE, digt = 4,
- idf.mu = 3,
- rep01 = 0.5,
- imethod = 1, zero = NULL) {
+ logitlaplace1 <-
+ function(tau = NULL,
+ llocation = "logit",
+ ilocation = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ Scale.arg = 1,
+ ishrinkage = 0.95, parallel.locat = FALSE, digt = 4,
+ idf.mu = 3,
+ rep01 = 0.5,
+ imethod = 1, zero = NULL) {
if (!is.Numeric(rep01, positive = TRUE, length.arg = 1) ||
rep01 > 0.5)
@@ -5415,10 +5503,6 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
ishrinkage < 0 ||
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
@@ -5448,9 +5532,22 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
constraints <- cm.VGAM(matrix(1, M, 1), x = x,
bool = .parallel.locat ,
constraints = constraints, apply.int = FALSE)
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .parallel.locat = parallel.locat,
.Scale.arg = Scale.arg, .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 1,
+ Q1 = 1,
+ multipleResponses = FALSE,
+ parameters.names = c("location"),
+ llocation = .llocat ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat ))),
+
initialize = eval(substitute(expression({
extra$M <- M <- max(length( .Scale.arg ), length( .kappa )) # Recycle
extra$Scale <- rep( .Scale.arg, length = M)
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 8c99499..5842e06 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -854,7 +854,8 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
abline(h = loglik.orig,
col = "darkorange", lty = "dashed")
- abline(h = loglik.orig - qchisq(0.95, df = 1),
+ abline(h = loglik.orig -
+ qchisq(0.95, df = 1) / 2,
col = "darkorange", lty = "dashed")
abline(v = a21.hat + c(-1, 1) * 1.96 * SE.a21.hat,
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index b9a8fcf..1c14b50 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -157,7 +157,7 @@ rcqo <- function(n, p, S,
S^(1/Rank) < 2)
stop("S^(1/Rank) must be an integer greater or equal to 2")
if (Rank == 1) {
- optimums <- matrix(as.numeric(NA), S, Rank)
+ optimums <- matrix(NA_real_, S, Rank)
for (r in 1:Rank) {
optimums[, r] <- seq(-AA, AA, len = S^(1/Rank))
}
diff --git a/R/family.robust.R b/R/family.robust.R
index 88afece..5817bf7 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -181,7 +181,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
huber2 <- function(llocation = "identitylink", lscale = "loge",
- k = 0.862, imethod = 1, zero = 2) {
+ k = 0.862, imethod = 1,
+ zero = "scale") {
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
@@ -195,9 +196,6 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
if (!is.Numeric(k, length.arg = 1, positive = TRUE))
stop("bad input for argument 'k'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
llocat <- as.list(substitute(llocation))
@@ -216,9 +214,27 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
namesof("location", llocat, earg = elocat), ", ",
namesof("scale", lscale, earg = escale), "\n\n",
"Mean: location"),
+
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat,
+ .lscale = lscale ))),
+
+
initialize = eval(substitute(expression({
temp5 <-
@@ -321,7 +337,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
.elocat = elocat, .escale = escale,
.eps = eps, .k = k ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
+ wz <- matrix(NA_real_, n, 2) # diag matrix; y is one-col too
@@ -468,7 +484,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
.elocat = elocat,
.eps = eps, .k = k ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, 1) # diag matrix; y is one-col too
+ wz <- matrix(NA_real_, n, 1) # diag matrix; y is one-col too
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 4afedab..542915b 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -970,7 +970,7 @@ Coef.qrrvglm <-
Tolerance[, , ii] <- -0.5 * solve(Darray[, , ii])
bellshaped[ii] <- all(eigen(Tolerance[, , ii])$values > 0)
}
- optimum <- matrix(as.numeric(NA), Rank, M)
+ optimum <- matrix(NA_real_, Rank, M)
for (ii in 1:M)
if (bellshaped[ii])
optimum[, ii] <- Tolerance[, , ii] %*% cbind(Amat[ii, ])
@@ -1127,7 +1127,7 @@ Coef.qrrvglm <-
mymax <- object at family@linkinv(rbind(eta.temp), extra = object at extra)
c(mymax) # Convert from matrix to vector
} else {
- 5 * rep(as.numeric(NA), length.out = M) # Make "numeric"
+ 5 * rep(NA_real_, length.out = M) # Make "numeric"
}
names(maximum) <- ynames
@@ -1233,7 +1233,7 @@ show.Coef.qrrvglm <- function(x, ...) {
Rank <- object at Rank
M <- nrow(object at A)
NOS <- object at NOS
- mymat <- matrix(as.numeric(NA), NOS, Rank)
+ mymat <- matrix(NA_real_, NOS, Rank)
if (Rank == 1) { # || object at Diagonal
for (ii in 1:NOS) {
fred <- if (Rank > 1)
@@ -1870,7 +1870,7 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
if (nrow(Cimat) != p2 || ncol(Cimat) != r)
stop("'Cimat' wrong shape")
- dct.da <- matrix(as.numeric(NA), (M-r-length(str0))*r, r*p2)
+ dct.da <- matrix(NA_real_, (M-r-length(str0))*r, r*p2)
if ((length(Index.corner) + length(str0)) == M)
stop("cannot handle full rank models yet")
@@ -1995,7 +1995,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
nn <- nrow(xmat)
- Aimat <- matrix(as.numeric(NA), M, r)
+ Aimat <- matrix(NA_real_, M, r)
Aimat[Index.corner,] <- diag(r)
Aimat[-Index.corner,] <- theta # [-(1:M)]
@@ -2104,7 +2104,7 @@ rrr.deriv.ResSS <- function(theta, wz, U, z, M, r, xmat,
pp, Index.corner, intercept = TRUE,
xij = NULL) {
- Amat <- matrix(as.numeric(NA), M, r)
+ Amat <- matrix(NA_real_, M, r)
Amat[Index.corner,] <- diag(r)
Amat[-Index.corner,] <- theta # [-(1:M)]
@@ -2135,7 +2135,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
nn <- nrow(xmat)
- Aimat <- matrix(as.numeric(NA), M, r)
+ Aimat <- matrix(NA_real_, M, r)
Aimat[Index.corner,] <- diag(r)
Aimat[-Index.corner,] <- theta # [-(1:M)]
diff --git a/R/family.sur.R b/R/family.sur.R
index a0679ed..63f67ac 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -70,8 +70,11 @@
infos = eval(substitute(function(...) {
list(M1 = 1, # zz???
+ Q1 = 1,
parallel = .parallel ,
- multipleResponses = TRUE )
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = as.character(NA))
}, list( .parallel = parallel ))),
initialize = eval(substitute(expression({
diff --git a/R/family.survival.R b/R/family.survival.R
index ce55513..41e130c 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -16,7 +16,8 @@
function(r1 = 0, r2 = 0,
lmu = "identitylink",
lsd = "loge",
- imu = NULL, isd = NULL, zero = 2) {
+ imu = NULL, isd = NULL,
+ zero = "sd") {
if (!is.Numeric(r1, length.arg = 1, integer.valued = TRUE) ||
r1 < 0)
stop("bad input for 'r1'")
@@ -34,21 +35,34 @@
new("vglmff",
- blurb = c("Univariate Normal distribution with double censoring\n\n",
+ blurb = c("Univariate normal distribution with double censoring\n\n",
"Links: ",
namesof("mu", lmu, earg = emu, tag = TRUE), ", ",
namesof("sd", lsd, earg = esd, tag = TRUE),
"\n",
"Variance: sd^2"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}) , list( .zero = zero))),
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu", "sd"),
+ lmu = .lmu ,
+ lsd = .lsd ,
+ zero = .zero )
+ }, list( .zero = zero, .lmu = lmu, .lsd = lsd
+ ))),
initialize = eval(substitute(expression({
predictors.names <-
- c(namesof("mu", .lmu, earg =.emu, tag = FALSE),
- namesof("sd", .lsd, earg =.esd, tag = FALSE))
+ c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("sd", .lsd , earg = .esd , tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
@@ -132,15 +146,15 @@
dl.dsd <- -1/sd + (y-mu)^2 / sd^3 +
((- .r1 * z1*fz1/Fz1 + .r2 * z2*fz2/(1-Fz2)) / sd) / (n*w)
- dmu.deta <- dtheta.deta(mu, .lmu, earg =.emu)
- dsd.deta <- dtheta.deta(sd, .lsd, earg =.esd)
+ dmu.deta <- dtheta.deta(mu, .lmu , earg =.emu )
+ dsd.deta <- dtheta.deta(sd, .lsd , earg =.esd )
c(w) * cbind(dl.dmu * dmu.deta, dl.dsd * dsd.deta)
}) , list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
.r1 = r1, .r2 = r2 ))),
- weight=expression({
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ weight = expression({
+ wz <- matrix(NA_real_, n, dimm(M))
Q.1 <- ifelse(q1 == 0, 1, q1) # Saves division by 0 below; not elegant
Q.2 <- ifelse(q2 == 0, 1, q2) # Saves division by 0 below; not elegant
@@ -286,7 +300,9 @@ rbisa <- function(n, scale = 1, shape) {
bisa <- function(lscale = "loge", lshape = "loge",
iscale = 1, ishape = NULL,
- imethod = 1, zero = NULL, nowarning = FALSE) {
+ imethod = 1,
+ zero = "shape",
+ nowarning = FALSE) {
@@ -315,15 +331,31 @@ rbisa <- function(n, scale = 1, shape) {
namesof("scale", lscale, earg = escale, tag = TRUE), "; ",
namesof("shape", lshape, earg = eshape, tag = TRUE)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}) , list( .zero = zero))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("scale", "shape"),
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero, .lscale = lscale, .lshape = lshape
+ ))),
+
+
initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
predictors.names <-
- c(namesof("scale", .lscale , earg = .escale, tag = FALSE),
- namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape , tag = FALSE))
if (!length(etastart)) {
scale.init <- rep( .iscale , len = n)
@@ -397,7 +429,7 @@ rbisa <- function(n, scale = 1, shape) {
}) , list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # Diagonal!!
+ wz <- matrix(NA_real_, n, M) # Diagonal!!
wz[, iam(2, 2, M)] <- 2 * dsh.deta^2 / sh^2
hfunction <- function(alpha)
alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
diff --git a/R/family.ts.R b/R/family.ts.R
index 695683e..74fb898 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -140,6 +140,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
}
+
rrar <- function(Ranks = 1, coefstart = NULL) {
lag.p <- length(Ranks)
@@ -345,7 +346,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p.lm]
}
- x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
+ x <- cbind(x, matrix(NA_real_, n, plag)) # Right size now
dx <- dimnames(x.save)
morenames <- paste("(lag", 1:plag, ")", sep = "")
dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
@@ -517,9 +518,10 @@ setMethod("show", "Coef.rrar",
- AR1.control <- function(criterion = "coefficients",
- stepsize = 0.33,
- maxit = 100, ...) {
+if (FALSE)
+ AR1.control <- function(criterion = "loglikelihood",
+ stepsize = 1,
+ maxit = 30, ...) {
list(criterion = criterion,
stepsize = stepsize,
maxit = maxit)
@@ -527,6 +529,16 @@ setMethod("show", "Coef.rrar",
+if (TRUE)
+ AR1.control <-
+ function(half.stepsizing = FALSE, # Avoids jittering very near the solution
+ ...) {
+ list(half.stepsizing = half.stepsizing
+ )
+}
+
+
+
AR1 <-
function(ldrift = "identitylink",
lsd = "loge",
@@ -536,15 +548,16 @@ setMethod("show", "Coef.rrar",
isd = NULL,
ivar = NULL,
irho = NULL,
- ishrinkage = 0.9,
+ imethod = 1,
+ ishrinkage = 1, # 0.90; unity means a constant
type.likelihood = c("exact", "conditional"),
var.arg = FALSE, # TRUE,
nodrift = FALSE, # TRUE,
almost1 = 0.99,
- zero = c(-2, -3)) {
- imethod <- 1
+ zero = c(if (var.arg) "var" else "sd", "rho") # "ARcoef1"
+ ) {
type.likelihood <- match.arg(type.likelihood,
- c("exact", "conditional"))[1]
+ c("exact", "conditional"))[1]
if (!is.Numeric(almost1, length.arg = 1) || almost1 < 0.9 ||
almost1 >= 1)
@@ -562,8 +575,6 @@ setMethod("show", "Coef.rrar",
-
-
if (!is.logical(nodrift) ||
length(nodrift) != 1)
stop("argument 'nodrift' must be a single logical")
@@ -572,8 +583,6 @@ setMethod("show", "Coef.rrar",
length(var.arg) != 1)
stop("argument 'var.arg' must be a single logical")
- if(length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("Bad input for argument 'zero'.")
ismn <- idrift
lsmn <- as.list(substitute(ldrift))
esmn <- link2list(lsmn)
@@ -602,8 +611,8 @@ setMethod("show", "Coef.rrar",
"Links: ",
if (nodrift) "" else
paste(namesof("drift", lsmn, earg = esmn), ", ", sep = ""),
- namesof(n.sc , l.sc, earg = e.sc), ", ",
- namesof("ARcoef1", lrho, earg = erho), "\n",
+ namesof(n.sc , l.sc, earg = e.sc), ", ",
+ namesof("rho", lrho, earg = erho), "\n",
"Model: Y_t = drift + rho * Y_{t-1} + error_{t},", "\n",
" where 'error_{2:n}' ~ N(0, sigma^2) independently",
if (nodrift) ", and drift = 0" else "",
@@ -624,8 +633,8 @@ setMethod("show", "Coef.rrar",
expected = TRUE,
multipleResponse = TRUE,
type.likelihood = .type.likelihood ,
- ldrift = if ( .nodrift) NULL else .lsmn ,
- edrift = if ( .nodrift) NULL else .esmn ,
+ ldrift = if ( .nodrift ) NULL else .lsmn ,
+ edrift = if ( .nodrift ) NULL else .esmn ,
lvar = .lvar ,
lsd = .lsdv ,
evar = .evar ,
@@ -651,7 +660,7 @@ setMethod("show", "Coef.rrar",
w <- check$w
y <- check$y
if ( .type.likelihood == "conditional")
- w[1, ] <- 1.0e-6
+ w[1, ] <- 1.0e-8 # 1.0e-6
NOS <- ncoly <- ncol(y)
@@ -673,33 +682,33 @@ setMethod("show", "Coef.rrar",
namesof(var.names, .lvar , earg = .evar , tag = FALSE) else
namesof(sdv.names, .lsdv , earg = .esdv , tag = FALSE),
namesof(rho.names, .lrho , earg = .erho , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
- init.smn <- if (length( .ismn ))
- matrix( .ismn , n, NOS, byrow = TRUE) else
- (1 - .ishrinkage ) * y +
- .ishrinkage * matrix(colMeans(y),
- n, ncoly, byrow = TRUE)
- init.rho <- matrix(if (length( .irho )) .irho else 0.05,
+ init.smn <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ismn , ishrinkage = .ishrinkage ,
+ pos.only = FALSE)
+
+
+
+
+ init.rho <- matrix(if (length( .irho )) .irho else 0.1, # Dummy value
n, NOS, byrow = TRUE)
- init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0,
+ init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0, # Dummy value
n, NOS, byrow = TRUE)
- init.var <- matrix(if (length( .ivar )) .ivar else 1.0,
+ init.var <- matrix(if (length( .ivar )) .ivar else 1.0, # Dummy value
n, NOS, byrow = TRUE)
- if ( .imethod == 1 ) {
- for (spp. in 1: NOS) {
- mycor <- cor(y[-1, spp.], y[-n, spp.])
- init.smn[-1, spp.] <- init.smn[-1, spp.] * (1 - mycor)
- if (!length( .irho ))
- init.rho[, spp.] <- sign(mycor) * min(0.95, abs(mycor))
- if (!length( .ivar ))
- init.var[, spp.] <- var(y[, spp.]) * (1 - mycor^2)
- if (!length( .isdv ))
- init.sdv[, spp.] <- sqrt(init.var[, spp.])
- }
- }
+ for (jay in 1: NOS) {
+ mycor <- cor(y[-1, jay], y[-n, jay])
+ init.smn[-1, jay] <- init.smn[-1, jay] * (1 - mycor)
+ if (!length( .irho ))
+ init.rho[, jay] <- sign(mycor) * min(0.95, abs(mycor))
+ if (!length( .ivar ))
+ init.var[, jay] <- var(y[, jay]) * (1 - mycor^2)
+ if (!length( .isdv ))
+ init.sdv[, jay] <- sqrt(init.var[, jay])
+ } # for
etastart <-
cbind(if ( .nodrift ) NULL else
@@ -708,7 +717,7 @@ setMethod("show", "Coef.rrar",
theta2eta(init.var, .lvar , earg = .evar ) else
theta2eta(init.sdv, .lsdv , earg = .esdv ),
theta2eta(init.rho, .lrho , earg = .erho ))
- etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
} # end of etastart
}), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
.esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
@@ -738,7 +747,7 @@ setMethod("show", "Coef.rrar",
M1 <- extra$M1
temp.names <- c(mynames1, mynames2, mynames3)
- temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
misc$link <- rep( .lrho , length = M1 * ncoly)
misc$earg <- vector("list", M1 * ncoly)
@@ -929,7 +938,7 @@ setMethod("show", "Coef.rrar",
dl.dsdv * dsdv.deta,
dl.drho * drho.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
.esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
.nodrift = nodrift,
@@ -958,7 +967,7 @@ setMethod("show", "Coef.rrar",
ned2l.drho <- (( mu[-n, , drop = FALSE])^2 +
ar.var[-n, , drop = FALSE] /
- temp5[-1, , drop = FALSE]) / ar.var[-1, , drop = FALSE]
+ temp5[-n, , drop = FALSE]) / ar.var[-1, , drop = FALSE]
ned2l.drho <- rbind(0, ned2l.drho)
ned2l.drho[1, ] <- 2 * (ar.rho[1, ] / temp5[1, ])^2
@@ -1004,7 +1013,7 @@ dAR1 <- function(x,
log = FALSE) {
type.likelihood <- match.arg(type.likelihood,
- c("exact", "conditional"))[1]
+ c("exact", "conditional"))[1]
is.vector.x <- is.vector(x)
diff --git a/R/family.univariate.R b/R/family.univariate.R
index bde34c1..bdc49fa 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -47,9 +47,6 @@
inuvec <- inu
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -156,7 +153,7 @@
d2l.dTheta2 <- (2 * nuvec^2 / (1+nuvec)) / (1-Theta^2)
d2l.dnuvec2 <- trigamma(nuvec+0.5) - trigamma(nuvec+1)
- wz <- matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz <- matrix(NA_real_, n, M) # diagonal matrix
wz[, iam(1, 1, M)] <- d2l.dTheta2 * dTheta.deta^2
wz[, iam(2, 2, M)] <- d2l.dnuvec2 * dnuvec.deta^2
@@ -389,10 +386,6 @@ rhzeta <- function(n, alpha) {
lphi <- attr(ephi, "function.name")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
if (!is.Numeric(iphi, positive = TRUE) ||
max(iphi) >= 1.0)
@@ -719,9 +712,6 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(ialpha, positive = TRUE))
stop("'ialpha' must contain positive values only")
@@ -897,9 +887,6 @@ rdiric <- function(n, shape, dimension = NULL,
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -1102,7 +1089,7 @@ rdiric <- function(n, shape, dimension = NULL,
stop("Sorry, currently cannot handle x < 0")
ok <- is.finite(x) & x > 0 & x != 1 # Handles NAs
- ans <- rep(as.numeric(NA), length(x))
+ ans <- rep(NA_real_, length(x))
nn <- sum(ok) # Effective length (excludes x < 0 and x = 1 values)
if (nn)
ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
@@ -1147,6 +1134,7 @@ dzeta <- function(x, p, log = FALSE) {
}
+
zetaff <- function(link = "loge", init.p = NULL, zero = NULL) {
@@ -1158,9 +1146,6 @@ dzeta <- function(x, p, log = FALSE) {
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -1195,7 +1180,7 @@ dzeta <- function(x, p, log = FALSE) {
ncoly <- ncol(y)
- mynames1 <- paste("p", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("p", ncoly)
predictors.names <-
namesof(mynames1, .link , earg = .earg , tag = FALSE)
@@ -1501,7 +1486,8 @@ cauchy.control <- function(save.weights = TRUE, ...) {
cauchy <- function(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by = 0.2),
- imethod = 1, nsimEIM = NULL, zero = 2) {
+ imethod = 1, nsimEIM = NULL,
+ zero = "scale") {
llocat <- as.list(substitute(llocation))
elocat <- link2list(llocat)
@@ -1519,9 +1505,6 @@ cauchy.control <- function(save.weights = TRUE, ...) {
stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(nsimEIM) &&
(!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE) ||
nsimEIM <= 50))
@@ -1534,16 +1517,32 @@ cauchy.control <- function(save.weights = TRUE, ...) {
new("vglmff",
- blurb = c("Two parameter Cauchy distribution ",
+ blurb = c("Two-parameter Cauchy distribution ",
"(location & scale unknown)\n\n",
"Link: ",
namesof("location", llocat, earg = elocat), "\n",
namesof("scale", lscale, earg = escale), "\n\n",
"Mean: NA\n",
"Variance: NA"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat,
+ .lscale = lscale ))),
+
initialize = eval(substitute(expression({
predictors.names <- c(
namesof("location", .llocat , earg = .elocat , tag = FALSE),
@@ -1616,8 +1615,8 @@ cauchy.control <- function(save.weights = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
- myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -1694,7 +1693,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
dthetas.detas[, ind1$col]
wz <- c(w) * matrix(wz, n, dimm(M))
} else {
- wz <- cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
+ wz <- cbind(matrix(0.5 / myscale^2, n, 2), matrix(0, n, 1)) *
dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
wz <- c(w) * wz[, 1:M] # diagonal wz
}
@@ -1977,9 +1976,6 @@ cauchy.control <- function(save.weights = TRUE, ...) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -1988,14 +1984,20 @@ cauchy.control <- function(save.weights = TRUE, ...) {
"Mean: shape * scale", "\n",
"Variance: shape * scale^2"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
+
+
+
+
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ multipleResponses = TRUE,
+ expected = TRUE,
zero = .zero )
}, list( .zero = zero ))),
@@ -2021,31 +2023,27 @@ cauchy.control <- function(save.weights = TRUE, ...) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ parameters.names <- param.names("scale", ncoly)
predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ namesof(parameters.names, .link , earg = .earg , tag = FALSE)
shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
byrow = TRUE)
if (!length(etastart)) {
- if ( .imethod == 1) {
- sc.init <- y / shape.mat
- }
- if ( .imethod == 2) {
- sc.init <- (colSums(y * w) / colSums(w)) / shape.mat
- }
- if ( .imethod == 3) {
- sc.init <- median(y) / shape.mat
- }
-
- if ( !is.matrix(sc.init))
- sc.init <- matrix(sc.init, n, M, byrow = TRUE)
+ sc.init <- if ( .imethod == 1) {
+ y / shape.mat
+ } else if ( .imethod == 2) {
+ (colSums(y * w) / colSums(w)) / shape.mat
+ } else if ( .imethod == 3) {
+ matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) / shape.mat
+ }
+ if ( !is.matrix(sc.init))
+ sc.init <- matrix(sc.init, n, M, byrow = TRUE)
- etastart <-
- theta2eta(sc.init, .link , earg = .earg )
+ etastart <- theta2eta(sc.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg,
.shape.arg = shape.arg, .imethod = imethod ))),
@@ -2058,10 +2056,10 @@ cauchy.control <- function(save.weights = TRUE, ...) {
last = eval(substitute(expression({
M1 <- extra$M1
misc$link <- c(rep( .link , length = ncoly))
- names(misc$link) <- mynames1
+ names(misc$link) <- parameters.names
misc$earg <- vector("list", M)
- names(misc$earg) <- mynames1
+ names(misc$earg) <- parameters.names
for (ii in 1:ncoly) {
misc$earg[[ii]] <- .earg
}
@@ -2460,9 +2458,6 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
any(imu <= A) || any(imu >= B)))
@@ -2616,7 +2611,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
trigamma(temp2) * (1-m1u)^2
d2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
@@ -2641,9 +2636,6 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
eshape2 <- link2list(lshape2)
lshape2 <- attr(eshape2, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
stop("bad input for argument 'i1'")
@@ -2797,7 +2789,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum
ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum
ned2l.dshape1shape2 <- -trig.sum
- wz <- matrix(as.numeric(NA), n, dimm(M)) # dimm(M) == 3
+ wz <- matrix(NA_real_, n, dimm(M)) # dimm(M) == 3
wz[, iam(1, 1, M)] <- ned2l.dshape12 * dshapes.deta[, 1]^2
wz[, iam(2, 2, M)] <- ned2l.dshape22 * dshapes.deta[, 2]^2
wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 * dshapes.deta[, 1] *
@@ -2908,9 +2900,8 @@ simple.exponential <- function() {
if (any(y <= extra$location))
stop("all responses must be greater than argument 'location'")
- mynames1 <- if (M == 1) "rate" else paste("rate", 1:M, sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , short = TRUE)
+ mynames1 <- param.names("rate", M)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
if (length(mustart) + length(etastart) == 0)
mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
@@ -2983,9 +2974,6 @@ simple.exponential <- function() {
earg <- link2list(link)
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(ishrinkage, length.arg = 1) ||
ishrinkage < 0 || ishrinkage > 1)
@@ -3050,9 +3038,8 @@ simple.exponential <- function() {
if (any(y <= extra$location))
stop("all responses must be greater than ", extra$location)
- mynames1 <- if (M == 1) "rate" else paste("rate", 1:M, sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , short = TRUE)
+ mynames1 <- param.names("rate", M)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
if (length(mustart) + length(etastart) == 0)
mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
@@ -3149,9 +3136,6 @@ simple.exponential <- function() {
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -3190,9 +3174,8 @@ simple.exponential <- function() {
M <- if (is.matrix(y)) ncol(y) else 1
M1 <- 1
- mynames1 <- if (M == 1) "shape" else paste("shape", 1:M, sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , short = TRUE)
+ mynames1 <- param.names("shape", M)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
if (!length(etastart))
etastart <- cbind(theta2eta(y + 1/8, .link , earg = .earg ))
@@ -3278,7 +3261,8 @@ simple.exponential <- function() {
function(lrate = "loge", lshape = "loge",
irate = NULL, ishape = NULL,
lss = TRUE,
- zero = ifelse(lss, -2, -1)) {
+ zero = "shape"
+ ) {
expected <- TRUE # FALSE does not work well
@@ -3299,9 +3283,6 @@ simple.exponential <- function() {
if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
@@ -3323,9 +3304,9 @@ simple.exponential <- function() {
"Mean: mu = shape/rate\n",
"Variance: (mu^2)/shape = shape/rate^2"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
@@ -3359,21 +3340,22 @@ simple.exponential <- function() {
if ( .lss ) {
- mynames1 <- paste("rate", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("rate", ncoly)
+ mynames2 <- param.names("shape", ncoly)
predictors.names <-
c(namesof(mynames1, .lratee , earg = .eratee , tag = FALSE),
namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
} else {
- mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("rate", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("shape", ncoly)
+ mynames2 <- param.names("rate", ncoly)
predictors.names <-
c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
namesof(mynames2, .lratee , earg = .eratee , tag = FALSE))
}
+ parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
predictors.names <- predictors.names[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
@@ -3404,10 +3386,10 @@ simple.exponential <- function() {
etastart <- if ( .lss )
cbind(theta2eta(Ratee.init, .lratee , earg = .eratee ),
theta2eta(Shape.init, .lshape , earg = .eshape ))[,
- interleave.VGAM(M, M = M1)] else
+ interleave.VGAM(M, M1 = M1)] else
cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
theta2eta(Ratee.init, .lratee , earg = .eratee ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .lratee = lratee, .lshape = lshape,
.iratee = iratee, .ishape = ishape,
@@ -3428,8 +3410,8 @@ simple.exponential <- function() {
rep( .lshape , length = ncoly)) else
c(rep( .lshape , length = ncoly),
rep( .lratee , length = ncoly))
- misc$link <- avector[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ misc$link <- avector[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -3497,7 +3479,7 @@ simple.exponential <- function() {
dl.dshape * dshape.deta) else
c(w) * cbind(dl.dshape * dshape.deta,
dl.dratee * dratee.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lratee = lratee, .lshape = lshape,
.eratee = eratee, .eshape = eshape,
.scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
@@ -3541,7 +3523,8 @@ simple.exponential <- function() {
function(lmu = "loge", lshape = "loge",
imethod = 1, ishape = NULL,
parallel = FALSE,
- deviance.arg = FALSE, zero = -2) {
+ deviance.arg = FALSE,
+ zero = "shape") {
@@ -3560,8 +3543,6 @@ simple.exponential <- function() {
lshape <- attr(eshape, "function.name")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
@@ -3582,7 +3563,7 @@ simple.exponential <- function() {
ans <-
new("vglmff",
- blurb = c("2-parameter Gamma distribution",
+ blurb = c("2-parameter gamma distribution",
" (McCullagh and Nelder 1989 parameterization)\n",
"Links: ",
namesof("mu", lmu, earg = emu), ", ",
@@ -3596,16 +3577,18 @@ simple.exponential <- function() {
constraints = constraints,
apply.int = .apply.parint )
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
- constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero,
.parallel = parallel, .apply.parint = apply.parint ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("mu", "shape"),
zero = .zero )
}, list( .zero = zero ))),
@@ -3627,7 +3610,7 @@ simple.exponential <- function() {
assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
envir = VGAMenv)
- if (any(function.name == c("cqo","cao")) &&
+ if (any(function.name == c("cqo", "cao")) &&
is.Numeric( .zero , length.arg = 1) && .zero != -2)
stop("argument zero = -2 is required")
@@ -3635,14 +3618,12 @@ simple.exponential <- function() {
NOS <- ncoly <- ncol(y) # Number of species
- temp1.names <-
- if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = "")
- temp2.names <-
- if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
+ temp1.names <- param.names("mu", NOS)
+ temp2.names <- param.names("shape", NOS)
predictors.names <-
c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
namesof(temp2.names, .lshape , earg = .eshape , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
@@ -3673,7 +3654,7 @@ simple.exponential <- function() {
cbind(theta2eta(mymu, .lmu , earg = .emu ),
theta2eta(init.shape, .lshape , earg = .eshape ))
etastart <-
- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
.emu = emu, .eshape = eshape,
@@ -3691,10 +3672,9 @@ simple.exponential <- function() {
tmp34 <- c(rep( .lmu , length = NOS),
rep( .lshape , length = NOS))
- names(tmp34) =
- c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
- if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
- tmp34 <- tmp34[interleave.VGAM(M, M = 2)]
+ names(tmp34) <- c(param.names("mu", NOS),
+ param.names("shape", NOS))
+ tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
misc$link <- tmp34 # Already named
misc$earg <- vector("list", M)
@@ -3715,7 +3695,7 @@ simple.exponential <- function() {
linkfun = eval(substitute(function(mu, extra = NULL) {
temp <- theta2eta(mu, .lmu , earg = .emu )
temp <- cbind(temp, NA * temp)
- temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
+ temp[, interleave.VGAM(ncol(temp), M1 = M1), drop = FALSE]
}, list( .lmu = lmu, .emu = emu ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
@@ -3785,13 +3765,13 @@ simple.exponential <- function() {
myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
dl.dshape * dshape.deta)
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lmu = lmu, .lshape = lshape,
.emu = emu, .eshape = eshape))),
weight = eval(substitute(expression({
ned2l.dmu2 <- shape / (mymu^2)
ned2l.dshape2 <- trigamma(shape) - 1 / shape
- wz <- matrix(as.numeric(NA), n, M) # 2 = M1; diagonal!
+ wz <- matrix(NA_real_, n, M) # 2 = M1; diagonal!
wz[, M1*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
wz[, M1*(1:NOS) ] <- ned2l.dshape2 * dshape.deta^2
@@ -3852,9 +3832,6 @@ simple.exponential <- function() {
stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -3901,7 +3878,7 @@ simple.exponential <- function() {
M <- M1 * ncoly
- mynames1 <- paste("prob", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("prob", ncoly)
predictors.names <-
namesof(mynames1, .link , earg = .earg , tag = FALSE)
@@ -4080,6 +4057,167 @@ rbetageom <- function(n, shape1, shape2) {
+ Init.mu <-
+ function(y, x = cbind("(Intercept)" = rep(1, nrow(as.matrix(y)))),
+ w = x, imethod = 1, imu = NULL,
+ ishrinkage = 0.95,
+ pos.only = FALSE,
+ probs.y = 0.35) {
+ if (!is.matrix(x)) x <- as.matrix(x)
+ if (!is.matrix(y)) y <- as.matrix(y)
+ if (!is.matrix(w)) w <- as.matrix(w)
+ if (ncol(w) != ncol(y))
+ w <- matrix(w, nrow = nrow(y), ncol = ncol(y))
+
+ if (length(imu)) {
+ MU.INIT <- matrix(imu, nrow(y), ncol(y), byrow = TRUE)
+ return(MU.INIT)
+ }
+
+
+ if (!is.Numeric(ishrinkage, length.arg = 1) ||
+ ishrinkage < 0 || ishrinkage > 1)
+ warning("bad input for argument 'ishrinkage'; ",
+ "using the value 0.95 instead")
+
+
+ if (imethod > 6) {
+ warning("argument 'imethod' should be 1 or 2 or... 6; ",
+ "using the value 1")
+ imethod <- 1
+ }
+ mu.init <- y
+ for (jay in 1:ncol(y)) {
+ TFvec <- if (pos.only) y[, jay] > 0 else TRUE
+ locn.est <- if ( imethod %in% c(1, 4)) {
+ weighted.mean(y[TFvec, jay], w[TFvec, jay]) + 1/16
+ } else if ( imethod %in% c(3, 6)) {
+ c(quantile(y[TFvec, jay], probs = probs.y ) + 1/16)
+ } else {
+ median(y[TFvec, jay]) + 1/16
+ }
+
+ if (imethod <= 3) {
+ mu.init[, jay] <- ishrinkage * locn.est +
+ (1 - ishrinkage ) * y[, jay]
+ } else {
+ medabsres <- median(abs(y[, jay] - locn.est)) + 1/32
+ allowfun <- function(z, maxtol = 1)
+ sign(z) * pmin(abs(z), maxtol)
+ mu.init[, jay] <- locn.est + (1 - ishrinkage ) *
+ allowfun(y[, jay] - locn.est, maxtol = medabsres)
+
+ mu.init[, jay] <- abs(mu.init[, jay]) + 1 / 1024
+ }
+ } # of for (jay)
+
+ mu.init
+ }
+
+
+
+
+
+
+
+
+EIM.NB.specialp <- function(mu, size,
+ y.max = NULL, # Must be an integer
+ cutoff.prob = 0.995,
+ intercept.only = FALSE,
+ extra.bit = TRUE) {
+
+
+ if (intercept.only) {
+ mu <- mu[1]
+ size <- size[1]
+ }
+
+ y.min <- 0 # A fixed constant really
+
+ if (!is.numeric(y.max)) {
+ eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+ y.max <- max(qnbinom(p = eff.p[2], mu = mu, size = size)) + 10
+ }
+
+ Y.mat <- if (intercept.only) y.min:y.max else
+ matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
+ neff.row <- ifelse(intercept.only, 1, nrow(Y.mat))
+ neff.col <- ifelse(intercept.only, length(Y.mat), ncol(Y.mat))
+
+ if (FALSE) {
+ trigg.term <- if (intercept.only) {
+ check2 <-
+ sum(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
+ / (Y.mat + size)^2)
+ check2
+ } else {
+ check2 <-
+ rowSums(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
+ / (Y.mat + size)^2)
+ check2
+ }
+ }
+
+
+ trigg.term <-
+ if (TRUE) {
+ answerC <- .C("eimpnbinomspecialp",
+ as.integer(intercept.only),
+ as.double(neff.row), as.double(neff.col),
+ as.double(size),
+ as.double(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)),
+ rowsums = double(neff.row))
+ answerC$rowsums
+ }
+
+ ned2l.dk2 <- trigg.term
+ if (extra.bit)
+ ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
+ ned2l.dk2
+} # end of EIM.NB.specialp()
+
+
+
+
+
+
+
+EIM.NB.speciald <- function(mu, size,
+ y.min = 0, # 20160201; must be an integer
+ y.max = NULL, # Must be an integer
+ cutoff.prob = 0.995,
+ intercept.only = FALSE,
+ extra.bit = TRUE) {
+
+
+
+
+
+ if (intercept.only) {
+ mu <- mu[1]
+ size <- size[1]
+ }
+
+ if (!is.numeric(y.max)) {
+ eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+ y.max <- max(qnbinom(p = eff.p[2], mu = mu, size = size)) + 10
+ }
+
+ Y.mat <- if (intercept.only) y.min:y.max else
+ matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
+ trigg.term <- if (intercept.only) {
+ dnbinom(Y.mat, size = size, mu = mu) %*% trigamma(Y.mat + size)
+ } else {
+ rowSums(dnbinom(Y.mat, size = size, mu = mu) *
+ trigamma(Y.mat + size))
+ }
+ ned2l.dk2 <- trigamma(size) - trigg.term
+ if (extra.bit)
+ ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
+ ned2l.dk2
+} # end of EIM.NB.speciald()
+
@@ -4090,18 +4228,22 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
negbinomial <-
- function(lmu = "loge", lsize = "loge",
- imu = NULL, isize = NULL,
- probs.y = 0.75,
- nsimEIM = 250, cutoff.prob = 0.995, # Maxiter = 5000,
- max.qnbinom = 1000,
- max.chunk.MB = 20, # max.memory = Inf is allowed
- deviance.arg = FALSE, imethod = 1,
- gsize = exp((-4):4),
+ function(
+ zero = "size",
parallel = FALSE,
- ishrinkage = 0.95, zero = -2) {
-
-
+ deviance.arg = FALSE,
+ mds.min = 1e-4,
+ nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
+ eps.trig = 1e-7,
+ max.support = 4000,
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lmu = "loge", lsize = "loge",
+ imethod = 1,
+ imu = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ isize = NULL,
+ gsize.mux = exp((-12:6)/2)) {
@@ -4109,7 +4251,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
- alternate.derivs <- FALSE # 20130823; added for 'nbcanlink'
if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
@@ -4117,18 +4258,22 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
- lmuuu <- as.list(substitute(lmu))
- emuuu <- link2list(lmuuu)
- lmuuu <- attr(emuuu, "function.name")
+ lmunb <- as.list(substitute(lmu))
+ emunb <- link2list(lmunb)
+ lmunb <- attr(emunb, "function.name")
- imuuu <- imu
+ imunb <- imu
lsize <- as.list(substitute(lsize))
esize <- link2list(lsize)
lsize <- attr(esize, "function.name")
- if (length(imuuu) && !is.Numeric(imuuu, positive = TRUE))
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 1e-5)
+ stop("argument 'eps.trig' must be positive and smaller in value")
+
+ if (length(imunb) && !is.Numeric(imunb, positive = TRUE))
stop("bad input for argument 'imu'")
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
@@ -4136,15 +4281,8 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
if (!is.Numeric(cutoff.prob, length.arg = 1) ||
cutoff.prob < 0.95 ||
cutoff.prob >= 1)
- stop("range error in the argument 'cutoff.prob'")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
+ stop("range error in the argument 'cutoff.prob'; ",
+ "a value in [0.95, 1) is needed")
if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
@@ -4164,9 +4302,9 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
- blurb = c("Negative-binomial distribution\n\n",
+ blurb = c("Negative binomial distribution\n\n",
"Links: ",
- namesof("mu", lmuuu, earg = emuuu), ", ",
+ namesof("mu", lmunb, earg = emunb), ", ",
namesof("size", lsize, earg = esize), "\n",
"Mean: mu\n",
"Variance: mu * (1 + mu / size) for NB-2"),
@@ -4180,9 +4318,9 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
bool = .parallel ,
constraints = constraints)
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .parallel = parallel, .zero = zero ))),
@@ -4190,141 +4328,119 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ mds.min = .mds.min ,
multipleResponses = TRUE,
- lmu = .lmuuu ,
+ parameters.names = c("mu", "size"),
+ lmu = .lmunb ,
lsize = .lsize ,
+ eps.trig = .eps.trig ,
zero = .zero )
- }, list( .zero = zero, .lsize = lsize, .lmuuu = lmuuu ))),
-
-
+ }, list( .zero = zero, .lsize = lsize, .lmunb = lmunb,
+ .eps.trig = eps.trig,
+ .mds.min = mds.min))),
initialize = eval(substitute(expression({
M1 <- 2
- temp5 <- w.y.check(w = w, y = y,
- Is.integer.y = TRUE,
- ncol.w.max = Inf,
- ncol.y.max = Inf,
- out.wy = TRUE,
- colsyperw = 1, maximize = TRUE)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1, maximize = TRUE)
w <- temp5$w
y <- temp5$y
assign("CQO.FastAlgorithm",
- ( .lmuuu == "loge") && ( .lsize == "loge"),
+ ( .lmunb == "loge") && ( .lsize == "loge"),
envir = VGAMenv)
if (any(function.name == c("cqo", "cao")) &&
- is.Numeric( .zero , length.arg = 1) &&
- .zero != -2)
- stop("argument zero = -2 is required")
+ ((is.Numeric( .zero , length.arg = 1) && .zero != -2) ||
+ (is.character( .zero ) && .zero != "size")))
+ stop("argument zero = 'size' or zero = -2 is required")
- if (any(y < 0))
- stop("negative values not allowed for the 'negbinomial' family")
- if (any(round(y) != y))
- stop("integer-values only allowed for the 'negbinomial' family")
- if (ncol(w) > ncol(y))
- stop("number of columns of prior-'weights' is greater than ",
- "the number of responses")
-
M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
predictors.names <-
- c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
- .lmuuu , earg = .emuuu , tag = FALSE),
- namesof(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
+ c(namesof(param.names("mu", NOS),
+ .lmunb , earg = .emunb , tag = FALSE),
+ namesof(param.names("size", NOS),
.lsize , earg = .esize , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
-
-
- if (is.numeric( .mu.init ))
- MU.INIT <- matrix( .mu.init , nrow(y), ncol(y), byrow = TRUE)
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
- mu.init <- y
- for (jay in 1:ncol(y)) {
- use.this <- if ( .imethod == 1) {
- weighted.mean(y[, jay], w[, jay]) + 1/16
- } else if ( .imethod == 3) {
- c(quantile(y[, jay], probs = .probs.y ) + 1/16)
- } else {
- median(y[, jay]) + 1/16
- }
-
- if (is.numeric( .mu.init )) {
- mu.init[, jay] <- MU.INIT[, jay]
- } else {
- medabsres <- median(abs(y[, jay] - use.this)) + 1/32
- allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
- mu.init[, jay] <- use.this + (1 - .ishrinkage ) *
- allowfun(y[, jay] - use.this, maxtol = medabsres)
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = FALSE,
+ probs.y = .probs.y )
- mu.init[, jay] <- abs(mu.init[, jay]) + 1 / 1024
- }
- } # of for (jay)
- if ( is.Numeric( .k.init )) {
- kay.init <- matrix( .k.init , nrow = n, ncol = NOS, byrow = TRUE)
+ if ( is.Numeric( .isize )) {
+ size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
} else {
negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
- mu <- extraargs
- sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+ sum(c(w) * dnbinom(x = y, mu = extraargs, size = kmat, log = TRUE))
}
- k.grid <- .gsize
- kay.init <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kay.init[, spp.] <- grid.search(k.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w[, spp.],
- extraargs = mu.init[, spp.])
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ size.init[, jay] <- grid.search(size.grid,
+ objfun = negbinomial.Loglikfun,
+ y = y[, jay], x = x, w = w[, jay],
+ extraargs = munb.init[, jay])
}
}
- newemu <- .emuuu
- if ( .lmuuu == "nbcanlink") {
- newemu$size <- kay.init
+ newemu <- .emunb
+ if ( .lmunb == "nbcanlink") {
+ newemu$size <- size.init
+ testing1 <- log(munb.init / (munb.init + size.init))
+ testing2 <- theta2eta(munb.init, link = .lmunb , earg = newemu )
}
-
etastart <-
- cbind(theta2eta(mu.init , link = .lmuuu , earg = newemu ),
- theta2eta(kay.init, link = .lsize , earg = .esize ))
+ cbind(theta2eta(munb.init, link = .lmunb , earg = newemu ),
+ theta2eta(size.init, link = .lsize , earg = .esize ))
etastart <-
- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
- }), list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize,
- .mu.init = imu, .gsize = gsize,
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
+ .imunb = imunb, .gsize.mux = gsize.mux,
.deviance.arg = deviance.arg,
- .k.init = isize, .probs.y = probs.y,
+ .isize = isize, .probs.y = probs.y,
.ishrinkage = ishrinkage, .nsimEIM = nsimEIM,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
- kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+ if ( .lmunb == "nbcanlink") {
+ eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
+ kmat <- eta2theta(eta.k, .lsize , earg = .esize )
-
-
-
- newemu <- .emuuu
- if ( .lmuuu == "nbcanlink") {
+
+ newemu <- .emunb
newemu$size <- kmat
- }
-
+ check.munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lmunb , earg = newemu )
-
- eta2theta(eta[, M1 * (1:NOS) - 1, drop = FALSE], .lmuuu ,
- earg = newemu)
- }, list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize))),
+
+ munb <- kmat / expm1(-eta[, c(TRUE, FALSE), drop = FALSE])
+ munb
+ } else {
+ eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lmunb , earg = .emunb )
+ }
+ }, list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize))),
last = eval(substitute(expression({
if (exists("CQO.FastAlgorithm", envir = VGAMenv))
@@ -4334,13 +4450,12 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
save.weights <- control$save.weights <- !all(ind2)
- temp0303 <- c(rep( .lmuuu , length = NOS),
+ temp0303 <- c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS))
- names(temp0303) =
- c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
- if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
- misc$link <- temp0303 # Already named
+ names(temp0303) <- c(param.names("mu", NOS),
+ param.names("size", NOS))
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+ misc$link <- temp0303 # Already named
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -4356,9 +4471,9 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
misc$expected <- TRUE
misc$ishrinkage <- .ishrinkage
misc$multipleResponses <- TRUE
- }), list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize,
- .cutoff.prob = cutoff.prob, # .min.size = min.size,
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
+ .cutoff.prob = cutoff.prob,
.max.chunk.MB = max.chunk.MB,
.nsimEIM = nsimEIM,
.ishrinkage = ishrinkage,
@@ -4367,56 +4482,46 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
linkfun = eval(substitute(function(mu, extra = NULL) {
M1 <- 2
- newemu <- .emuuu
+ newemu <- .emunb
- eta.temp <- theta2eta(mu, .lmuuu , earg = newemu)
+ eta.temp <- theta2eta(mu, .lmunb , earg = newemu)
eta.kayy <- theta2eta(if (is.numeric( .isize )) .isize else 1.0,
.lsize , earg = .esize )
eta.kayy <- 0 * eta.temp + eta.kayy # Right dimension now.
-
-
- if ( .lmuuu == "nbcanlink") {
+ if ( .lmunb == "nbcanlink") {
newemu$size <- eta2theta(eta.kayy, .lsize , earg = .esize )
}
eta.temp <- cbind(eta.temp, eta.kayy)
- eta.temp[, interleave.VGAM(ncol(eta.temp), M = M1), drop = FALSE]
- }, list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize,
+ eta.temp[, interleave.VGAM(ncol(eta.temp), M1 = M1), drop = FALSE]
+ }, list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
.isize = isize ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
-
- eta.k <- eta[, M1*(1:NOS), drop = FALSE]
-
+ eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
if ( FALSE && .lsize == "loge") {
bigval <- 68
- eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
- eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
+ eta.k[eta.k > bigval] <- bigval
+ eta.k[eta.k < -bigval] <- -bigval
}
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
- newemu <- .emuuu
- if ( .lmuuu == "nbcanlink") {
+ newemu <- .emunb
+ if ( .lmunb == "nbcanlink") {
newemu$size <- kmat
}
-
-
-
-
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -4428,7 +4533,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
}
}
}, list( .lsize = lsize,
- .lmuuu = lmuuu, .emuuu = emuuu, .esize = esize))),
+ .lmunb = lmunb, .emunb = emunb, .esize = esize))),
vfamily = c("negbinomial"),
@@ -4442,13 +4547,33 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- muuuu <- cbind(eta2theta(eta[, c(TRUE, FALSE)], .lmuuu , .emuuu ))
- eta.k <- cbind(eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize ))
+ muuuu <- cbind(eta2theta(eta[, c(TRUE, FALSE)], .lmunb , earg = .emunb ))
+ eta.k <- cbind(eta2theta(eta[, c(FALSE, TRUE)], .lsize , earg = .esize ))
rnbinom(nsim * length(muuuu), mu = muuuu, size = eta.k)
- }, list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize ))),
+ }, list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize ))),
+ validparams = eval(substitute(function(eta, extra = NULL) {
+ munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lsize , earg = .esize )
+
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- all(munb / size > smallval)
+ ans <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ overdispersion
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "replacing them by an arbitrary large value within ",
+ "the parameter space. Try fitting a quasi-Poisson ",
+ "model instead.")
+ ans
+ }, list( .lmunb = lmunb, .emunb = emunb,
+ .lsize = lsize, .esize = esize,
+ .mds.min = mds.min))),
@@ -4457,6 +4582,9 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
+ odd.iter <- 1 # iter %% 2
+ even.iter <- 1 # 1 - odd.iter
+
if ( iter == 1 && .deviance.arg ) {
if (control$criterion != "coefficients" &&
control$half.step)
@@ -4466,8 +4594,6 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
-
-
low.index <- ifelse(names(constraints)[1] == "(Intercept)", 2, 1)
if (low.index <= length(constraints))
for (iii in low.index:length(constraints)) {
@@ -4483,139 +4609,139 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
-
M1 <- 2
NOS <- ncol(eta) / M1
- M <- ncol(eta)
- eta.k <- eta[, M1*(1:NOS) , drop = FALSE]
+ eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
if (FALSE && .lsize == "loge") {
- bigval <- 68
- eta.k <- ifelse(eta.k > bigval, bigval, eta.k)
- eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
+ bigval <- 68 # 3.404276e+29
+ bigval <- 68 # 3.404276e+29
+ eta.k[eta.k > bigval] <- bigval
+ eta.k[eta.k < -bigval] <- -bigval
}
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+ smallval <- 1e-4 # Something like this is needed
+ if (any(infinite.size <- mu / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "replacing them by a large value within ",
+ "the parameter space. Try fitting a quasi-Poisson ",
+ "model instead.")
+ kmat[infinite.size] <- mu[infinite.size] / smallval
+ }
- newemu <- .emuuu
- if ( .lmuuu == "nbcanlink") {
+ newemu <- .emunb
+ if ( .lmunb == "nbcanlink") {
newemu$size <- kmat
}
+ dl.dmunb <- y / mu - (1 + y/kmat) / (1 + mu/kmat)
+ dl.dsize <- digamma(y + kmat) - digamma(kmat) -
+ (y - mu) / (mu + kmat) + log1p(-mu / (kmat + mu))
+ if (any(infinite.size)) {
+ dl.dsize[infinite.size] <- 1e-8 # A small number
+ }
+
- dl.dmu <- y / mu - (y + kmat) / (mu + kmat)
- dl.dk <- digamma(y + kmat) - digamma(kmat) -
- (y - mu) / (mu + kmat) + log(kmat / (kmat + mu))
-
- if ( .lmuuu == "nbcanlink")
- newemu$wrt.eta <- 1
- dmu.deta <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta1
-
- if ( .lmuuu == "nbcanlink")
- newemu$wrt.eta <- 2
- dk.deta1 <- dtheta.deta(mu, .lmuuu , earg = newemu) # eta2
-
- dk.deta2 <- dtheta.deta(kmat, .lsize , earg = .esize )
+ dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+ myderiv <- if ( .lmunb == "nbcanlink") {
+ dmunb.deta1 <- 1 / nbcanlink(mu, size = kmat, wrt.param = 1, deriv = 1)
- myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dk * dk.deta2)
+ dsize.deta1 <- 1 / nbcanlink(mu, size = kmat, wrt.param = 2, deriv = 1)
- if ( .lmuuu == "nbcanlink") {
- if ( iter%% 2 == 0) {
- myderiv[, 1:NOS] <- dl.dk * dk.deta1
- } else {
- }
+ c(w) * cbind(dl.dmunb * dmunb.deta1 * odd.iter +
+ dl.dsize * dsize.deta1 * 1 * even.iter,
+ dl.dsize * dsize.deta * even.iter)
+ } else {
+ dmunb.deta <- dtheta.deta(mu, .lmunb , earg = .emunb )
+ c(w) * cbind(dl.dmunb * dmunb.deta,
+ dl.dsize * dsize.deta)
}
- myderiv <- myderiv[, interleave.VGAM(M, M = M1)]
-
-
- if ( .alternate.derivs || ( .lmuuu == "nbcanlink")) { # 20130823 added
- }
+ myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
myderiv
- }), list( .lmuuu = lmuuu, .lsize = lsize,
- .alternate.derivs = alternate.derivs,
- .deviance.arg = deviance.arg,
- .emuuu = emuuu, .esize = esize))),
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
+ .deviance.arg = deviance.arg ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M)
+ wz <- matrix(NA_real_, n, M)
- max.qnbinom <- .max.qnbinom
+ max.support <- .max.support
max.chunk.MB <- .max.chunk.MB
- EIM.NB.special2 <- function(mu, size, y.max = NULL,
- cutoff.prob = 0.995,
- intercept.only = FALSE) {
-
-
-
- if (intercept.only) {
- mu <- mu[1]
- size <- size[1]
- }
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 0
+ Q.maxs <- qnbinom(p = eff.p[2],
+ mu = mu[, jay],
+ size = kmat[, jay]) + 10
+
+
+ eps.trig <- .eps.trig
+ Q.MAXS <- if ( .lsize == "loge")
+ pmax(10, ceiling(kmat[, jay] / sqrt(eps.trig))) else Inf
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+
+
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ if (FALSE)
+ wz[sind2, M1*jay] <-
+ EIM.NB.speciald(mu = mu[sind2, jay],
+ size = kmat[sind2, jay],
+ y.min = min(Q.mins[sind2]), # 20160130
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ intercept.only = intercept.only)
+ wz[sind2, M1*jay] <-
+ EIM.NB.specialp(mu = mu[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ intercept.only = intercept.only)
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0)) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
- if (!is.numeric(y.max)) {
- y.max <- max(qnbinom(p = cutoff.prob, mu = mu, size = size)) + 2
- }
+ lwr.ptr <- upr.ptr + 1
+ } # while
+ } # if
+ } # end of for (jay in 1:NOS)
- Y.mat <- if (intercept.only) 0:y.max else
- matrix(0:y.max, length(mu), y.max+1, byrow = TRUE)
- trigg.term <- if (intercept.only) {
- dnbinom(Y.mat, size = size, mu = mu) %*% trigamma(Y.mat + size)
- } else {
- rowSums(dnbinom(Y.mat, size = size, mu = mu) *
- trigamma(Y.mat + size))
- }
- ned2l.dk2 <- trigamma(size) - 1 / size + 1 / (size + mu) - trigg.term
- ned2l.dk2
- } # end of EIM.NB.special2()
-
- ind2 <- matrix(FALSE, n, NOS) # Used for SFS
- for (jay in 1:NOS) {
- Q.maxs <- qnbinom(p = .cutoff.prob , mu = mu[, jay],
- size = kmat[, jay])
- ind1 <- if (max.chunk.MB > 0) (Q.maxs < max.qnbinom) else FALSE
- if ((NN <- sum(ind1)) > 0) {
- Object.Size <- NN * 8 * max(Q.maxs) / (2^20) # Mb; 8 bytes/double
- n.chunks <- if (intercept.only) 1 else
- max(1, ceiling( Object.Size / max.chunk.MB))
- chunk.rows <- ceiling(NN / n.chunks)
- ind2[, jay] <- ind1 # Save this
- wind2 <- which(ind1)
-
-
- upr.ptr <- 0
- lwr.ptr <- upr.ptr + 1
- while (lwr.ptr <= NN) {
- upr.ptr <- min(upr.ptr + chunk.rows, NN)
- sind2 <- wind2[lwr.ptr:upr.ptr]
- wz[sind2, M1*jay] <-
- EIM.NB.special2(mu = mu[sind2, jay],
- size = kmat[sind2, jay],
- y.max = max(Q.maxs[sind2]),
- cutoff.prob = .cutoff.prob ,
- intercept.only = intercept.only) *
- (dk.deta2[sind2, jay])^2
- lwr.ptr <- upr.ptr + 1
- } # while
- }
- } # end of for (jay in 1:NOS)
@@ -4630,46 +4756,56 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
muvec <- mu[ii.TF, jay]
for (ii in 1:( .nsimEIM )) {
ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
- dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
- (ysim + kkvec) / (muvec + kkvec) +
- 1 + log(kkvec / (kkvec + muvec))
- run.varcov <- run.varcov + dl.dk^2
+ dl.dsize <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p( -muvec / (kkvec + muvec))
+ run.varcov <- run.varcov + dl.dsize^2
} # end of for loop
run.varcov <- c(run.varcov / .nsimEIM )
- ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+ ned2l.dsize2 <- if (intercept.only) mean(run.varcov) else run.varcov
- wz[ii.TF, M1*jay] <- ned2l.dk2 * (dk.deta2[ii.TF, jay])^2
+ wz[ii.TF, M1*jay] <- ned2l.dsize2
}
}
+
save.weights <- !all(ind2)
- ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
- wz[, M1*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
+
+ ned2l.dmunb2 <- 1 / mu - 1 / (mu + kmat)
+ ned2l.dsize2 <- wz[, M1*(1:NOS), drop = FALSE]
+ if ( .lmunb == "nbcanlink") {
+ wz <- cbind(wz, matrix(0, n, M-1)) # Make it tridiagonal
- if ( FALSE && .lmuuu == "nbcanlink") {
- if ( iter %% 2 == 0) {
+ wz[, M1*(1:NOS) - 1] <-
+ (ned2l.dmunb2 * (mu/kmat)^2 * odd.iter +
+ ned2l.dsize2 * even.iter * 1) *
+ (mu + kmat)^2
- wz[, M1*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
- } else {
- }
+ wz[, M + M1*(1:NOS) - 1] <-
+ -(mu + kmat) * ned2l.dsize2 * dsize.deta * even.iter
+ } else {
+ wz[, c(TRUE, FALSE)] <- ned2l.dmunb2 * dmunb.deta^2
}
+ wz[, M1*(1:NOS)] <- wz[, M1*(1:NOS)] * dsize.deta^2
+
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .cutoff.prob = cutoff.prob,
- .max.qnbinom = max.qnbinom,
+ .max.support = max.support,
.max.chunk.MB = max.chunk.MB,
- .lmuuu = lmuuu,
+ .lmunb = lmunb, .lsize = lsize,
+ .eps.trig = eps.trig,
.nsimEIM = nsimEIM ))))
@@ -4685,9 +4821,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
+ eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
kmat <- eta2theta(eta.k, .lsize , earg = .esize )
if (residuals) {
@@ -4704,7 +4838,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
}
}
}, list( .lsize = lsize, .esize = esize,
- .lmuuu = lmuuu, .emuuu = emuuu )))
+ .lmunb = lmunb, .emunb = emunb )))
@@ -4733,16 +4867,29 @@ polya.control <- function(save.weights = TRUE, ...) {
polya <-
- function(lprob = "logit", lsize = "loge",
- iprob = NULL, isize = NULL,
- probs.y = 0.75,
- nsimEIM = 100,
+ function(
+ zero = "size",
+ type.fitted = c("mean", "prob"),
+ mds.min = 1e-4,
+ nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
+ eps.trig = 1e-7,
+ max.support = 4000,
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lprob = "logit", lsize = "loge",
imethod = 1,
- ishrinkage = 0.95, zero = -2) {
+ iprob = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ isize = NULL,
+ gsize.mux = exp((-12:6)/2),
+ imunb = NULL) {
deviance.arg <- FALSE # 20131212; for now
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob"))[1]
+
if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
@@ -4750,14 +4897,9 @@ polya.control <- function(save.weights = TRUE, ...) {
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
if (!is.Numeric(nsimEIM, length.arg = 1,
integer.valued = TRUE))
@@ -4787,17 +4929,25 @@ polya.control <- function(save.weights = TRUE, ...) {
"Variance: mean / prob"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ mds.min = .mds.min ,
+ type.fitted = .type.fitted ,
+ eps.trig = .eps.trig ,
+ parameters.names = c("prob", "size"),
zero = .zero)
- }, list( .zero = zero ))),
+ }, list( .zero = zero, .eps.trig = eps.trig,
+ .type.fitted = type.fitted,
+ .mds.min = mds.min))),
initialize = eval(substitute(expression({
M1 <- 2
@@ -4806,9 +4956,9 @@ polya.control <- function(save.weights = TRUE, ...) {
"Try negbinomial()")
-
temp5 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
+ Is.nonnegative = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
out.wy = TRUE,
@@ -4817,107 +4967,106 @@ polya.control <- function(save.weights = TRUE, ...) {
y <- temp5$y
-
M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
predictors.names <-
- c(namesof(if (NOS == 1) "prob" else
- paste("prob", 1:NOS, sep = ""),
- .lprob , earg = .eprob , tag = FALSE),
- namesof(if (NOS == 1) "size" else
- paste("size", 1:NOS, sep = ""),
- .lsize , earg = .esize , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
+ c(namesof(param.names("prob", NOS), .lprob , earg = .eprob , tag = FALSE),
+ namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (is.null( .nsimEIM )) {
save.weights <- control$save.weights <- FALSE
}
- PROB.INIT <- if (is.numeric( .pinit )) {
- matrix( .pinit, nrow(y), ncol(y), byrow = TRUE)
- } else {
- NULL
- }
if (!length(etastart)) {
- mu.init <- y
- for (iii in 1:ncol(y)) {
- use.this <- if ( .imethod == 1) {
- weighted.mean(y[, iii], w[, iii]) + 1/16
- } else if ( .imethod == 3) {
- c(quantile(y[, iii], probs <- .probs.y) + 1/16)
- } else {
- median(y[, iii]) + 1/16
- }
-
- if (FALSE) {
- mu.init[, iii] <- MU.INIT[, iii]
- } else {
- medabsres <- median(abs(y[, iii] - use.this)) + 1/32
- allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
- mu.init[, iii] <- use.this + (1 - .ishrinkage ) * allowfun(y[, iii] -
- use.this, maxtol = medabsres)
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = FALSE,
+ probs.y = .probs.y )
- mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
- }
- }
-
-
- if ( is.Numeric( .kinit )) {
- kayy.init <- matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
+ if ( is.Numeric( .isize )) {
+ size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
} else {
negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
mu <- extraargs
sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
- k.grid <- 2^((-7):7)
- k.grid <- 2^(seq(-8, 8, length = 40))
- kayy.init <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kayy.init[, spp.] <- grid.search(k.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w,
- extraargs = mu.init[, spp.])
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ size.init[, jay] <- grid.search(size.grid,
+ objfun = negbinomial.Loglikfun,
+ y = y[, jay], # x = x,
+ w = w[, jay],
+ extraargs = munb.init[, jay])
}
}
- prob.init <- if (length(PROB.INIT)) PROB.INIT else
- kayy.init / (kayy.init + mu.init)
+ prob.init <- if (length( .iprob ))
+ matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
+ size.init / (size.init + munb.init)
etastart <-
cbind(theta2eta(prob.init, .lprob , earg = .eprob),
- theta2eta(kayy.init, .lsize , earg = .esize))
+ theta2eta(size.init, .lsize , earg = .esize))
etastart <-
- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
- .pinit = iprob, .kinit = isize,
+ .iprob = iprob, .isize = isize,
+ .pinit = iprob,
+ .gsize.mux = gsize.mux,
.probs.y = probs.y,
.ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
- .imethod = imethod ))),
+ .imethod = imethod , .imunb = imunb,
+ .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lprob , earg = .eprob)
- kmat <- eta2theta(eta[, M1*(1:NOS)- 0, drop = FALSE],
- .lsize , earg = .esize)
- kmat / (kmat + pmat)
+ pmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lprob , earg = .eprob )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lsize , earg = .esize )
+
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob"))[1]
+
+ ans <- switch(type.fitted,
+ "mean" = kmat * (1 - pmat) / pmat,
+ "prob" = pmat)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
temp0303 <- c(rep( .lprob , length = NOS),
- rep( .lsize , length = NOS))
- names(temp0303) =
- c(if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""),
- if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
- misc$link <- temp0303 # Already named
+ rep( .lsize , length = NOS))
+ names(temp0303) <- c(param.names("prob", NOS),
+ param.names("size", NOS))
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+ misc$link <- temp0303 # Already named
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -4929,10 +5078,7 @@ polya.control <- function(save.weights = TRUE, ...) {
misc$isize <- .isize
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$expected <- TRUE
misc$ishrinkage <- .ishrinkage
- misc$M1 <- 2
- misc$multipleResponses <- TRUE
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.isize = isize,
@@ -4944,21 +5090,19 @@ polya.control <- function(save.weights = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+ pmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.lprob , earg = .eprob)
- temp300 <- eta[, M1*(1:NOS) , drop = FALSE]
+ temp300 <- eta[, c(FALSE, TRUE), drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
- temp300 <- ifelse(temp300 > bigval, bigval, temp300)
- temp300 <- ifelse(temp300 < -bigval, -bigval, temp300)
+ temp300[temp300 > bigval] <- bigval
+ temp300[temp300 < -bigval] <- -bigval
}
- kmat <- eta2theta(temp300, .lsize , earg = .esize)
+ kmat <- eta2theta(temp300, .lsize , earg = .esize )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE)
+ ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -4973,7 +5117,6 @@ polya.control <- function(save.weights = TRUE, ...) {
simslot = eval(substitute(
function(object, nsim) {
-
pwts <- if (length(pwts <- object at prior.weights) > 0)
pwts else weights(object, type = "prior")
if (any(pwts != 1))
@@ -4987,79 +5130,153 @@ polya.control <- function(save.weights = TRUE, ...) {
+ validparams = eval(substitute(function(eta, extra = NULL) {
+ pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
+ size <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+ munb <- size * (1 / pmat - 1)
+
+ smallval <- .mds.min # .munb.div.size
+ okay1 <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ all(is.finite(pmat)) && all(pmat > 0 & pmat < 1)
+ overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "replacing them by an arbitrary large value within ",
+ "the parameter space. Try fitting a quasi-Poisson ",
+ "model instead.")
+ okay1 && overdispersion
+ }, list( .lprob = lprob, .eprob = eprob,
+ .lsize = lsize, .esize = esize,
+ .mds.min = mds.min))),
+
deriv = eval(substitute(expression({
M1 <- 2
NOS <- ncol(eta) / M1
- M <- ncol(eta)
- pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
- .lprob , earg = .eprob)
- temp3 <- eta[, M1*(1:NOS) , drop = FALSE]
+ pmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lprob , earg = .eprob )
+ temp3 <- eta[, c(FALSE, TRUE), drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
- temp3 <- ifelse(temp3 > bigval, bigval, temp3)
- temp3 <- ifelse(temp3 < -bigval, -bigval, temp3)
- }
- kmat <- eta2theta(temp3, .lsize , earg = .esize)
+ temp3[temp3 > bigval] <- bigval # pmin() collapses matrices
+ temp3[temp3 < -bigval] <- -bigval
+ }
+ kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
dl.dprob <- kmat / pmat - y / (1.0 - pmat)
dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
- dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
- dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
- dthetas.detas <- cbind(dprob.deta, dkayy.deta)
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
- myderiv <- c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob )
+ dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+
+ myderiv <- c(w) * cbind(dl.dprob * dprob.deta,
+ dl.dkayy * dkayy.deta)
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize))),
weight = eval(substitute(expression({
- wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ wz <- matrix(0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
- mumat <- as.matrix(mu)
- for (spp. in 1:NOS) {
+
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
+
+
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 0
+ Q.maxs <- qnbinom(p = eff.p[2],
+ mu = mu[, jay],
+ size = kmat[, jay]) + 10
+
+
+
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig)))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+
+
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ wz[sind2, M1*jay] <-
+ EIM.NB.specialp(mu = mu[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ intercept.only = intercept.only,
+ extra.bit = FALSE)
+ lwr.ptr <- upr.ptr + 1
+ } # while
+ } # if
+ } # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+ for (jay in 1:NOS) {
run.varcov <- 0
- kvec <- kmat[, spp.]
- pvec <- pmat[, spp.]
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ ppvec <- pmat[ii.TF, jay]
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- mu[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
- for (ii in 1:( .nsimEIM )) {
- ysim <- rnbinom(n = n, prob = pvec, size = kvec)
-
- dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
- dl.dkayy <- digamma(ysim + kvec) - digamma(kvec) + log(pvec)
- temp3 <- cbind(dl.dprob, dl.dkayy)
- run.varcov <- run.varcov +
- temp3[, ind1$row.index] *
- temp3[, ind1$col.index]
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+ wz[ii.TF, M1*jay] <- ned2l.dk2 # * (dk.deta2[ii.TF, jay])^2
}
- run.varcov <- cbind(run.varcov / .nsimEIM)
+ }
- wz1 <- if (intercept.only)
- matrix(colMeans(run.varcov),
- nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
- run.varcov
- wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
- dThetas.detas[, M1 * (spp. - 1) + ind1$col]
+ wz[, M1*(1:NOS) ] <- wz[, M1 * (1:NOS)] * dkayy.deta^2
- for (jay in 1:M1)
- for (kay in jay:M1) {
- cptr <- iam((spp. - 1) * M1 + jay,
- (spp. - 1) * M1 + kay,
- M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
- }
- } # End of for (spp.) loop
+ save.weights <- !all(ind2)
+
+
+ ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
+ wz[, M1*(1:NOS) - 1] <- ned2l.dprob2 * dprob.deta^2
+
+ ned2l.dkayyprob <- -1 / pmat
+ wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
+
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
- }), list( .nsimEIM = nsimEIM ))))
+ }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB,
+ .nsimEIM = nsimEIM ))))
@@ -5068,11 +5285,7 @@ polya.control <- function(save.weights = TRUE, ...) {
ans at deviance <- eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- temp300 <- eta[, M1*(1:NOS), drop = FALSE]
-
-
+ temp300 <- eta[, c(FALSE, TRUE), drop = FALSE]
if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
@@ -5101,7 +5314,7 @@ polya.control <- function(save.weights = TRUE, ...) {
}
}
}, list( .lsize = lsize, .eprob = eprob,
- .esize = esize)))
+ .esize = esize )))
ans
} # End of polya()
@@ -5123,33 +5336,41 @@ polyaR.control <- function(save.weights = TRUE, ...) {
polyaR <-
- function(lsize = "loge", lprob = "logit",
- isize = NULL, iprob = NULL,
- probs.y = 0.75,
- nsimEIM = 100,
+ function(
+ zero = "size",
+ type.fitted = c("mean", "prob"),
+ mds.min = 1e-4,
+ nsimEIM = 500, cutoff.prob = 0.999, # Maxiter = 5000,
+ eps.trig = 1e-7,
+ max.support = 4000,
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lsize = "loge", lprob = "logit",
imethod = 1,
- ishrinkage = 0.95, zero = -1) {
-
+ isize = NULL,
+ iprob = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ gsize.mux = exp((-12:6)/2),
+ imunb = NULL) {
deviance.arg <- FALSE # 20131212; for now
+
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob"))[1]
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
+
if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
stop("bad input for argument 'iprob'")
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
if (!is.Numeric(nsimEIM, length.arg = 1,
integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
@@ -5178,17 +5399,25 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Variance: mean / prob"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
- zero = .zero)
- }, list( .zero = zero ))),
+ expected = TRUE,
+ mds.min = .mds.min ,
+ multipleResponses = TRUE,
+ type.fitted = .type.fitted ,
+ parameters.names = c("size", "prob"),
+ eps.trig = .eps.trig ,
+ zero = .zero )
+ }, list( .zero = zero, .eps.trig = eps.trig,
+ .type.fitted = type.fitted,
+ .mds.min = mds.min))),
initialize = eval(substitute(expression({
M1 <- 2
@@ -5197,9 +5426,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Try negbinomial()")
-
temp5 <- w.y.check(w = w, y = y,
Is.integer.y = TRUE,
+ Is.nonnegative = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
out.wy = TRUE,
@@ -5208,111 +5437,107 @@ polyaR.control <- function(save.weights = TRUE, ...) {
y <- temp5$y
-
M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
+ extra$type.fitted <- .type.fitted
+ extra$dimnamesy <- dimnames(y)
predictors.names <-
- c(namesof(if (NOS == 1) "size" else
- paste("size", 1:NOS, sep = ""),
- .lsize , earg = .esize , tag = FALSE),
- namesof(if (NOS == 1) "prob" else
- paste("prob", 1:NOS, sep = ""),
- .lprob , earg = .eprob , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
+ c(namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE),
+ namesof(param.names("prob", NOS), .lprob , earg = .eprob , tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
if (is.null( .nsimEIM )) {
save.weights <- control$save.weights <- FALSE
}
- PROB.INIT <- if (is.numeric( .pinit )) {
- matrix( .pinit, nrow(y), ncol(y), byrow = TRUE)
- } else {
- NULL
- }
if (!length(etastart)) {
- mu.init <- y
- for (iii in 1:ncol(y)) {
- use.this <- if ( .imethod == 1) {
- weighted.mean(y[, iii], w[, iii]) + 1/16
- } else if ( .imethod == 3) {
- c(quantile(y[, iii], probs <- .probs.y) + 1/16)
- } else {
- median(y[, iii]) + 1/16
- }
-
- if (FALSE) {
- mu.init[, iii] <- MU.INIT[, iii]
- } else {
- medabsres <- median(abs(y[, iii] - use.this)) + 1/32
- allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
- mu.init[, iii] <- use.this +
- (1 - .ishrinkage ) * allowfun(y[, iii] -
- use.this, maxtol = medabsres)
-
- mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
- }
- }
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = FALSE,
+ probs.y = .probs.y )
-
- if ( is.Numeric( .kinit )) {
- kayy.init <- matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
+ if ( is.Numeric( .isize )) {
+ size.init <- matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
} else {
negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
mu <- extraargs
sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
- k.grid <- 2^((-7):7)
- k.grid <- 2^(seq(-8, 8, length = 40))
- kayy.init <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kayy.init[, spp.] <- grid.search(k.grid,
- objfun = negbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w,
- extraargs = mu.init[, spp.])
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ size.init[, jay] <- grid.search(size.grid,
+ objfun = negbinomial.Loglikfun,
+ y = y[, jay], # x = x,
+ w = w[, jay],
+ extraargs = munb.init[, jay])
}
}
- prob.init <- if (length(PROB.INIT)) PROB.INIT else
- kayy.init / (kayy.init + mu.init)
+ prob.init <- if (length( .iprob ))
+ matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
+ size.init / (size.init + munb.init)
etastart <-
- cbind(theta2eta(kayy.init, .lsize , earg = .esize ),
+ cbind(theta2eta(size.init, .lsize , earg = .esize ),
theta2eta(prob.init, .lprob , earg = .eprob ))
-
etastart <-
- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
- .pinit = iprob, .kinit = isize,
+ .iprob = iprob, .isize = isize,
+ .pinit = iprob,
+ .gsize.mux = gsize.mux,
.probs.y = probs.y,
.ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
- .imethod = imethod ))),
+ .imethod = imethod , .imunb = imunb,
+ .type.fitted = type.fitted ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- kmat <- eta2theta(eta[, M1*(1:NOS)- 1, drop = FALSE],
- .lsize , earg = .esize)
- pmat <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
- .lprob , earg = .eprob)
- kmat / (kmat + pmat)
+ kmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+ .lsize , earg = .esize )
+ pmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lprob , earg = .eprob )
+
+ type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+ warning("cannot find 'type.fitted'. ",
+ "Returning the 'mean'.")
+ "mean"
+ }
+
+ type.fitted <- match.arg(type.fitted,
+ c("mean", "prob"))[1]
+
+ ans <- switch(type.fitted,
+ "mean" = kmat * (1 - pmat) / pmat,
+ "prob" = pmat)
+ if (length(extra$dimnamesy) &&
+ is.matrix(ans) &&
+ length(extra$dimnamesy[[2]]) == ncol(ans) &&
+ length(extra$dimnamesy[[2]]) > 0) {
+ if (length(extra$dimnamesy[[1]]) == nrow(ans))
+ dimnames(ans) <- extra$dimnamesy
+ } else
+ if (NCOL(ans) == 1 &&
+ is.matrix(ans)) {
+ colnames(ans) <- NULL
+ }
+ ans
}, list( .lprob = lprob, .eprob = eprob,
.lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
- temp0303 <- c(rep( .lsize , length = NOS),
- rep( .lprob , length = NOS))
-
- names(temp0303) <-
- c(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
- if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""))
-
- temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
- misc$link <- temp0303 # Already named
+ temp0303 <- c(rep( .lprob , length = NOS),
+ rep( .lsize , length = NOS))
+ names(temp0303) <- c(param.names("size", NOS),
+ param.names("prob", NOS))
+ temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+ misc$link <- temp0303 # Already named
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
@@ -5324,10 +5549,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
misc$isize <- .isize
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
- misc$expected <- TRUE
misc$ishrinkage <- .ishrinkage
- misc$M1 <- 2
- misc$multipleResponses <- TRUE
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.isize = isize,
@@ -5339,21 +5561,19 @@ polyaR.control <- function(save.weights = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- pmat <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
+ pmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.lprob , earg = .eprob)
- temp300 <- eta[, M1*(1:NOS) - 1, drop = FALSE]
+ temp300 <- eta[, c(TRUE, FALSE), drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
- temp300 <- ifelse(temp300 > bigval, bigval, temp300)
- temp300 <- ifelse(temp300 < -bigval, -bigval, temp300)
+ temp300[temp300 > bigval] <- bigval
+ temp300[temp300 < -bigval] <- -bigval
}
kmat <- eta2theta(temp300, .lsize , earg = .esize)
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ll.elts <- c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE)
+ ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -5368,7 +5588,6 @@ polyaR.control <- function(save.weights = TRUE, ...) {
simslot = eval(substitute(
function(object, nsim) {
-
pwts <- if (length(pwts <- object at prior.weights) > 0)
pwts else weights(object, type = "prior")
if (any(pwts != 1))
@@ -5381,79 +5600,155 @@ polyaR.control <- function(save.weights = TRUE, ...) {
.eprob = eprob, .esize = esize ))),
+ validparams = eval(substitute(function(eta, extra = NULL) {
+ size <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
+ pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
+ munb <- size * (1 / pmat - 1)
+
+ smallval <- .mds.min # .munb.div.size
+ overdispersion <- all(munb / size > smallval)
+ ans <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ all(is.finite(pmat)) && all(pmat > 0 & pmat < 1) &&
+ overdispersion
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "replacing them by an arbitrary large value within ",
+ "the parameter space. Try fitting a quasi-Poisson ",
+ "model instead.")
+ ans
+ }, list( .lprob = lprob, .eprob = eprob,
+ .lsize = lsize, .esize = esize,
+ .mds.min = mds.min))),
deriv = eval(substitute(expression({
M1 <- 2
NOS <- ncol(eta) / M1
- M <- ncol(eta)
- pmat <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
- .lprob , earg = .eprob)
- temp3 <- eta[, M1*(1:NOS) - 1, drop = FALSE]
+ pmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+ .lprob , earg = .eprob)
+ temp3 <- eta[, c(TRUE, FALSE), drop = FALSE]
if ( .lsize == "loge") {
bigval <- 68
- temp3 <- ifelse(temp3 > bigval, bigval, temp3)
- temp3 <- ifelse(temp3 < -bigval, -bigval, temp3)
- }
- kmat <- eta2theta(temp3, .lsize , earg = .esize)
+ temp3[temp3 > bigval] <- bigval # pmin() collapses matrices
+ temp3[temp3 < -bigval] <- -bigval
+ }
+ kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
- dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
dl.dprob <- kmat / pmat - y / (1.0 - pmat)
+ dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
- dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
- dthetas.detas <- cbind(dkayy.deta, dprob.deta)
- dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
- myderiv <- c(w) * cbind(dl.dkayy, dl.dprob) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = M1)]
+ dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
+
+ myderiv <- c(w) * cbind(dl.dkayy * dkayy.deta,
+ dl.dprob * dprob.deta)
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize))),
weight = eval(substitute(expression({
wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
- mumat <- as.matrix(mu)
- for (spp. in 1:NOS) {
+
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
+
+
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 0
+ Q.maxs <- qnbinom(p = eff.p[2],
+ mu = mu[, jay],
+ size = kmat[, jay]) + 10
+
+
+
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig) - kmat[, jay]))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+
+
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ wz[sind2, M1*jay - 1] <-
+ EIM.NB.specialp(mu = mu[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ intercept.only = intercept.only,
+ extra.bit = FALSE)
+ lwr.ptr <- upr.ptr + 1
+ } # while
+ } # if
+ } # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+ for (jay in 1:NOS) {
run.varcov <- 0
- kvec <- kmat[, spp.]
- pvec <- pmat[, spp.]
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ ppvec <- pmat[ii.TF, jay]
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- mu[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
- for (ii in 1:( .nsimEIM )) {
- ysim <- rnbinom(n = n, prob = pvec, size = kvec)
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
- dl.dkayy <- digamma(ysim + kvec) - digamma(kvec) + log(pvec)
- dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
- temp3 <- cbind(dl.dkayy, dl.dprob)
- run.varcov <- run.varcov + temp3[, ind1$row.index] *
- temp3[, ind1$col.index]
+ wz[ii.TF, M1*jay - 1] <- ned2l.dk2 # * (dk.deta2[ii.TF, jay])^2
}
- run.varcov <- cbind(run.varcov / .nsimEIM)
+ }
- wz1 <- if (intercept.only)
- matrix(colMeans(run.varcov),
- nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
- run.varcov
- wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
- dThetas.detas[, M1 * (spp. - 1) + ind1$col]
+ wz[, M1*(1:NOS) - 1] <- wz[, M1*(1:NOS) - 1] * dkayy.deta^2
- for (jay in 1:M1)
- for (kay in jay:M1) {
- cptr <- iam((spp. - 1) * M1 + jay,
- (spp. - 1) * M1 + kay,
- M = M)
- wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
- }
- } # End of for (spp.) loop
+ save.weights <- !all(ind2)
+
+
+ ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
+ wz[, M1*(1:NOS) ] <- ned2l.dprob2 * dprob.deta^2
+
+ ned2l.dkayyprob <- -1 / pmat
+ wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
+
w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
- }), list( .nsimEIM = nsimEIM ))))
+ }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB,
+ .nsimEIM = nsimEIM ))))
@@ -5462,11 +5757,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
ans at deviance <- eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL,
summation = TRUE) {
- M1 <- 2
- NOS <- ncol(eta) / M1
- temp300 <- eta[, M1*(1:NOS) - 1, drop = FALSE]
-
-
+ temp300 <- eta[, c(FALSE, TRUE), drop = FALSE]
if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
@@ -5495,7 +5786,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
}
}
}, list( .lsize = lsize, .eprob = eprob,
- .esize = esize)))
+ .esize = esize )))
ans
} # End of polyaR()
@@ -5745,7 +6036,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
imethod = 1,
- zero = -(2:3)) {
+ zero = c("scale", "df")) {
@@ -5793,12 +6084,16 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Variance: scale^2 * df / (df - 2) if df > 2\n"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("location", "scale", "df"),
zero = .zero)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -5820,15 +6115,15 @@ polyaR.control <- function(save.weights = TRUE, ...) {
extra$M1 <- M1
M <- M1 * ncoly #
- mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
- mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
- mynames3 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
+ mynames1 <- param.names("location", NOS)
+ mynames2 <- param.names("scale", NOS)
+ mynames3 <- param.names("df", NOS)
predictors.names <-
c(namesof(mynames1, .lloc , earg = .eloc , tag = FALSE),
namesof(mynames2, .lsca , earg = .esca , tag = FALSE),
namesof(mynames3, .ldof , earg = .edof , tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
+ predictors.names[interleave.VGAM(M1 * NOS, M1 = M1)]
if (!length(etastart)) {
init.loc <- if (length( .iloc )) .iloc else {
@@ -5862,7 +6157,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
mat3 <- matrix(theta2eta(init.dof, .ldof , earg = .edof ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2, mat3)
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
.lsca = lsca, .esca = esca, .isca = isca,
@@ -5883,9 +6178,9 @@ polyaR.control <- function(save.weights = TRUE, ...) {
misc$link <- c(rep( .lloc , length = NOS),
rep( .lsca , length = NOS),
rep( .ldof , length = NOS))
- misc$link <- misc$link[interleave.VGAM(M1 * NOS, M = M1)]
+ misc$link <- misc$link[interleave.VGAM(M1 * NOS, M1 = M1)]
temp.names <- c(mynames1, mynames2, mynames3)
- temp.names <- temp.names[interleave.VGAM(M1 * NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1 * NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1 * NOS)
@@ -5976,7 +6271,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
ans <- c(w) * cbind(dl.dloc * dloc.deta,
dl.dsca * dsca.deta,
dl.ddof * ddof.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -6053,7 +6348,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
- zero = -2) {
+ zero = "scale") {
lloc <- as.list(substitute(llocation))
eloc <- link2list(lloc)
@@ -6094,13 +6389,17 @@ polyaR.control <- function(save.weights = TRUE, ...) {
"Variance: scale^2 * df / (df - 2) if df > 2\n"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
- zero = .zero)
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("location", "scale"),
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
M1 <- 2
@@ -6120,13 +6419,13 @@ polyaR.control <- function(save.weights = TRUE, ...) {
extra$M1 <- M1
M <- M1 * ncoly #
- mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
- mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
+ mynames1 <- param.names("location", NOS)
+ mynames2 <- param.names("scale", NOS)
predictors.names <-
c(namesof(mynames1, .lloc , earg = .eloc , tag = FALSE),
namesof(mynames2, .lsca , earg = .esca , tag = FALSE))
predictors.names <-
- predictors.names[interleave.VGAM(M1 * NOS, M = M1)]
+ predictors.names[interleave.VGAM(M1 * NOS, M1 = M1)]
if (!length(etastart)) {
@@ -6146,7 +6445,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
mat2 <- matrix(theta2eta(init.sca, .lsca , earg = .esca ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2)
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
.lsca = lsca, .esca = esca, .isca = isca,
@@ -6167,7 +6466,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
misc$link <- c(rep( .lloc , length = NOS),
rep( .lsca , length = NOS))
temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(M1 * NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1 * NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- temp.names
@@ -6256,7 +6555,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -6273,7 +6572,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
ned2l.dscale2 <- 2.0 * const2 / Sca^2 # 2.0 seems to work
- wz <- matrix(as.numeric(NA), n, M) #2=M; diagonal!
+ wz <- matrix(NA_real_, n, M) #2=M; diagonal!
wz[, M1*(1:NOS) - 1] <- ned2l.dlocat2 * dlocat.deta^2
wz[, M1*(1:NOS) ] <- ned2l.dscale2 * dscale.deta^2
@@ -6295,9 +6594,6 @@ polyaR.control <- function(save.weights = TRUE, ...) {
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -6341,9 +6637,8 @@ polyaR.control <- function(save.weights = TRUE, ...) {
extra$ncoly <- NOS <- ncoly # Number of species
- mynames1 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("df", NOS)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(mustart) && !length(etastart))
mustart <- y + (1 / 8) * (y == 0)
@@ -6502,7 +6797,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
simplex <- function(lmu = "logit", lsigma = "loge",
imu = NULL, isigma = NULL,
imethod = 1, ishrinkage = 0.95,
- zero = 2) {
+ zero = "sigma") {
@@ -6528,9 +6823,6 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
ishrinkage > 1)
stop("bad input for argument 'ishrinkage'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -6545,8 +6837,23 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
"Mean: mu\n",
"Variance function: V(mu) = mu^3 * (1 - mu)^3"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("mu", "sigma"),
+ lmu = .lmu ,
+ lsigma = .lsigma ,
+ zero = .zero )
+ }, list( .zero = zero, .lsigma = lsigma, .lmu = lmu
+ ))),
+
initialize = eval(substitute(expression({
if (any(y <= 0.0 | y >= 1.0))
stop("all 'y' values must be in (0,1)")
@@ -6558,14 +6865,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
predictors.names <- c(
namesof("mu", .lmu , earg = .emu , tag = FALSE),
- namesof("sigma", .lsigma , earg = .esigma, tag = FALSE))
+ namesof("sigma", .lsigma , earg = .esigma , tag = FALSE))
deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
if (!length(etastart)) {
- use.this =
+ use.this <-
if ( .imethod == 3) weighted.mean(y, w = w) else
if ( .imethod == 1) median(y) else
mean(y, trim = 0.1)
@@ -6574,7 +6881,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
init.mu <- (1 - .ishrinkage ) * y + .ishrinkage * use.this
mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
sigma.init <- if (length( .isigma )) rep( .isigma, leng = n) else {
- use.this <- deeFun(y, mu=init.mu)
+ use.this <- deeFun(y, mu = init.mu)
rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
if ( .imethod == 1) median(use.this) else
mean(use.this, trim = 0.1)),
@@ -6781,7 +7088,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
weight = eval(substitute(expression({
d2l.dthetas2 <- attr(eval.d3, "hessian")
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
@@ -7088,7 +7395,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
d2l.dthetas2[, 2, 2] <- c(w) * (-0.25*trigamma((lambda+1)/2) +
0.25*trigamma(1+lambda/2))
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
wz[, iam(1, 1, M)] <- -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
wz[, iam(2, 2, M)] <- -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
wz[, iam(1, 2, M)] <- -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
@@ -7127,9 +7434,6 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -7236,7 +7540,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
d2l.dlambda2 <- 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
ned2l.dlambdarho <- -1/rho
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
wz[, iam(1, 1, M)] <- ned2l.drho2 * drho.deta^2
wz[, iam(1, 2, M)] <- ned2l.dlambdarho * dlambda.deta * drho.deta
wz[, iam(2, 2, M)] <- d2l.dlambda2 * dlambda.deta^2
@@ -7300,7 +7604,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
use.approx = TRUE,
imethod = 1,
ishrinkage = 0.95,
- zero = -1) {
+ zero = "lambda") {
@@ -7339,15 +7643,17 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
"Variance: theta / (1-lambda)^3"),
constraints = eval(substitute(expression({
- M1 <- 2
- dotzero <- .zero
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = FALSE,
multipleResponses = TRUE,
+ parameters.names = c("lambda", "theta"),
imethod = .imethod ,
zero = .zero )
}, list( .zero = zero,
@@ -7356,9 +7662,9 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
initialize = eval(substitute(expression({
temp5 <-
w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
ncol.w.max = Inf, # 1,
ncol.y.max = Inf, # 1,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -7374,7 +7680,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
predictors.names <-
c(namesof(mynames1, .llambda , earg = .elambda , tag = FALSE),
namesof(mynames2, .ltheta , earg = .etheta , tag = FALSE))
- predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
init.lambda <- init.theta <- matrix(0, n, NOS)
for (spp. in 1: NOS) {
@@ -7409,7 +7715,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
etastart <-
cbind(theta2eta(init.lambda, .llambda , earg = .elambda ),
theta2eta(init.theta, .ltheta , earg = .etheta ))
- etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
}
}), list( .ltheta = ltheta, .llambda = llambda,
.etheta = etheta, .elambda = elambda,
@@ -7425,7 +7731,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
M1 <- extra$M1
temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
misc$link <- rep( .llambda , length = M1 * ncoly)
misc$earg <- vector("list", M1 * ncoly)
@@ -7480,7 +7786,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
myderiv <- c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dtheta * dTHETA.deta )
- myderiv[, interleave.VGAM(M, M = M1)]
+ myderiv[, interleave.VGAM(M, M1 = M1)]
}), list( .ltheta = ltheta, .llambda = llambda,
.etheta = etheta, .elambda = elambda ))),
weight = eval(substitute(expression({
@@ -7675,12 +7981,10 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
lgamma3 <-
function(llocation = "identitylink", lscale = "loge", lshape = "loge",
- ilocation = NULL, iscale = NULL, ishape = 1, zero = 2:3) {
+ ilocation = NULL, iscale = NULL, ishape = 1,
+ zero = c("scale", "shape")) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) &&
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -7711,9 +8015,27 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
namesof("scale", lscale, earg = escale), ", ",
namesof("shape", lshape, earg = eshape), "\n\n",
"Mean: a + b * digamma(k)", "\n"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "shape"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat ,
+ .lscale = lscale ,
+ .lshape = lshape ))),
+
initialize = eval(substitute(expression({
w.y.check(w = w, y = y,
@@ -7840,7 +8162,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
ned2l.dadk <- 1 / b
ned2l.dbdk <- digamma(k) / b
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, iam(1, 1, M)] <- ned2l.da2 * da.deta^2
wz[, iam(2, 2, M)] <- ned2l.db2 * db.deta^2
wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
@@ -7858,11 +8180,9 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
prentice74 <-
function(llocation = "identitylink", lscale = "loge",
lshape = "identitylink",
- ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
+ ilocation = NULL, iscale = NULL, ishape = NULL,
+ zero = c("scale", "shape")) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) &&
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -7890,12 +8210,30 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
"location = a, scale = b > 0, shape = q\n\n",
"Links: ",
namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n", "\n",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n", "\n",
"Mean: a", "\n"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(M1 = 2,
+ Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("location", "scale", "shape"),
+ llocation = .llocat ,
+ lscale = .lscale ,
+ lshape = .lshape ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .llocat = llocat ,
+ .lscale = lscale ,
+ .lshape = lshape ))),
+
initialize = eval(substitute(expression({
@@ -8002,7 +8340,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
ned2l.dadk <- (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
ned2l.dbdk <- (sigmastar2*tmp55 - 1) / (b*k)
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, iam(1, 1, M)] <- ned2l.da2 * da.deta^2
wz[, iam(2, 2, M)] <- ned2l.db2 * db.deta^2
wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
@@ -8117,9 +8455,6 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (length(iscale) &&
!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -8167,7 +8502,7 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
if (!length(etastart)) {
sc.init <-
dd.init <-
- kk.init <- matrix(as.numeric(NA), n, NOS)
+ kk.init <- matrix(NA_real_, n, NOS)
for (spp. in 1:NOS) { # For each response 'y_spp.'... do:
yvec <- y[, spp.]
@@ -8184,7 +8519,7 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
gshape2.p <- rep( .ik , length = NOS)
allmat1 <- expand.grid(shape1.d = gshape1.d,
shape2.k = gshape2.k)
- allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+ allmat2 <- matrix(NA_real_, nrow(allmat1), 2)
ll.gstacy <- function(scaleval, x = x, y = y, w = w, extraargs) {
ans <- sum(c(w) * dgengamma.stacy(x = y,
@@ -8308,7 +8643,7 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
ned2l.dbdk <- d / b
ned2l.dddk <- -digamma(k) / d
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, iam(1, 1, M)] <- ned2l.db2 * db.deta^2
wz[, iam(2, 2, M)] <- ned2l.dd2 * dd.deta^2
wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
@@ -8456,9 +8791,6 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -8502,9 +8834,8 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
M <- M1 * ncoly
- mynames1 <- paste("c", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+ mynames1 <- param.names("c", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
@@ -8762,7 +9093,7 @@ rlevy <- function(n, location = 0, scale = 1)
.delta.known = delta.known,
.delta = delta ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz <- matrix(NA_real_, n, dimm(M))
wz[, iam(1, 1, M)] <- 1 * dgamma.deta^2
if (! .delta.known ) {
wz[, iam(1, 2, M)] <- 3 * dgamma.deta
@@ -8831,9 +9162,6 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
ishape1 = NULL, ishape2 = NULL, ilambda = 1,
zero = NULL) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
@@ -8995,7 +9323,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
ned2l.dsh1lambda <- -sh2 / ((sh1+sh2)*lambda)
ned2l.dsh2lambda <- sh1 / ((sh1+sh2)*lambda)
- wz <- matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz <- matrix(NA_real_, n, dimm(M)) #M==3 means 6=dimm(M)
wz[, iam(1, 1, M)] <- ned2l.dsh1 * dsh1.deta^2
wz[, iam(2, 2, M)] <- ned2l.dsh2 * dsh2.deta^2
wz[, iam(3, 3, M)] <- ned2l.dlambda2 * dlambda.deta^2
@@ -9103,7 +9431,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
d2l.dshape1shape2 <- temp2
- wz <- matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz <- matrix(NA_real_, n, dimm(M)) #3=dimm(M)
wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
@@ -9194,9 +9522,6 @@ rmaxwell <- function(n, rate) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
new("vglmff",
@@ -9238,9 +9563,8 @@ rmaxwell <- function(n, rate) {
M <- M1 * ncoly
- mynames1 <- paste("rate", if (ncoly > 1) 1:ncoly else "", sep = "")
- predictors.names <-
- namesof(mynames1, .link , earg = .earg )
+ mynames1 <- param.names("rate", ncoly)
+ predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(etastart)) {
@@ -9550,7 +9874,7 @@ rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
weight = eval(substitute(expression({
d2l.dshape2 <- trigamma(shape) - 1/shape
d2l.dscale2 <- shape / Scale^2
- wz <- matrix(as.numeric(NA), n, M) # diagonal
+ wz <- matrix(NA_real_, n, M) # diagonal
wz[, iam(1, 1, M)] <- d2l.dscale2 * dscale.deta^2
wz[, iam(2, 2, M)] <- d2l.dshape2 * dshape.deta^2
c(w) * wz
@@ -9672,9 +9996,6 @@ rrayleigh <- function(n, scale = 1) {
stop("bad input for argument 'oim.mean'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
@@ -9685,14 +10006,17 @@ rrayleigh <- function(n, scale = 1) {
namesof("scale", lscale, earg = escale), "\n\n",
"Mean: scale * sqrt(pi / 2)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 1
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 1)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("scale"),
zero = .zero )
}, list( .zero = zero ))),
@@ -9718,7 +10042,7 @@ rrayleigh <- function(n, scale = 1) {
M <- M1 * ncoly
- mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("scale", ncoly)
predictors.names <-
namesof(mynames1, .lscale , earg = .escale , tag = FALSE)
@@ -11029,9 +11353,6 @@ rtruncpareto <- function(n, lower, upper, shape) {
zero = NULL) {
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
if (!is.Numeric(tolerance, positive = TRUE, length.arg = 1) ||
tolerance > 1.0e-2)
@@ -11145,7 +11466,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
.eshape = eshape, .eratee = eratee))),
weight = eval(substitute(expression({
d11 <- 1 / shape^2 # True for all shape
- d22 <- d12 <- rep(as.numeric(NA), length.out = n)
+ d22 <- d12 <- rep(NA_real_, length.out = n)
index2 <- abs(shape - 2) > .tolerance # index2 = shape != 1
largeno <- 10000
if (any(index2)) {
@@ -11322,7 +11643,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
logistic <- function(llocation = "identitylink",
lscale = "loge",
ilocation = NULL, iscale = NULL,
- imethod = 1, zero = -2) {
+ imethod = 1, zero = "scale") {
ilocat <- ilocation
@@ -11332,9 +11653,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
+
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
@@ -11361,12 +11680,17 @@ rtruncpareto <- function(n, lower, upper, shape) {
constraints = eval(substitute(expression({
dotzero <- .zero
M1 <- 2
+
+ Q1 <- 1
+
eval(negzero.expression.VGAM)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ multipleResponses = TRUE,
+ expected = TRUE,
zero = .zero )
}, list( .zero = zero ))),
@@ -11392,12 +11716,13 @@ rtruncpareto <- function(n, lower, upper, shape) {
- mynames1 <- paste("location", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("location", ncoly)
+ mynames2 <- param.names("scale", ncoly)
+ parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
predictors.names <-
c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE),
namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
@@ -11424,7 +11749,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
etastart <- cbind(
theta2eta(locat.init, .llocat , earg = .elocat ),
theta2eta(scale.init, .lscale , earg = .escale ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
}
}), list( .imethod = imethod,
.elocat = elocat, .escale = escale,
@@ -11442,9 +11767,9 @@ rtruncpareto <- function(n, lower, upper, shape) {
M1 <- extra$M1
misc$link <-
c(rep( .llocat , length = ncoly),
- rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M1 = M1)]
temp.names <- c(mynames1, mynames2)[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -11522,14 +11847,14 @@ rtruncpareto <- function(n, lower, upper, shape) {
dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)[, interleave.VGAM(M, M = M1)]
+ dl.dscale * dscale.deta)[, interleave.VGAM(M, M1 = M1)]
}), list( .llocat = llocat, .lscale = lscale,
.elocat = elocat, .escale = escale))),
weight = eval(substitute(expression({
ned2l.dlocat2 <- 1 / (3 * Scale^2)
ned2l.dscale2 <- (3 + pi^2) / (9 * Scale^2)
- wz <- matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
+ wz <- matrix(NA_real_, nrow = n, ncol = M) # diagonal
wz[, (1:ncoly) * M1 - 1] <- ned2l.dlocat2 * dlocat.deta^2
wz[, (1:ncoly) * M1 ] <- ned2l.dscale2 * dscale.deta^2
@@ -11547,7 +11872,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
negbinomial.size <- function(size = Inf,
lmu = "loge",
imu = NULL,
- probs.y = 0.75,
+ probs.y = 0.35,
imethod = 1,
ishrinkage = 0.95, zero = NULL) {
@@ -11596,15 +11921,18 @@ rtruncpareto <- function(n, lower, upper, shape) {
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 1,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("mu"),
zero = .zero)
}, list( .zero = zero ))),
@@ -11630,9 +11958,8 @@ rtruncpareto <- function(n, lower, upper, shape) {
M <- M1 * ncol(y)
NOS <- ncoly <- ncol(y) # Number of species
- mynames1 <- paste("mu", if (NOS > 1) 1:NOS else "", sep = "")
- predictors.names <-
- namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
+ mynames1 <- param.names("mu", NOS)
+ predictors.names <- namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
if (is.numeric( .mu.init ))
@@ -11645,7 +11972,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
use.this <- if ( .imethod == 1) {
weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .probs.y) + 1/16)
+ c(quantile(y[, iii], probs = .probs.y ) + 1/16)
} else {
median(y[, iii]) + 1/16
}
@@ -11806,7 +12133,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
dl.dmu[!is.finite(dl.dmu)] <- (y/mu)[!is.finite(dl.dmu)] - 1
if ( .lmu == "nbcanlink")
- newemu$wrt.eta <- 1
+ newemu$wrt.param <- 1
dmu.deta <- dtheta.deta(mu, .lmu , earg = newemu) # eta1
myderiv <- c(w) * dl.dmu * dmu.deta
@@ -11816,10 +12143,10 @@ rtruncpareto <- function(n, lower, upper, shape) {
.size = size ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # wz is 'diagonal'
+ wz <- matrix(NA_real_, n, M) # wz is 'diagonal'
- ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
- wz <- dmu.deta^2 * ned2l.dmu2
+ ned2l.dmunb2 <- 1 / mu - 1 / (mu + kmat)
+ wz <- dmu.deta^2 * ned2l.dmunb2
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 6e428fd..4d48a51 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -13,12 +13,13 @@
+
dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
log = FALSE) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
- prob <- size / (size + munb)
+ prob <- 1 / (1 + munb/size)
}
if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -34,8 +35,9 @@ dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
ans <- rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
stop("argument 'pobs0' must be in [0,1]")
- if (!is.Numeric(prob, positive = TRUE))
- stop("argument 'prob' must be in (0,Inf)")
+ if (!is.Numeric(prob, positive = TRUE) ||
+ max(prob, na.rm = TRUE) >= 1)
+ stop("argument 'prob' must be in (0,1)")
if (!is.Numeric(size, positive = TRUE))
stop("argument 'size' must be in (0,Inf)")
index0 <- x == 0
@@ -59,7 +61,7 @@ pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
- prob <- size / (size + munb)
+ prob <- 1 / (1 + munb/size)
}
LLL <- max(length(q), length(pobs0), length(prob), length(size))
@@ -77,6 +79,10 @@ pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
prob = prob[qindex])
ans[q < 0] <- 0
ans[q == 0] <- pobs0[q == 0]
+
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+
ans
}
@@ -85,7 +91,7 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
- prob <- size/(size + munb)
+ prob <- 1 / (1 + munb/size)
}
LLL <- max(length(p), length(pobs0), length(prob), length(size))
@@ -108,6 +114,7 @@ qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
}
+
rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -117,7 +124,7 @@ rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
- prob <- size / (size + munb)
+ prob <- 1 / (1 + munb/size)
}
ans <- rposnegbin(n = use.n, prob = prob, size = size)
@@ -133,6 +140,7 @@ rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
+
dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
@@ -176,10 +184,15 @@ pzapois <- function(q, lambda, pobs0 = 0) {
(1-pobs0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
ans[q < 0] <- 0
ans[q == 0] <- pobs0[q == 0]
+
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+
ans
}
+
qzapois <- function(p, lambda, pobs0 = 0) {
LLL <- max(length(p), length(lambda), length(pobs0))
if (length(p) != LLL) p <- rep(p, len = LLL)
@@ -255,6 +268,7 @@ dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
}
+
pzipois <- function(q, lambda, pstr0 = 0) {
LLL <- max(length(pstr0), length(lambda), length(q))
@@ -275,6 +289,7 @@ pzipois <- function(q, lambda, pstr0 = 0) {
}
+
qzipois <- function(p, lambda, pstr0 = 0) {
LLL <- max(length(p), length(lambda), length(pstr0))
@@ -312,6 +327,7 @@ qzipois <- function(p, lambda, pstr0 = 0) {
}
+
rzipois <- function(n, lambda, pstr0 = 0) {
use.n <- if ((length.n <- length(n)) > 1) length.n else
@@ -347,7 +363,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
- yip88 <- function(link = "loge", n.arg = NULL) {
+
+
+
+ yip88 <- function(link = "loge", n.arg = NULL, imethod = 1) {
@@ -403,7 +422,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
namesof("lambda", .link, list(theta = NULL), tag = FALSE)
if (!length(etastart)) {
- lambda.init <- rep(median(y), length = length(y))
+ lambda.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ pos.only = FALSE)
etastart <- theta2eta(lambda.init, .link , earg = .earg )
}
if (length(extra)) {
@@ -412,7 +432,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
} else {
extra <- list(sumw = sum(w), narg = narg)
}
- }), list( .link = link, .earg = earg, .n.arg = n.arg ))),
+ }), list( .link = link, .earg = earg,
+ .n.arg = n.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
lambda <- eta2theta(eta, .link, .earg)
@@ -477,12 +498,14 @@ rzipois <- function(n, lambda, pstr0 = 0) {
zapoisson <-
function(lpobs0 = "logit", llambda = "loge",
- type.fitted = c("mean", "pobs0", "onempobs0"),
+ type.fitted = c("mean", "lambda", "pobs0", "onempobs0"),
+ imethod = 1,
+ ipobs0 = NULL, ilambda = NULL, ishrinkage = 0.95,
+ probs.y = 0.35,
zero = NULL) {
-
lpobs.0 <- as.list(substitute(lpobs0))
epobs.0 <- link2list(lpobs.0)
lpobs.0 <- attr(epobs.0, "function.name")
@@ -492,7 +515,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
llambda <- attr(elambda, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "lambda", "pobs0", "onempobs0"))[1]
@@ -506,14 +529,17 @@ rzipois <- function(n, lambda, pstr0 = 0) {
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("pobs0", "lambda"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -522,14 +548,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
initialize = eval(substitute(expression({
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -543,30 +568,32 @@ rzipois <- function(n, lambda, pstr0 = 0) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
- mynames1 <- if (ncoly == 1) "pobs0" else
- paste("pobs0", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "lambda" else
- paste("lambda", 1:ncoly, sep = "")
+ mynames1 <- param.names("pobs0", ncoly)
+ mynames2 <- param.names("lambda", ncoly)
predictors.names <-
c(namesof(mynames1, .lpobs.0, earg = .epobs.0, tag = FALSE),
namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
+ lambda.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ilambda,
+ ishrinkage = .ishrinkage,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
etastart <-
- cbind(theta2eta((0.5 + w*y0) / (1+w),
- .lpobs.0, earg = .epobs.0 ),
- matrix(1, n, NOS)) # 1 here is any old value
- for (spp. in 1:NOS) {
- sthese <- skip.these[, spp.]
- etastart[!sthese, NOS+spp.] =
- theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
- .llambda, earg = .elambda )
- }
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ cbind(theta2eta(if (length( .ipobs0 )) .ipobs0 else
+ (0.5 + w * y0) / (1 + w),
+ .lpobs.0 , earg = .epobs.0 ),
+ theta2eta(lambda.init, .llambda , earg = .elambda ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda,
+ .ipobs0 = ipobs0, .ilambda = ilambda,
+ .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .imethod = imethod,
.type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
@@ -576,10 +603,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "lambda", "pobs0", "onempobs0"))[1]
- NOS <- extra$NOS
M1 <- 2
+ NOS <- ncol(eta) / M1
pobs.0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
@@ -590,6 +617,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- switch(type.fitted,
"mean" = (1 - pobs.0) * lambda / (-expm1(-lambda)),
+ "lambda" = lambda,
"pobs0" = pobs.0, # P(Y=0)
"onempobs0" = 1 - pobs.0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -612,10 +640,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
temp.names <- c(rep( .lpobs.0 , len = NOS),
rep( .llambda , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
names(misc$link) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- names(misc$link)
@@ -629,12 +657,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- NOS <- extra$NOS
- M1 <- 2
- pobs0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ pobs0 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
.lpobs.0, earg = .epobs.0))
- lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+ lambda <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
.llambda, earg = .elambda ))
if (residuals) {
@@ -673,7 +699,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
deriv = eval(substitute(expression({
M1 <- 2
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1 # extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
@@ -689,19 +715,19 @@ rzipois <- function(n, lambda, pstr0 = 0) {
dl.dphimat[skip[, spp.], spp.] <- 1 / phimat[skip[, spp.], spp.]
dl.dlambda[skip[, spp.], spp.] <- 0
}
- dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda)
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
mu.phi0 <- phimat
temp3 <- if (.lpobs.0 == "logit") {
c(w) * (y0 - mu.phi0)
} else {
c(w) * dtheta.deta(mu.phi0, link = .lpobs.0 , earg = .epobs.0 ) *
- dl.dphimat
+ dl.dphimat
}
ans <- cbind(temp3,
c(w) * dl.dlambda * dlambda.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lpobs.0 = lpobs.0, .llambda = llambda,
.epobs.0 = epobs.0, .elambda = elambda ))),
@@ -714,15 +740,15 @@ rzipois <- function(n, lambda, pstr0 = 0) {
temp5 <- expm1(lambda)
ned2l.dlambda2 <- (1 - phimat) * (temp5 + 1) *
(1 / lambda - 1 / temp5) / temp5
- wz[, NOS+(1:NOS)] <- w * ned2l.dlambda2 * dlambda.deta^2
+ wz[, NOS+(1:NOS)] <- c(w) * ned2l.dlambda2 * dlambda.deta^2
- tmp100 <- mu.phi0 * (1.0 - mu.phi0)
+ tmp100 <- mu.phi0 * (1 - mu.phi0)
tmp200 <- if ( .lpobs.0 == "logit" && is.empty.list( .epobs.0 )) {
cbind(c(w) * tmp100)
} else {
cbind(c(w) * (1 / tmp100) *
- dtheta.deta(mu.phi0, link = .lpobs.0, earg = .epobs.0)^2)
+ dtheta.deta(mu.phi0, link = .lpobs.0 , earg = .epobs.0 )^2)
}
@@ -737,7 +763,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
wz[, 1:NOS] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
@@ -752,8 +778,11 @@ rzipois <- function(n, lambda, pstr0 = 0) {
zapoissonff <-
function(llambda = "loge", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- zero = -2) {
+ type.fitted = c("mean", "lambda", "pobs0", "onempobs0"),
+ imethod = 1,
+ ilambda = NULL, ionempobs0 = NULL, ishrinkage = 0.95,
+ probs.y = 0.35,
+ zero = "onempobs0") {
@@ -766,7 +795,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
lonempobs0 <- attr(eonempobs0, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "lambda", "pobs0", "onempobs0"))[1]
new("vglmff",
@@ -781,14 +810,17 @@ rzipois <- function(n, lambda, pstr0 = 0) {
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("lambda", "onempobs0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -797,14 +829,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
initialize = eval(substitute(expression({
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
+ Is.nonnegative.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -819,32 +850,33 @@ rzipois <- function(n, lambda, pstr0 = 0) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
- mynames1 <- if (ncoly == 1) "lambda" else
- paste("lambda", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "onempobs0" else
- paste("onempobs0", 1:ncoly, sep = "")
-
+ mynames1 <- param.names("lambda", ncoly)
+ mynames2 <- param.names("onempobs0", ncoly)
predictors.names <-
c(namesof(mynames1, .llambda, earg = .elambda , tag = FALSE),
namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
+ lambda.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ilambda,
+ ishrinkage = .ishrinkage,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
etastart <-
- cbind(matrix(1, n, NOS), # 1 here is any old value
+ cbind(theta2eta(lambda.init, .llambda , earg = .elambda ),
theta2eta(1 - (0.5 + w * y0) / (1 + w),
.lonempobs0 , earg = .eonempobs0 ))
- for (spp. in 1:NOS) {
- sthese <- skip.these[, spp.]
- etastart[!sthese, 0 * NOS + spp.] <-
- theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
- .llambda, earg = .elambda )
- }
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda,
- .type.fitted = type.fitted ))),
+ .ilambda = ilambda,
+ .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .type.fitted = type.fitted,
+ .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -853,10 +885,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "lambda", "pobs0", "onempobs0"))[1]
- NOS <- extra$NOS
M1 <- 2
+ NOS <- ncol(eta) / M1
lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.llambda , earg = .elambda ))
@@ -865,7 +897,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- switch(type.fitted,
- "mean" = (onempobs0) * lambda / (-expm1(-lambda)),
+ "mean" = onempobs0 * lambda / (-expm1(-lambda)),
+ "lambda" = lambda,
"pobs0" = 1 - onempobs0, # P(Y=0)
"onempobs0" = onempobs0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -888,10 +921,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
temp.names <- c(rep( .llambda , len = NOS),
rep( .lonempobs0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
names(misc$link) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
misc$earg <- vector("list", M1 * NOS)
names(misc$earg) <- names(misc$link)
@@ -952,7 +985,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
deriv = eval(substitute(expression({
M1 <- 2
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1 # extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
@@ -981,7 +1014,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
ans <- cbind(c(w) * dl.dlambda * dlambda.deta,
temp3)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lonempobs0 = lonempobs0, .llambda = llambda,
.eonempobs0 = eonempobs0, .elambda = elambda ))),
@@ -1009,7 +1042,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
wz[, 1 * NOS + (1:NOS)] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
@@ -1031,18 +1064,31 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
zanegbinomial <-
- function(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
- type.fitted = c("mean", "pobs0"),
- ipobs0 = NULL, isize = NULL,
- zero = -3, # Prior to 20130917 the default was: c(-1, -3),
+ function(
+ zero = "size",
+ type.fitted = c("mean", "munb", "pobs0"),
+ nsimEIM = 500,
+ cutoff.prob = 0.999, # higher is better for large 'size'
+ eps.trig = 1e-7,
+ max.support = 4000, # 20160127; I have changed this
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lpobs0 = "logit", lmunb = "loge", lsize = "loge",
imethod = 1,
- nsimEIM = 250,
- ishrinkage = 0.95) {
+ ipobs0 = NULL,
+ imunb = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ isize = NULL,
+
+ gsize.mux = exp((-12:6)/2)) {
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
if (!is.Numeric(nsimEIM, length.arg = 1,
positive = TRUE, integer.valued = TRUE))
@@ -1058,17 +1104,6 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("If given, argument 'isize' must contain positive values only")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
-
lpobs0 <- as.list(substitute(lpobs0))
epobs0 <- link2list(lpobs0)
lpobs0 <- attr(epobs0, "function.name")
@@ -1083,8 +1118,9 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0"))[1]
+ c("mean", "munb", "pobs0"))[1]
+ ipobs0.small <- 1/64 # A number easily represented exactly
new("vglmff",
blurb = c("Zero-altered negative binomial (Bernoulli and\n",
@@ -1097,32 +1133,37 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
"munb))^size)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
Q1 = 1,
+ expected = TRUE,
+ imethod = .imethod ,
+ multipleResponses = TRUE,
+ parameters.names = c("pobs0", "munb", "size"),
+ nsimEIM = .nsimEIM ,
+ eps.trig = .eps.trig ,
type.fitted = .type.fitted ,
zero = .zero )
- }, list( .zero = zero,
+ }, list( .zero = zero, .imethod = imethod,
+ .nsimEIM = nsimEIM, .eps.trig = eps.trig,
.type.fitted = type.fitted
))),
initialize = eval(substitute(expression({
M1 <- 3
- if (any(y < 0))
- stop("the response must not have negative values")
-
temp5 <-
w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
+ Is.nonnegative.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -1136,14 +1177,14 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
- mynames1 <- if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
- mynames2 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ mynames1 <- param.names("pobs0", NOS)
+ mynames2 <- param.names("munb", NOS)
+ mynames3 <- param.names("size", NOS)
predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
@@ -1151,70 +1192,59 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
- mu.init <- y
- for (iii in 1:ncol(y)) {
- index.posy <- (y[, iii] > 0)
- if ( .imethod == 1) {
- use.this <- weighted.mean(y[index.posy, iii],
- w[index.posy, iii])
- mu.init[ index.posy, iii] <- (1 - .ishrinkage ) * y[index.posy, iii] +
- .ishrinkage * use.this
- mu.init[!index.posy, iii] <- use.this
- } else {
- use.this <-
- mu.init[, iii] <- (y[, iii] +
- weighted.mean(y[index.posy, iii],
- w[index.posy, iii])) / 2
- }
- max.use.this <- 7 * use.this + 10
- vecTF <- (mu.init[, iii] > max.use.this)
- if (any(vecTF))
- mu.init[vecTF, iii] <- max.use.this
- }
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
+
- pnb0 <- matrix(if (length( .ipobs0 )) .ipobs0 else -1,
- nrow = n, ncol = NOS, byrow = TRUE)
- for (spp. in 1:NOS) {
- if (any(pnb0[, spp.] < 0)) {
- index.y0 <- y[, spp.] < 0.5
- pnb0[, spp.] <- max(min(sum(index.y0) / n, 0.97), 0.03)
+ pobs0.init <- matrix(if (length( .ipobs0 )) .ipobs0 else -1,
+ nrow = n, ncol = NOS, byrow = TRUE)
+ for (jay in 1:NOS) {
+ if (any(pobs0.init[, jay] < 0)) {
+ index.y0 <- (y[, jay] < 0.5)
+ pobs0.init[, jay] <- max(min(mean(index.y0), 1 - .ipobs0.small ),
+ .ipobs0.small )
}
}
if ( is.Numeric( .isize )) {
- kmat0 <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
+ size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
munb <- extraargs
- sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
- log = TRUE))
+ sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
}
- k.grid <- 2^((-6):6)
- kmat0 <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- index.posy <- (y[, spp.] > 0)
- posy <- y[index.posy, spp.]
- kmat0[, spp.] <-
- grid.search(k.grid, objfun = posnegbinomial.Loglikfun,
- y = posy, x = x[index.posy, ],
- w = w[index.posy, spp.],
- extraargs = mu.init[index.posy, spp.])
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ TFvec <- (y[, jay] > 0)
+ size.init[, jay] <-
+ grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
+ y = y[TFvec, jay], # x = x[TFvec, ],
+ w = w[TFvec, jay],
+ extraargs = munb.init[TFvec, jay])
}
}
- etastart <- cbind(theta2eta(pnb0, .lpobs0 , earg = .epobs0 ),
- theta2eta(mu.init, .lmunb , earg = .emunb ),
- theta2eta(kmat0, .lsize , earg = .esize ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- cbind(theta2eta(pobs0.init, .lpobs0 , earg = .epobs0 ),
+ theta2eta(munb.init, .lmunb , earg = .emunb ),
+ theta2eta(size.init, .lsize , earg = .esize ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
} # End of if (!length(etastart))
}), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize,
.ipobs0 = ipobs0, .isize = isize,
+ .ipobs0.small = ipobs0.small,
+ .imunb = imunb, .gsize.mux = gsize.mux,
.imethod = imethod, .ishrinkage = ishrinkage,
- .type.fitted = type.fitted ))),
+ .type.fitted = type.fitted, .probs.y = probs.y ))),
+
+
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -1223,18 +1253,29 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0"))[1]
+ c("mean", "munb", "pobs0"))[1]
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
phi0 <- eta2theta(eta[, M1*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
munb <- eta2theta(eta[, M1*(1:NOS)-1], .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, M1*(1:NOS) ], .lsize , earg = .esize )
- pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ prob0 <- tempk^kmat # p(0) from negative binomial
+ oneminusf0 <- 1 - prob0
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ oneminusf0[big.size] <- -expm1(-munb[big.size])
+ }
+
ans <- switch(type.fitted,
- "mean" = (1 - phi0) * munb / (1 - pnb0),
+ "mean" = (1 - phi0) * munb / oneminusf0,
+ "munb" = munb,
"pobs0" = phi0) # P(Y=0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
@@ -1250,49 +1291,48 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
ans
}, list( .lpobs0 = lpobs0, .lsize = lsize, .lmunb = lmunb,
.epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+
+
last = eval(substitute(expression({
- misc$link =
+ misc$link <-
c(rep( .lpobs0 , length = NOS),
rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS,
- M = M1)]
+ rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <- c(mynames1,
- mynames2,
- mynames3)[interleave.VGAM(M1*NOS, M = M1)]
+ mynames2,
+ mynames3)[interleave.VGAM(M1*NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1*NOS)
names(misc$earg) <- temp.names
for (ii in 1:NOS) {
- misc$earg[[M1*ii-2]] <- .epobs0
- misc$earg[[M1*ii-1]] <- .emunb
- misc$earg[[M1*ii ]] <- .esize
+ misc$earg[[M1*ii - 2]] <- .epobs0
+ misc$earg[[M1*ii - 1]] <- .emunb
+ misc$earg[[M1*ii ]] <- .esize
}
misc$nsimEIM <- .nsimEIM
- misc$imethod <- .imethod
misc$ipobs0 <- .ipobs0
misc$isize <- .isize
misc$multipleResponses <- TRUE
}), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize,
- .ipobs0 = ipobs0, .isize = isize,
- .nsimEIM = nsimEIM,
- .imethod = imethod ))),
+ .ipobs0 = ipobs0, .isize = isize,
+ .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- NOS <- extra$NOS
M1 <- 3
+ NOS <- ncol(eta) / M1
phi0 <- eta2theta(eta[, M1*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
munb <- eta2theta(eta[, M1*(1:NOS)-1], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, M1*(1:NOS) ], .lsize , earg = .esize )
+ size <- eta2theta(eta[, M1*(1:NOS) ], .lsize , earg = .esize )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
ll.elts <-
- c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
+ c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = size,
log = TRUE)
if (summation) {
sum(ll.elts)
@@ -1329,40 +1369,80 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
deriv = eval(substitute(expression({
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
y0 <- extra$y0
phi0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
- .lpobs0 , earg = .epobs0 )
+ .lpobs0 , earg = .epobs0 )
munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
+ .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+ .lsize , earg = .esize )
skip <- extra$skip.these
+
dphi0.deta <- dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
- tempk <- kmat / (kmat + munb)
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-altered Poisson ",
+ "model instead")
+ kmat[big.size] <- munb[big.size] / smallval
+ }
+
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
tempm <- munb / (kmat + munb)
prob0 <- tempk^kmat
oneminusf0 <- 1 - prob0
+ AA16 <- tempm + log(tempk)
df0.dmunb <- -tempk * prob0
- df0.dkmat <- prob0 * (tempm + log(tempk))
+ df0.dkmat <- prob0 * AA16
+ df02.dmunb2 <- prob0 * tempk * (1 + 1/kmat) / (1 + munb/kmat)
+ df02.dkmat2 <- prob0 * ((tempm^2) / kmat + AA16^2)
+ df02.dkmat.dmunb <- -prob0 * (tempm/kmat + AA16) / (1 + munb/kmat)
+
+
+
+
+ if (any(big.size)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ oneminusf0[big.size] <- -expm1(-munb[big.size])
+ df0.dmunb[big.size] <- -tempk[big.size] * prob0[big.size]
+ df0.dkmat[big.size] <- prob0[big.size] * AA16[big.size]
+ df02.dmunb2[big.size] <- prob0[big.size] * tempk[big.size] *
+ (1 + 1/kmat[big.size]) / (1 + smallval)
+ df02.dkmat2[big.size] <- prob0[big.size] *
+ ((tempm[big.size])^2 / kmat[big.size] + AA16[big.size]^2)
+ df02.dkmat.dmunb[big.size] <- -prob0[big.size] *
+ (tempm[big.size]/kmat[big.size] + AA16[big.size]) / (1 + smallval)
+ }
+
+
+ mymu <- munb / oneminusf0 # E(Y) of Pos-NBD
+
dl.dphi0 <- -1 / (1 - phi0)
- dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
+ dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat) +
df0.dmunb / oneminusf0
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
- (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ (y - munb) / (munb + kmat) + log(tempk) +
df0.dkmat / oneminusf0
+ if (any(big.size)) {
+ dl.dsize[big.size] <- 1e-8 # A small number
+ }
- dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
+
+ dl.dphi0[y == 0] <- 1 / phi0[y == 0] # Do it in one line
skip <- extra$skip.these
for (spp. in 1:NOS) {
dl.dsize[skip[, spp.], spp.] <-
@@ -1373,119 +1453,186 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
dl.dsize * dsize.deta)
- muphi0 <- phi0
dl.deta1 <- if ( .lpobs0 == "logit") {
- c(w) * (y0 - muphi0)
+ c(w) * (y0 - phi0)
} else {
- c(w) * dphi0.deta * (y0 / muphi0 - 1) / (1 - muphi0)
+ c(w) * dl.dphi0 * dphi0.deta
}
+
+
ans <- cbind(dl.deta1, dl.deta23)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lpobs0 = lpobs0 , .lmunb = lmunb , .lsize = lsize ,
.epobs0 = epobs0 , .emunb = emunb , .esize = esize ))),
+
+
weight = eval(substitute(expression({
+ wz <- matrix(0, n, M + M-1) # tridiagonal
- six <- dimm(M1)
- wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
- M1m1 <- M1 - 1
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
+ mu.phi0 <- phi0 # pobs0 # phi0
+ tmp100 <- mu.phi0 * (1 - mu.phi0)
+ wz[, (1:NOS)*M1 - 2] <-
+ if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ cbind(c(w) * tmp100)
+ } else {
+ cbind(c(w) * (1 / tmp100) *
+ dtheta.deta(mu.phi0, link = .lpobs0 , earg = .epobs0 )^2)
+ }
+ ned2l.dmunb2 <- mymu / munb^2 -
+ ((1 + mymu/kmat) / kmat) / (1 + munb/kmat)^2 -
+ df02.dmunb2 / oneminusf0 -
+ (df0.dmunb / oneminusf0)^2
+ wz[, M1*(1:NOS) - 1] <- c(w) * (1 - phi0) *
+ ned2l.dmunb2 * dmunb.deta^2
- ind2 <- iam(NA, NA, M = M1 - 1, both = TRUE, diag = TRUE)
+ ned2l.dmunbsize <- (munb - mymu) / (munb + kmat)^2 -
+ df02.dkmat.dmunb / oneminusf0 -
+ df0.dmunb * df0.dkmat / oneminusf0^2
+ wz[, M + M1*(1:NOS) - 1] <- c(w) * (1 - phi0) *
+ ned2l.dmunbsize * dmunb.deta * dsize.deta
- for (ii in 1:( .nsimEIM )) {
- ysim <- rzanegbin(n = n*NOS, pobs0 = phi0,
- size = kmat, mu = munb)
- dim(ysim) <- c(n, NOS)
- dl.dphi0 <- -1 / (1 - phi0)
- dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
- df0.dmunb / oneminusf0
- dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
- df0.dkmat / oneminusf0
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 1
+ Q.maxs <- qposnegbin(p = eff.p[2] ,
+ munb = munb[, jay],
+ size = kmat[, jay]) + 10
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig)))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
- dl.dphi0[ysim == 0] <- 1 / phi0[ysim == 0] # Do it in one line
- ysim0 <- ifelse(ysim == 0, 1, 0)
- skip.sim <- matrix(as.logical(ysim0), n, NOS)
- for (spp. in 1:NOS) {
- dl.dsize[skip.sim[, spp.], spp.] <-
- dl.dmunb[skip.sim[, spp.], spp.] <- 0
- }
- for (kk in 1:NOS) {
- temp2 <- cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
- dl.dsize[, kk] * dsize.deta[, kk])
- small.varcov <- temp2[, ind2$row.index] *
- temp2[, ind2$col.index]
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ wz[sind2, M1*jay] <-
+ EIM.posNB.specialp(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only)
+ if (FALSE)
+ wz2[sind2, M1*jay] <-
+ EIM.posNB.speciald(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.min = min(Q.mins2[sind2]),
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only) # *
+
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0 |
+ is.na(wz[sind2, M1*jay]))) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
+
+ lwr.ptr <- upr.ptr + 1
+ } # while
+ } # if
+ } # end of for (jay in 1:NOS)
- run.varcov[, ((kk-1)*M1+2):(kk*M1)] <-
- run.varcov[, ((kk-1)*M1+2):(kk*M1)] +
- c(small.varcov[, 1:M1m1])
- run.varcov[, M + (kk-1)*M1 + 2] <-
- run.varcov[, M + (kk-1)*M1 + 2] +
- c(small.varcov[, M1m1 + 1])
- } # kk; end of NOS
- } # ii; end of nsimEIM
+
- run.varcov <- cbind(run.varcov / .nsimEIM )
- run.varcov <- if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wzind1 <- sort(c( M1*(1:NOS) - 1,
- M1*(1:NOS) - 0,
- M + M1*(1:NOS) - 1))
- wz[, wzind1] <- c(w) * run.varcov[, wzind1]
+ for (jay in 1:NOS) {
+ run.varcov <- 0
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- munb[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rzanegbin(sum(ii.TF), munb = muvec, size = kkvec,
+ pobs0 = phi0[ii.TF, jay])
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p(-muvec / (kkvec + muvec)) +
+ df0.dkmat[ii.TF, jay] / oneminusf0[ii.TF, jay]
+ dl.dk[ysim == 0] <- 0
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
- tmp100 <- muphi0 * (1 - muphi0)
- tmp200 <- if ( .lpobs0 == "logit") {
- cbind(c(w) * tmp100)
- } else {
- c(w) * cbind(dphi0.deta^2 / tmp100)
- }
- for (ii in 1:NOS) {
- index200 <- abs(tmp200[, ii]) < .Machine$double.eps
- if (any(index200)) {
- tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad
+ wz[ii.TF, M1*jay] <- ned2l.dk2 # * (dsize.deta[ii.TF, jay])^2
}
- }
- wz[, M1*(1:NOS)-2] <- tmp200
+ } # jay
+
+
+
+ wz[, M1*(1:NOS) ] <- wz[, M1*(1:NOS) ] * dsize.deta^2
+
+
+
+ save.weights <- !all(ind2)
+
+
+
+
+ wz[, M1*(1:NOS) ] <- c(w) * (1 - phi0) *
+ wz[, M1*(1:NOS) ]
wz
}), list( .lpobs0 = lpobs0,
.epobs0 = epobs0,
+ .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB,
.nsimEIM = nsimEIM ))))
} # End of zanegbinomial()
+
zanegbinomialff.control <- function(save.weights = TRUE, ...) {
  # Control settings for vglm() fits using the zanegbinomialff() family.
  # save.weights: keep the working weights on the fitted object
  # (presumably because the simulated-Fisher-scoring weights used in the
  # 'weight' slot are expensive to recompute -- confirm against vglm.fit).
  # Extra arguments in '...' are accepted but ignored.
  ctl <- list(save.weights = save.weights)
  ctl
}
@@ -1493,16 +1640,31 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
zanegbinomialff <-
- function(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
+ function(
+ lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
+ type.fitted = c("mean", "munb", "pobs0", "onempobs0"),
isize = NULL, ionempobs0 = NULL,
- zero = c(-2, -3),
+ zero = c("size", "onempobs0"),
+
+ probs.y = 0.35,
+ cutoff.prob = 0.999, # higher is better for large 'size'
+ eps.trig = 1e-7,
+ max.support = 4000, # 20160127; I have changed this
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ gsize.mux = exp((-12:6)/2),
+
imethod = 1,
- nsimEIM = 250,
+ imunb = NULL,
+ nsimEIM = 500,
ishrinkage = 0.95) {
+
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
+
if (!is.Numeric(nsimEIM, length.arg = 1,
positive = TRUE, integer.valued = TRUE))
stop("argument 'nsimEIM' must be a positive integer")
@@ -1517,16 +1679,6 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("If given, argument 'isize' must contain positive values only")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
lmunb <- as.list(substitute(lmunb))
emunb <- link2list(lmunb)
lmunb <- attr(emunb, "function.name")
@@ -1540,8 +1692,10 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
lonempobs0 <- attr(eonempobs0, "function.name")
+ ipobs0.small <- 1/64 # A number easily represented exactly
+
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "munb", "pobs0", "onempobs0"))[1]
new("vglmff",
@@ -1556,32 +1710,36 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
"munb))^size)"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ nsimEIM = .nsimEIM ,
+ parameters.names = c("munb", "size", "onempobs0"),
+ eps.trig = .eps.trig ,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .nsimEIM = nsimEIM, .eps.trig = eps.trig,
.type.fitted = type.fitted
))),
initialize = eval(substitute(expression({
M1 <- 3
- if (any(y < 0))
- stop("the response must not have negative values")
-
temp5 <-
w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
+ Is.nonnegative.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -1595,16 +1753,15 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
extra$dimnamesy <- dimnames(y)
extra$type.fitted <- .type.fitted
- mynames1 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames2 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- mynames3 <- if (NOS == 1) "onempobs0" else paste("onempobs0", 1:NOS,
- sep = "")
+ mynames1 <- param.names("munb", NOS)
+ mynames2 <- param.names("size", NOS)
+ mynames3 <- param.names("onempobs0", NOS)
predictors.names <-
c(namesof(mynames1, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames2, .lsize , earg = .esize , tag = FALSE),
namesof(mynames3, .lonempobs0 , earg = .eonempobs0 ,
tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
@@ -1612,71 +1769,58 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
if (!length(etastart)) {
- mu.init <- y
- for (iii in 1:ncol(y)) {
- index.posy <- (y[, iii] > 0)
- if ( .imethod == 1) {
- use.this <- weighted.mean(y[index.posy, iii],
- w[index.posy, iii])
- mu.init[ index.posy, iii] <- (1 - .ishrinkage ) * y[index.posy, iii] +
- .ishrinkage * use.this
- mu.init[!index.posy, iii] <- use.this
- } else {
- use.this <-
- mu.init[, iii] <- (y[, iii] +
- weighted.mean(y[index.posy, iii],
- w[index.posy, iii])) / 2
- }
- max.use.this <- 7 * use.this + 10
- vecTF <- (mu.init[, iii] > max.use.this)
- if (any(vecTF))
- mu.init[vecTF, iii] <- max.use.this
- }
-
- pnb0 <- matrix(if (length( .ionempobs0 )) 1 - .ionempobs0 else -1,
- nrow = n, ncol = NOS, byrow = TRUE)
- for (spp. in 1:NOS) {
- if (any(pnb0[, spp.] < 0)) {
- index.y0 <- y[, spp.] < 0.5
- pnb0[, spp.] <- max(min(sum(index.y0) / n, 0.97), 0.03)
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
+
+ pobs0.init <- matrix(if (length( .ionempobs0 )) 1 - .ionempobs0 else -1,
+ nrow = n, ncol = NOS, byrow = TRUE)
+ for (jay in 1:NOS) {
+ if (any(pobs0.init[, jay] < 0)) {
+ index.y0 <- y[, jay] < 0.5
+ pobs0.init[, jay] <- max(min(mean(index.y0), 1 - .ipobs0.small ),
+ .ipobs0.small )
}
}
if ( is.Numeric( .isize )) {
- kmat0 <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
+ size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
munb <- extraargs
sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
log = TRUE))
}
- k.grid <- 2^((-6):6)
- kmat0 <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- index.posy <- (y[, spp.] > 0)
- posy <- y[index.posy, spp.]
- kmat0[, spp.] <-
- grid.search(k.grid, objfun = posnegbinomial.Loglikfun,
- y = posy, x = x[index.posy, ],
- w = w[index.posy, spp.],
- extraargs = mu.init[index.posy, spp.])
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ TFvec <- (y[, jay] > 0)
+ size.init[, jay] <-
+ grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
+ y = y[TFvec, jay], # x = x[index.posy, ],
+ w = w[TFvec, jay],
+ extraargs = munb.init[TFvec, jay])
}
}
etastart <-
- cbind(theta2eta(mu.init , .lmunb , earg = .emunb ),
- theta2eta(kmat0 , .lsize , earg = .esize ),
- theta2eta(1 - pnb0, .lonempobs0 , earg = .eonempobs0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ cbind(theta2eta(munb.init , .lmunb , earg = .emunb ),
+ theta2eta(size.init , .lsize , earg = .esize ),
+ theta2eta(1 - pobs0.init, .lonempobs0 , earg = .eonempobs0 ))
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
} # End of if (!length(etastart))
}), list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
.eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
- .ionempobs0 = ionempobs0, .isize = isize,
+ .ionempobs0 = ionempobs0, .imunb = imunb, .isize = isize,
+ .gsize.mux = gsize.mux,
+ .ipobs0.small = ipobs0.small,
.imethod = imethod, .ishrinkage = ishrinkage,
- .type.fitted = type.fitted ))),
+ .probs.y = probs.y, .type.fitted = type.fitted ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
warning("cannot find 'type.fitted'. ",
@@ -1685,19 +1829,30 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "munb", "pobs0", "onempobs0"))[1]
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
munb <- eta2theta(eta[, M1*(1:NOS)-2], .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, M1*(1:NOS)-1], .lsize , earg = .esize )
onempobs0 <- eta2theta(eta[, M1*(1:NOS) ], .lonempobs0 ,
earg = .eonempobs0 )
- pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb); NBD p(0)
+ prob0 <- tempk^kmat # p(0) from negative binomial
+ oneminusf0 <- 1 - prob0
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ oneminusf0[big.size] <- -expm1(-munb[big.size])
+ }
ans <- switch(type.fitted,
- "mean" = (onempobs0) * munb / (1 - pnb0),
+ "mean" = onempobs0 * munb / oneminusf0,
+ "munb" = munb,
"pobs0" = 1 - onempobs0, # P(Y=0)
"onempobs0" = onempobs0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -1719,10 +1874,10 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS),
rep( .lonempobs0 , length = NOS))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <- c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(M1*NOS, M = M1)]
+ mynames3)[interleave.VGAM(M1*NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1*NOS)
@@ -1747,8 +1902,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- NOS <- extra$NOS
M1 <- 3
+ NOS <- ncol(eta) / M1
munb <- eta2theta(eta[, M1*(1:NOS)-2], .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, M1*(1:NOS)-1], .lsize , earg = .esize )
onempobs0 <- eta2theta(eta[, M1*(1:NOS) ], .lonempobs0 ,
@@ -1758,8 +1913,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
} else {
ll.elts <-
c(w) * dzanegbin(x = y, pobs0 = 1 - onempobs0,
- munb = munb, size = kmat,
- log = TRUE)
+ munb = munb, size = kmat, log = TRUE)
if (summation) {
sum(ll.elts)
} else {
@@ -1780,8 +1934,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- munb <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lsize , earg = .esize )
+ munb <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lsize , earg = .esize )
onempobs0 <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lonempobs0 ,
earg = .eonempobs0 )
@@ -1795,7 +1949,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
deriv = eval(substitute(expression({
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
y0 <- extra$y0
munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
@@ -1807,29 +1961,59 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
skip <- extra$skip.these
phi0 <- 1 - onempobs0
- dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
- dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+ dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
donempobs0.deta <- dtheta.deta(onempobs0, .lonempobs0 ,
earg = .eonempobs0 )
- tempk <- kmat / (kmat + munb)
+
+
+
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-altered Poisson ",
+ "model instead")
+ kmat[big.size] <- munb[big.size] / smallval
+ }
+
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
tempm <- munb / (kmat + munb)
prob0 <- tempk^kmat
oneminusf0 <- 1 - prob0
+ AA16 <- tempm + log(tempk)
df0.dmunb <- -tempk * prob0
- df0.dkmat <- prob0 * (tempm + log(tempk))
+ df0.dkmat <- prob0 * AA16
+ df02.dmunb2 <- prob0 * tempk * (1 + 1/kmat) / (1 + munb/kmat)
+ df02.dkmat2 <- prob0 * ((tempm^2) / kmat + AA16^2)
+ df02.dkmat.dmunb <- -prob0 * (tempm/kmat + AA16) / (1 + munb/kmat)
- dl.dmunb <- y / munb - (y + kmat) / (munb + kmat) +
+
+ mymu <- munb / oneminusf0 # E(Y) of Pos-NBD
+
+
+
+
+ dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat) +
df0.dmunb / oneminusf0
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
- (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ (y - munb) / (munb + kmat) + log(tempk) +
df0.dkmat / oneminusf0
dl.donempobs0 <- +1 / (onempobs0)
+ if (any(big.size)) {
+ dl.dsize[big.size] <- 1e-8 # A small number
+ }
+
+
+
dl.donempobs0[y == 0] <-
-1 / (1 - onempobs0[y == 0]) # Do it in 1 line
skip <- extra$skip.these
@@ -1842,106 +2026,202 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
dl.dsize * dsize.deta)
- muphi0 <- onempobs0 # Originally: phi0
- dl.deta3 <- if (FALSE &&
- .lonempobs0 == "logit") {
- } else {
- c(w) * donempobs0.deta * dl.donempobs0
+ dl.deta3 <- if ( .lonempobs0 == "logit") {
+ -c(w) * (y0 - phi0)
+ } else {
+ -c(w) * dl.donempobs0 * donempobs0.deta
}
+
+
+
ans <- cbind(dl.deta12, dl.deta3)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lonempobs0 = lonempobs0 , .lmunb = lmunb , .lsize = lsize ,
.eonempobs0 = eonempobs0 , .emunb = emunb , .esize = esize ))),
+
+
+
+
+
+
weight = eval(substitute(expression({
- six <- dimm(M1)
- wz <- run.varcov <- matrix(0.0, n, six*NOS-1)
- M1m1 <- M1 - 1
+ wz <- matrix(0, n, M + M-1) # tridiagonal
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
- ind2 <- iam(NA, NA, M = M1 - 1, both = TRUE, diag = TRUE)
+ tmp100 <- onempobs0 * (1 - onempobs0)
+ wz[, (1:NOS)*M1 ] <-
+ if ( .lonempobs0 == "logit" && is.empty.list( .eonempobs0 )) {
+ cbind(c(w) * tmp100)
+ } else {
+ cbind(c(w) * (1 / tmp100) *
+ dtheta.deta(onempobs0, link = .lonempobs0 , earg = .eonempobs0 )^2)
+ }
- for (ii in 1:( .nsimEIM )) {
- ysim <- rzanegbin(n = n*NOS, pobs0 = phi0,
- size = kmat, mu = munb)
- dim(ysim) <- c(n, NOS)
- dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat) +
- df0.dmunb / oneminusf0
- dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
- df0.dkmat / oneminusf0
- dl.donempobs0 <- +1 / (onempobs0)
+ ned2l.dmunb2 <- mymu / munb^2 -
+ ((1 + mymu/kmat) / kmat) / (1 + munb/kmat)^2 -
+ df02.dmunb2 / oneminusf0 -
+ (df0.dmunb / oneminusf0)^2
+ wz[, M1*(1:NOS) - 2] <- c(w) * (1 - phi0) *
+ ned2l.dmunb2 * dmunb.deta^2
+ ned2l.dmunbsize <- (munb - mymu) / (munb + kmat)^2 -
+ df02.dkmat.dmunb / oneminusf0 -
+ df0.dmunb * df0.dkmat / oneminusf0^2
+ wz[, M + M1*(1:NOS) - 2] <- c(w) * (1 - phi0) *
+ ned2l.dmunbsize * dmunb.deta * dsize.deta
- dl.donempobs0[ysim == 0] <-
- -1 / (1 - onempobs0[ysim == 0]) # Do it in 1 line
- ysim0 <- ifelse(ysim == 0, 1, 0)
- skip.sim <- matrix(as.logical(ysim0), n, NOS)
- for (spp. in 1:NOS) {
- dl.dsize[skip.sim[, spp.], spp.] <-
- dl.dmunb[skip.sim[, spp.], spp.] <- 0
- }
- for (kk in 1:NOS) {
- temp2 <- cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
- dl.dsize[, kk] * dsize.deta[, kk])
- small.varcov <- temp2[, ind2$row.index] *
- temp2[, ind2$col.index]
- run.varcov[, ((kk-1)*M1+2-1):(kk*M1-1)] <-
- run.varcov[, ((kk-1)*M1+2-1):(kk*M1-1)] +
- c(small.varcov[, 1:M1m1])
- run.varcov[, M + (kk-1)*M1 + 2-1] <-
- run.varcov[, M + (kk-1)*M1 + 2-1] +
- c(small.varcov[, M1m1 + 1])
- } # kk; end of NOS
- } # ii; end of nsimEIM
- run.varcov <- cbind(run.varcov / .nsimEIM )
- run.varcov <- if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 1
+ Q.maxs <- qposnegbin(p = eff.p[2] ,
+ munb = munb[, jay],
+ size = kmat[, jay]) + 10
- wzind1 <- sort(c( M1*(1:NOS) - 1 - 1,
- M1*(1:NOS) - 0 - 1,
- M + M1*(1:NOS) - 1 - 1))
- wz[, wzind1] <- c(w) * run.varcov[, wzind1]
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig)))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
- tmp100 <- muphi0 * (1 - muphi0)
- tmp200 <- if (FALSE &&
- .lpobs0 == "logit") {
- } else {
- c(w) * cbind(donempobs0.deta^2 / tmp100)
- }
- for (ii in 1:NOS) {
- index200 <- abs(tmp200[, ii]) < .Machine$double.eps
- if (any(index200)) {
- tmp200[index200, ii] <- .Machine$double.eps # Diagonal 0's are bad
+
+
+
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
+
+
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+
+ wz[sind2, M1*jay - 1] <-
+ EIM.posNB.specialp(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only)
+ if (FALSE)
+ wz2[sind2, M1*jay - 1] <-
+ EIM.posNB.speciald(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.min = min(Q.mins2[sind2]),
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only) # *
+
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay - 1] <= 0 |
+ is.na(wz[sind2, M1*jay - 1]))) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
+
+ lwr.ptr <- upr.ptr + 1
+ } # while
+ } # if
+ } # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ for (jay in 1:NOS) {
+ run.varcov <- 0
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- munb[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rzanegbin(sum(ii.TF), munb = muvec, size = kkvec,
+ pobs0 = phi0[ii.TF, jay])
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p(-muvec / (kkvec + muvec)) +
+ df0.dkmat[ii.TF, jay] / oneminusf0[ii.TF, jay]
+
+ dl.dk[ysim == 0] <- 0
+
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
+
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+ wz[ii.TF, M1*jay - 1] <- ned2l.dk2 # * (dsize.deta[ii.TF, jay])^2
}
- }
- wz[, M1*(1:NOS) ] <- tmp200
+ } # jay
+
+
+
+
+ wz[, M1*(1:NOS) - 1] <- wz[, M1*(1:NOS) - 1] * dsize.deta^2
+
+
+
+
+
+
+ save.weights <- !all(ind2)
+
+
+
+
+ wz[, M1*(1:NOS) - 1] <- c(w) * (1 - phi0) *
+ wz[, M1*(1:NOS) - 1]
wz
}), list( .lonempobs0 = lonempobs0,
.eonempobs0 = eonempobs0,
+ .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB,
.nsimEIM = nsimEIM ))))
} # End of zanegbinomialff()
@@ -1957,11 +2237,15 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
zipoisson <-
function(lpstr0 = "logit", llambda = "loge",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
ipstr0 = NULL, ilambda = NULL,
+ gpstr0 = NULL, # (1:9) / 10,
imethod = 1,
- ishrinkage = 0.8, zero = NULL) {
+ ishrinkage = 0.95, probs.y = 0.35,
+ zero = NULL) {
ipstr00 <- ipstr0
+ gpstr00 <- gpstr0
+ ipstr0.small <- 1/64 # A number easily represented exactly
lpstr0 <- as.list(substitute(lpstr0))
@@ -1975,7 +2259,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
if (length(ipstr00))
@@ -1987,17 +2271,6 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
stop("argument 'ilambda' values must be positive")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
-
new("vglmff",
blurb = c("Zero-inflated Poisson\n\n",
"Links: ",
@@ -2006,26 +2279,31 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
"Mean: (1 - pstr0) * lambda"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("pstr0", "lambda"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted
))),
initialize = eval(substitute(expression({
+ M1 <- 2
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -2035,83 +2313,68 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
ncoly <- ncol(y)
- M1 <- 2
extra$ncoly <- ncoly
extra$M1 <- M1
extra$dimnamesy <- dimnames(y)
M <- M1 * ncoly
extra$type.fitted <- .type.fitted
-
- if (any(round(y) != y))
- stop("integer-valued responses only allowed for ",
- "the 'zipoisson' family")
-
- mynames1 <- paste("pstr0", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("pstr0", ncoly)
+ mynames2 <- param.names("lambda", ncoly)
predictors.names <-
c(namesof(mynames1, .lpstr00 , earg = .epstr00 , tag = FALSE),
namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
- matL <- matrix(if (length( .ilambda )) .ilambda else 0,
- n, ncoly, byrow = TRUE)
+
+ matL <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ilambda , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
+
matP <- matrix(if (length( .ipstr00 )) .ipstr00 else 0,
n, ncoly, byrow = TRUE)
+ phi.grid <- .gpstr00 # seq(0.02, 0.98, len = 21)
+ ipstr0.small <- .ipstr0.small # A number easily represented exactly
-
- for (spp. in 1:ncoly) {
- yvec <- y[, spp.]
-
- Phi.init <- 1 - 0.85 * sum(w[yvec > 0]) / sum(w)
- Phi.init[Phi.init <= 0.02] <- 0.02 # Last resort
- Phi.init[Phi.init >= 0.98] <- 0.98 # Last resort
-
- if ( length(mustart)) {
- mustart <- matrix(mustart, n, ncoly) # Make sure right size
- Lambda.init <- mustart / (1 - Phi.init)
- } else if ( .imethod == 2) {
- mymean <- weighted.mean(yvec[yvec > 0],
- w[yvec > 0]) + 1/16
- Lambda.init <- (1 - .ishrinkage ) * (yvec + 1/8) + .ishrinkage * mymean
- } else {
- use.this <- median(yvec[yvec > 0]) + 1 / 16
- Lambda.init <- (1 - .ishrinkage ) * (yvec + 1/8) + .ishrinkage * use.this
- }
+ if (!length( .ipstr00 ))
+ for (jay in 1:ncoly) {
zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
sum(c(w) * dzipois(x = y, pstr0 = phival,
- lambda = extraargs$lambda,
- log = TRUE))
+ lambda = extraargs$lambda, log = TRUE))
}
- phi.grid <- seq(0.02, 0.98, len = 21)
- Phimat.init <- grid.search(phi.grid, objfun = zipois.Loglikfun,
- y = y, x = x, w = w,
- extraargs = list(lambda = Lambda.init))
-
- if (length(mustart)) {
- Lambda.init <- Lambda.init / (1 - Phimat.init)
+ Phi.init <- if (length(phi.grid)) {
+ grid.search(phi.grid, objfun = zipois.Loglikfun,
+ y = y[, jay], x = x, w = w[, jay],
+ extraargs = list(lambda = matL[, jay]))
+ } else {
+ pmax(ipstr0.small,
+ weighted.mean(y[, jay] == 0, w[, jay]) -
+ dpois(0, matL[, jay]))
}
-
- if (!length( .ipstr00 ))
- matP[, spp.] <- Phimat.init
- if (!length( .ilambda ))
- matL[, spp.] <- Lambda.init
- } # spp.
-
- etastart <- cbind(theta2eta(matP, .lpstr00, earg = .epstr00 ),
- theta2eta(matL, .llambda, earg = .elambda ))[,
- interleave.VGAM(M, M = M1)]
+ if (mean(Phi.init == ipstr0.small) > 0.95)
+ warning("from the initial values only, the data appears to ",
+ "have little or no 0-inflation")
+ matP[, jay] <- Phi.init
+ } # for (jay)
+
+ etastart <- cbind(theta2eta(matP, .lpstr00 , earg = .epstr00 ),
+ theta2eta(matL, .llambda , earg = .elambda ))[,
+ interleave.VGAM(M, M1 = M1)]
mustart <- NULL # Since etastart has been computed.
} # End of !length(etastart)
}), list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda,
.ipstr00 = ipstr00, .ilambda = ilambda,
- .imethod = imethod,
+ .gpstr00 = gpstr00,
+ .imethod = imethod, .probs.y = probs.y,
+ .ipstr0.small = ipstr0.small,
.type.fitted = type.fitted,
.ishrinkage = ishrinkage ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -2122,7 +2385,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
@@ -2130,6 +2393,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
ans <- switch(type.fitted,
"mean" = (1 - phimat) * lambda,
+ "lambda" = lambda,
"pobs0" = phimat + (1-phimat)*exp(-lambda), # P(Y=0)
"pstr0" = phimat,
"onempstr0" = 1 - phimat)
@@ -2153,8 +2417,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
M1 <- extra$M1
misc$link <-
c(rep( .lpstr00 , length = ncoly),
- rep( .llambda , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M)
@@ -2241,7 +2505,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
ans <- c(w) * cbind(dl.dphimat * dphimat.deta,
dl.dlambda * dlambda.deta)
- ans <- ans[, interleave.VGAM(M, M = M1)]
+ ans <- ans[, interleave.VGAM(M, M1 = M1)]
if ( .llambda == "loge" && is.empty.list( .elambda ) &&
@@ -2290,7 +2554,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
zibinomial <-
function(lpstr0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
ipstr0 = NULL,
zero = NULL, # 20130917; was originally zero = 1,
multiple.responses = FALSE, imethod = 1) {
@@ -2306,7 +2570,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
lprob <- attr(eprob, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
if (is.Numeric(ipstr0))
@@ -2326,7 +2590,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
namesof("prob" , lprob , earg = eprob ), "\n",
"Mean: (1 - pstr0) * prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
@@ -2334,12 +2600,13 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
list(M1 = 2,
type.fitted = .type.fitted ,
expected = TRUE,
- multiple.responses = FALSE,
+ multipleResponses = FALSE,
+ parameters.names = c("pstr0", "prob"),
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted
))),
-
+
initialize = eval(substitute(expression({
if (!all(w == 1))
extra$orig.w <- w
@@ -2438,10 +2705,11 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
"mean" = (1 - pstr0) * mubin,
+ "prob" = mubin,
"pobs0" = pstr0 + (1-pstr0)*(1-mubin)^nvec, # P(Y=0)
"pstr0" = pstr0,
"onempstr0" = 1 - pstr0)
@@ -2525,7 +2793,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}), list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ wz <- matrix(NA_real_, nrow = n, ncol = dimm(M))
@@ -2568,9 +2836,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
zibinomialff <-
function(lprob = "logit", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
ionempstr0 = NULL,
- zero = 2,
+ zero = "onempstr0",
multiple.responses = FALSE, imethod = 1) {
@@ -2590,7 +2858,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
lonempstr0 <- attr(eonempstr0, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
if (is.Numeric(ionempstr0))
@@ -2610,12 +2878,18 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
"Mean: onempstr0 * prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob", "onempstr0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -2721,10 +2995,11 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
"mean" = (onempstr0) * mubin,
+ "prob" = mubin,
"pobs0" = 1 - onempstr0 + (onempstr0)*(1-mubin)^nvec, # P(Y=0)
"pstr0" = 1 - onempstr0,
"onempstr0" = onempstr0)
@@ -2813,7 +3088,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
}), list( .lonempstr0 = lonempstr0, .lprob = lprob,
.eonempstr0 = eonempstr0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ wz <- matrix(NA_real_, nrow = n, ncol = dimm(M))
@@ -3075,7 +3350,9 @@ qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
ind4 <- (p > pstr0)
ans[!ind4] <- 0
ans[ ind4] <- qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
- size = size[ind4], prob = prob[ind4])
+ size = size[ind4], prob = prob[ind4])
+
+
@@ -3155,12 +3432,24 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
zinegbinomial <-
- function(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, isize = NULL,
- zero = -3, # 20130917; used to be c(-1, -3)
- imethod = 1, ishrinkage = 0.95,
- nsimEIM = 250) {
+ function(
+ zero = "size",
+ type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"),
+ nsimEIM = 500,
+ cutoff.prob = 0.999, # higher is better for large 'size'
+ eps.trig = 1e-7,
+ max.support = 4000, # 20160127; I have changed this
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+ imethod = 1,
+ ipstr0 = NULL,
+ imunb = NULL,
+ probs.y = 0.35,
+ ishrinkage = 0.95,
+ isize = NULL,
+ gsize.mux = exp((-12:6)/2)) {
+
+
lpstr0 <- as.list(substitute(lpstr0))
@@ -3177,10 +3466,15 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "munb", "pobs0", "pstr0", "onempstr0"))[1]
+
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
+ ipstr0.small <- 1/64 # A number easily represented exactly
if (length(ipstr0) &&
(!is.Numeric(ipstr0, positive = TRUE) ||
any(ipstr0 >= 1)))
@@ -3188,23 +3482,11 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("argument 'isize' must contain positive values only")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1, 2 or 3")
-
if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
stop("argument 'nsimEIM' must be a positive integer")
if (nsimEIM <= 50)
warning("argument 'nsimEIM' should be greater than 50, say")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
-
-
new("vglmff",
blurb = c("Zero-inflated negative binomial\n\n",
@@ -3215,18 +3497,24 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
"Mean: (1 - pstr0) * munb"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("pstr0", "munb", "size"),
+ eps.trig = .eps.trig ,
type.fitted = .type.fitted ,
+ nsimEIM = .nsimEIM ,
zero = .zero )
}, list( .zero = zero,
+ .nsimEIM = nsimEIM, .eps.trig = eps.trig,
.type.fitted = type.fitted
))),
@@ -3236,9 +3524,10 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -3254,87 +3543,83 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
- mynames1 <- if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
- mynames2 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ mynames1 <- param.names("pstr0", NOS)
+ mynames2 <- param.names("munb", NOS)
+ mynames3 <- param.names("size", NOS)
predictors.names <-
c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
- mum.init <- if ( .imethod == 3) {
- y + 1/16
- } else {
- mum.init <- y
- for (iii in 1:ncol(y)) {
- index <- (y[, iii] > 0)
- mum.init[, iii] <- if ( .imethod == 2)
- weighted.mean(y[index, iii], w = w[index, iii]) else
- median(rep(y[index, iii], times = w[index, iii])) + 1/8
- }
- (1 - .ishrinkage ) * (y + 1/16) + .ishrinkage * mum.init
- }
- pstr0.init <- if (length( .ipstr0 )) {
- matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
+
+
+
+ if ( is.Numeric( .isize )) {
+ size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
- pstr0.init <- y
- for (iii in 1:ncol(y))
- pstr0.init[, iii] <- sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
- pstr0.init[pstr0.init <= 0.02] <- 0.02 # Last resort
- pstr0.init[pstr0.init >= 0.98] <- 0.98 # Last resort
- pstr0.init
+ posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
+ munb <- extraargs
+ sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
+ }
+
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ TFvec <- (y[, jay] > 0)
+ size.init[, jay] <-
+ grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
+ y = y[TFvec, jay], # x = x[TFvec, ],
+ w = w[TFvec, jay],
+ extraargs = munb.init[TFvec, jay])
+ }
}
- kay.init <-
- if ( is.Numeric( .isize )) {
- matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
+
+
+ if (length( .ipstr0 )) {
+ pstr0.init <- matrix( .ipstr0 , n, ncoly, byrow = TRUE)
} else {
- zinegbin.Loglikfun <- function(kval, y, x, w, extraargs) {
- index0 <- (y == 0)
- pstr0vec <- extraargs$pstr0
- muvec <- extraargs$mu
-
- ans1 <- 0.0
- if (any( index0))
- ans1 <- ans1 + sum(w[ index0] *
- dzinegbin(x = y[ index0], size = kval,
- munb = muvec[ index0],
- pstr0 = pstr0vec[ index0], log = TRUE))
- if (any(!index0))
- ans1 <- ans1 + sum(w[!index0] *
- dzinegbin(x = y[!index0], size = kval,
- munb = muvec[!index0],
- pstr0 = pstr0vec[!index0], log = TRUE))
- ans1
- }
- k.grid <- 2^((-6):6)
- kay.init <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kay.init[, spp.] <-
- grid.search(k.grid, objfun = zinegbin.Loglikfun,
- y = y[, spp.], x = x, w = w[, spp.],
- extraargs = list(pstr0 = pstr0.init[, spp.],
- mu = mum.init[, spp.]))
- }
- kay.init
+ pstr0.init <- matrix(0, n, ncoly)
+ ipstr0.small <- .ipstr0.small # A number easily represented exactly
+ for (jay in 1:NOS) {
+ Phi.init <- pmax(ipstr0.small,
+ weighted.mean(y[, jay] == 0, w[, jay]) -
+ dnbinom(0, mu = munb.init[, jay],
+ size = size.init[, jay]))
+ if (mean(Phi.init == ipstr0.small) > 0.95)
+ warning("from the initial values only, the data appears to ",
+ "have little or no 0-inflation")
+ pstr0.init[, jay] <- Phi.init
+ } # for (jay)
}
+
+
+
+
+
etastart <-
cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
- theta2eta(mum.init, .lmunb , earg = .emunb ),
- theta2eta(kay.init, .lsize , earg = .esize ))
+ theta2eta(munb.init, .lmunb , earg = .emunb ),
+ theta2eta(size.init, .lsize , earg = .esize ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
- .ipstr0 = ipstr0, .isize = isize,
+ .ipstr0 = ipstr0, .imunb = imunb, .isize = isize,
+ .gsize.mux = gsize.mux,
.type.fitted = type.fitted,
- .ishrinkage = ishrinkage,
+ .ishrinkage = ishrinkage, .probs.y = probs.y,
+ .ipstr0.small = ipstr0.small,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -3345,23 +3630,31 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "munb", "pobs0", "pstr0", "onempstr0"))[1]
- M1 <- 3
- NOS <- extra$NOS
- pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
.lpstr0 , earg = .epstr0 )
- if (type.fitted %in% c("mean", "pobs0"))
- munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ if (type.fitted %in% c("mean", "munb", "pobs0"))
+ munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)],
.lmunb , earg = .emunb )
- if (type.fitted %in% c("pobs0"))
- kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+
+ if (type.fitted %in% c("pobs0")) {
+ kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)],
+ .lsize , earg = .esize )
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ prob0 <- tempk^kmat # p(0) from negative binomial
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ }
+ }
ans <- switch(type.fitted,
"mean" = (1 - pstr0) * munb,
- "pobs0" = pstr0 + (1 - pstr0) *
- (kmat / (kmat + munb))^kmat, # P(Y=0)
+ "munb" = munb,
+ "pobs0" = pstr0 + (1 - pstr0) * prob0, # P(Y=0)
"pstr0" = pstr0,
"onempstr0" = 1 - pstr0)
if (length(extra$dimnamesy) &&
@@ -3384,12 +3677,11 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
misc$link <-
c(rep( .lpstr0 , length = NOS),
rep( .lmunb , length = NOS),
- rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS,
- M = M1)]
+ rep( .lsize , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <-
c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(M1*NOS, M = M1)]
+ mynames3)[interleave.VGAM(M1*NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1*NOS)
@@ -3400,31 +3692,32 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
misc$earg[[M1*ii ]] <- .esize
}
- misc$imethod <- .imethod
- misc$nsimEIM <- .nsimEIM
- misc$expected <- TRUE
- misc$M1 <- M1
misc$ipstr0 <- .ipstr0
misc$isize <- .isize
- misc$multipleResponses <- TRUE
-
+ misc$max.chunk.MB <- .max.chunk.MB
+ misc$cutoff.prob <- .cutoff.prob
+ misc$imethod <- .imethod
+ misc$nsimEIM <- .nsimEIM
+ misc$expected <- TRUE
+ misc$ishrinkage <- .ishrinkage
+ misc$multipleResponses <- TRUE
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize,
.ipstr0 = ipstr0, .isize = isize,
- .nsimEIM = nsimEIM, .imethod = imethod ))),
+ .nsimEIM = nsimEIM, .imethod = imethod,
+ .cutoff.prob = cutoff.prob,
+ .max.chunk.MB = max.chunk.MB,
+ .ishrinkage = ishrinkage
+ ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 3
- NOS <- extra$NOS
- pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
- .lpstr0 , earg = .epstr0 )
- munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lsize , earg = .esize )
+
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -3451,10 +3744,9 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
if (any(pwts != 1))
warning("ignoring prior weights")
eta <- predict(object)
- pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
- .lpstr0 , earg = .epstr0 )
- munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lmunb , earg = .emunb )
- kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lsize , earg = .esize )
+ pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lsize , earg = .esize )
rzinegbin(nsim * length(munb),
size = kmat, munb = munb, pstr0 = pstr0)
}, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
@@ -3464,53 +3756,114 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
+ validparams = eval(substitute(function(eta, extra = NULL) {
+ M1 <- 3
+ NOS <- ncol(eta) / M1
- deriv = eval(substitute(expression({
+ pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ size <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+
+ smallval <- 1e-3
+ ans <- all(is.finite(munb)) && all(munb > 0) &&
+ all(is.finite(size)) && all(size > 0) &&
+ all(is.finite(pstr0)) && all(pstr0 > 0) &&
+ all(pstr0 < 1) &&
+ (overdispersion <- all(munb / size > smallval))
+ if (!overdispersion)
+ warning("parameter 'size' has very large values; ",
+ "replacing them by an arbitrary large value within ",
+ "the parameter space. Try fitting ",
+ "a zero-inflated Poisson ",
+ "model instead.")
+ ans
+ }, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+
+
+
+ deriv = eval(substitute(expression({
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
pstr0 <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
- .lpstr0 , earg = .epstr0 )
+ .lpstr0 , earg = .epstr0 )
munb <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
+ .lmunb , earg = .emunb )
kmat <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
- .lsize , earg = .esize )
+ .lsize , earg = .esize )
+
+ dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
+ dmunb.deta <- dtheta.deta(munb , .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat , .lsize , earg = .esize )
+ dthetas.detas <-
+ (cbind(dpstr0.deta,
+ dmunb.deta,
+ dsize.deta))[, interleave.VGAM(M1*NOS, M1 = M1)]
+
+
+
+ smallval <- 1e-2 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-inflated Poisson ",
+ "model instead")
+ kmat[big.size] <- munb[big.size] / smallval
+ }
+
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ tempm <- munb / (kmat + munb)
+ prob0 <- tempk^kmat
+ oneminusf0 <- 1 - prob0
+ AA16 <- tempm + log(tempk)
+ df0.dmunb <- -tempk * prob0
+ df0.dkmat <- prob0 * AA16
+ df02.dmunb2 <- prob0 * tempk * (1 + 1/kmat) / (1 + munb/kmat)
+ df02.dkmat2 <- prob0 * ((tempm^2) / kmat + AA16^2)
+ df02.dkmat.dmunb <- -prob0 * (tempm/kmat + AA16) / (1 + munb/kmat)
+
+
+
+ AA <- pobs0 <- cbind(pstr0 + (1 - pstr0) * prob0)
+
+
+
- dpstr0.deta <- dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
- dmunb.deta <- dtheta.deta(munb , .lmunb , earg = .emunb )
- dsize.deta <- dtheta.deta(kmat , .lsize , earg = .esize )
- dthetas.detas <-
- (cbind(dpstr0.deta,
- dmunb.deta,
- dsize.deta))[, interleave.VGAM(M1*NOS, M = M1)]
dl.dpstr0 <- -1 / (1 - pstr0)
- dl.dmunb <- y / munb - (y + kmat) / (munb + kmat)
+ dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat)
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
- (y + kmat) / (munb + kmat) + 1 +
- log(kmat / (kmat + munb))
+ (y - munb) / (munb + kmat) + log(tempk)
+ if (any(big.size)) {
+ dl.dsize[big.size] <- 1e-7 # A small number
+ }
+
+
for (spp. in 1:NOS) {
index0 <- (y[, spp.] == 0)
if (all(index0) || all(!index0))
stop("must have some 0s AND some positive counts in the data")
- kmat. <- kmat[index0, spp.]
- munb. <- munb[index0, spp.]
pstr0. <- pstr0[index0, spp.]
- tempk. <- kmat. / (kmat. + munb.)
- tempm. <- munb. / (kmat. + munb.)
- prob0. <- tempk.^kmat.
- df0.dmunb. <- -tempk.* prob0.
- df0.dkmat. <- prob0. * (tempm. + log(tempk.))
+ tempk. <- tempk[index0, spp.] # kmat. / (kmat. + munb.)
+ tempm. <- tempm[index0, spp.] # munb. / (kmat. + munb.)
+ prob0. <- prob0[index0, spp.] # tempk.^kmat.
+ df0.dmunb. <- df0.dmunb[index0, spp.] # -tempk.* prob0.
+ df0.dkmat. <- df0.dkmat[index0, spp.] # prob0. * (tempm. + log(tempk.))
- denom. <- pstr0. + (1 - pstr0.) * prob0.
+ denom. <- AA[index0, spp.] # pstr0. + (1 - pstr0.) * prob0.
dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
@@ -3520,114 +3873,201 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
dl.dthetas <-
cbind(dl.dpstr0,
dl.dmunb,
- dl.dsize)[, interleave.VGAM(M1*NOS, M = M1)]
+ dl.dsize)[, interleave.VGAM(M1*NOS, M1 = M1)]
- c(w) * dl.dthetas * dthetas.detas
+ ans <- c(w) * dl.dthetas * dthetas.detas
+ ans
}), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
.epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+
+
weight = eval(substitute(expression({
+ wz <- matrix(0, n, M + M-1 + M-2)
+ mymu <- munb / oneminusf0 # Is the same as 'mu', == E(Y)
- wz <- matrix(0, n, M1*M - M1)
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
- ind3 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
- run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
- for (ii in 1:( .nsimEIM )) {
- ysim <- rzinegbin(n = n*NOS, pstr0 = pstr0,
- size = kmat, mu = munb)
- dim(ysim) <- c(n, NOS)
- index0 <- (ysim[, spp.] == 0)
- dl.dpstr0 <- -1 / (1 - pstr0)
- dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat)
- dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat) / (munb + kmat) + 1 +
- log(kmat / (kmat + munb))
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 1
+ Q.maxs <- qposnegbin(p = eff.p[2] ,
+ munb = munb[, jay],
+ size = kmat[, jay]) + 10
- for (spp. in 1:NOS) {
- index0 <- (ysim[, spp.] == 0)
- if (all(index0) || all(!index0)) {
- repeat {
- ysim[, spp.] <- rzinegbin(n = n,
- pstr0 = pstr0[, spp.],
- size = kmat[, spp.],
- mu = munb[, spp.])
- index0 <- (ysim[, spp.] == 0)
- if (any(!index0) && any(index0))
- break
- }
- }
- kmat. <- kmat[index0, spp.]
- munb. <- munb[index0, spp.]
- pstr0. <- pstr0[index0, spp.]
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig)))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
- tempk. <- kmat. / (kmat. + munb.)
- tempm. <- munb. / (kmat. + munb.)
- prob0. <- tempk.^kmat.
- df0.dmunb. <- -tempk.* prob0.
- df0.dkmat. <- prob0. * (tempm. + log(tempk.))
- denom. <- pstr0. + (1 - pstr0.) * prob0.
- dl.dpstr0[index0, spp.] <- (1 - prob0.) / denom.
- dl.dmunb[index0, spp.] <- (1 - pstr0.) * df0.dmunb. / denom.
- dl.dsize[index0, spp.] <- (1 - pstr0.) * df0.dkmat. / denom.
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
- sdl.dthetas <- cbind(dl.dpstr0[, spp.],
- dl.dmunb[, spp.],
- dl.dsize[, spp.])
- temp3 <- sdl.dthetas
- run.varcov[,, spp.] <- run.varcov[,, spp.] +
- temp3[, ind3$row.index] *
- temp3[, ind3$col.index]
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
- } # End of for (spp.) loop
- } # End of ii nsimEIM loop
+ wz[sind2, M1*jay] <-
+ EIM.posNB.specialp(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only,
+ second.deriv = FALSE)
+ if (FALSE)
+ wz2[sind2, M1*jay] <-
+ EIM.posNB.speciald(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.min = min(Q.mins2[sind2]),
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only,
+ second.deriv = FALSE)
- run.varcov <- run.varcov / .nsimEIM
- wz1 <- if (intercept.only) {
- for (spp. in 1:NOS) {
- for (jay in 1:length(ind3$row.index)) {
- run.varcov[, jay, spp.] <- mean(run.varcov[, jay, spp.])
- }
+
+
+ wz[sind2, M1*jay] <-
+ wz[sind2, M1*jay] * (1 - AA[sind2, jay]) -
+ (1-pstr0[sind2, jay]) * (df02.dkmat2[sind2, jay] -
+ (1-pstr0[sind2, jay]) * (df0.dkmat[sind2, jay]^2) / AA[sind2, jay])
+
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0 |
+ is.na(wz[sind2, M1*jay]))) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
+
+
+ lwr.ptr <- upr.ptr + 1
+ } # while
+
}
- run.varcov
- } else {
- run.varcov
- }
+ } # end of for (jay in 1:NOS)
- for (spp. in 1:NOS) {
- wz1[,, spp.] <- wz1[,, spp.] *
- dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
- dthetas.detas[, M1 * (spp. - 1) + ind3$col]
- }
- for (spp. in 1:NOS) {
- for (jay in 1:M1) {
- for (kay in jay:M1) {
- cptr <- iam((spp. - 1) * M1 + jay,
- (spp. - 1) * M1 + kay, M = M)
- temp.wz1 <- wz1[,, spp.]
- wz[, cptr] <- temp.wz1[, iam(jay, kay, M = M1)]
- }
+
+
+
+
+
+ for (jay in 1:NOS) {
+ run.varcov <- 0
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- munb[ii.TF, jay]
+ PSTR0 <- pstr0[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rzinegbin(sum(ii.TF), pstr0 = PSTR0,
+ mu = muvec, size = kkvec)
+
+ index0 <- (ysim == 0)
+
+
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p(-muvec / (kkvec + muvec)) # +
+
+ ans0 <- (1 - PSTR0) *
+ df0.dkmat[ii.TF , jay] / AA[ii.TF , jay]
+ dl.dk[index0] <- ans0[index0]
+
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
+
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+ wz[ii.TF, M1*jay] <- ned2l.dk2 # * (dsize.deta[ii.TF, jay])^2
}
}
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
+
+ wz[, M1*(1:NOS) ] <- wz[, M1*(1:NOS) ] * dsize.deta^2
+
+
+
+
+ save.weights <- !all(ind2)
+
+
+ ned2l.dpstr02 <- oneminusf0 / (AA * (1 - pstr0))
+ wz[, M1*(1:NOS) - 2] <- ned2l.dpstr02 * dpstr0.deta^2
+
+
+ ned2l.dpstr0.dmunb <- df0.dmunb / AA
+ wz[, M + M1*(1:NOS) - 2] <- ned2l.dpstr0.dmunb *
+ dpstr0.deta * dmunb.deta
+
+ ned2l.dpstr0.dsize <- df0.dkmat / AA
+ wz[, M + M-1 + M1*(1:NOS) - 2] <- ned2l.dpstr0.dsize *
+ dpstr0.deta * dsize.deta
+
+
+
+ ned2l.dmunb2 <-
+ (1 - AA) * (mymu / munb^2 -
+ ((1 + mymu/kmat) / kmat) / (1 + munb/kmat)^2) -
+ (1-pstr0) * (df02.dmunb2 -
+ (1 - pstr0) * (df0.dmunb^2) / AA)
+
+ wz[, M1*(1:NOS) - 1] <- ned2l.dmunb2 * dmunb.deta^2
+
+
+ dAA.dmunb <- (1 - pstr0) * df0.dmunb
+
+
+
+ ned2l.dmunbsize <-
+ (1 - AA) * (munb - mymu) / (munb + kmat)^2 -
+ (1-pstr0) * (df02.dkmat.dmunb -
+ df0.dkmat * dAA.dmunb / AA)
+
+ wz[, M + M1*(1:NOS) - 1] <- ned2l.dmunbsize * dmunb.deta *
+ dsize.deta
+
+
+
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lpstr0 = lpstr0,
- .epstr0 = epstr0, .nsimEIM = nsimEIM ))))
+ .epstr0 = epstr0, .nsimEIM = nsimEIM,
+ .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB ))))
} # End of zinegbinomial
@@ -3644,12 +4084,19 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
zinegbinomialff <-
function(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- isize = NULL, ionempstr0 = NULL,
- zero = c(-2, -3),
+ type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"),
+ imunb = NULL, isize = NULL, ionempstr0 = NULL,
+ zero = c("size", "onempstr0"),
imethod = 1, ishrinkage = 0.95,
- nsimEIM = 250) {
+ probs.y = 0.35,
+ cutoff.prob = 0.999, # higher is better for large 'size'
+ eps.trig = 1e-7,
+ max.support = 4000, # 20160127; I have changed this
+ max.chunk.MB = 30, # max.memory = Inf is allowed
+ gsize.mux = exp((-12:6)/2),
+
+ nsimEIM = 500) {
lmunb <- as.list(substitute(lmunb))
@@ -3664,12 +4111,17 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
eonempstr0 <- link2list(lonempstr0)
lonempstr0 <- attr(eonempstr0, "function.name")
+ ipstr0.small <- 1/64 # A number easily represented exactly
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "munb", "pobs0", "pstr0", "onempstr0"))[1]
+ if (!is.Numeric(eps.trig, length.arg = 1,
+ positive = TRUE) || eps.trig > 0.001)
+ stop("argument 'eps.trig' must be positive and smaller in value")
+
if (length(ionempstr0) &&
(!is.Numeric(ionempstr0, positive = TRUE) ||
any(ionempstr0 >= 1)))
@@ -3677,22 +4129,11 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("argument 'isize' must contain positive values only")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1, 2 or 3")
-
if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
stop("argument 'nsimEIM' must be a positive integer")
if (nsimEIM <= 50)
warning("argument 'nsimEIM' should be greater than 50, say")
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
-
new("vglmff",
@@ -3705,18 +4146,24 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
"Mean: (1 - pstr0) * munb"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 3
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 3)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 3,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("munb", "size", "onempstr0"),
+ eps.trig = .eps.trig ,
+ nsimEIM = .nsimEIM ,
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
+ .nsimEIM = nsimEIM, .eps.trig = eps.trig,
.type.fitted = type.fitted
))),
@@ -3726,9 +4173,10 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -3742,89 +4190,82 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
- mynames1 <- if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames2 <- if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- mynames3 <- if (NOS == 1) "onempstr0" else paste("onempstr0", 1:NOS,
- sep = "")
+ mynames1 <- param.names("munb", NOS)
+ mynames2 <- param.names("size", NOS)
+ mynames3 <- param.names("onempstr0", NOS)
predictors.names <-
c(namesof(mynames1, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames2, .lsize , earg = .esize , tag = FALSE),
namesof(mynames3, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
- mum.init <- if ( .imethod == 3) {
- y + 1/16
- } else {
- mum.init <- y
- for (iii in 1:ncol(y)) {
- index <- (y[, iii] > 0)
- mum.init[, iii] <- if ( .imethod == 2)
- weighted.mean(y[index, iii], w = w[index, iii]) else
- median(rep(y[index, iii], times = w[index, iii])) + 1/8
- }
- (1 - .ishrinkage ) * (y + 1/16) + .ishrinkage * mum.init
- }
+
+ munb.init <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .imunb , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
- onempstr0.init <- if (length( .ionempstr0 )) {
- matrix( .ionempstr0 , n, ncoly, byrow = TRUE)
+
+ if ( is.Numeric( .isize )) {
+ size.init <- matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
- pstr0.init <- y
- for (iii in 1:ncol(y))
- pstr0.init[, iii] <- sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
- pstr0.init[pstr0.init <= 0.02] <- 0.02 # Last resort
- pstr0.init[pstr0.init >= 0.98] <- 0.98 # Last resort
- 1 - pstr0.init
+ posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
+ munb <- extraargs
+ sum(c(w) * dposnegbin(y, munb = munb, size = kmat, log = TRUE))
+ }
+
+ size.init <- matrix(0, nrow = n, ncol = NOS)
+ for (jay in 1:NOS) {
+ size.grid <- .gsize.mux * mean(munb.init[, jay])
+ TFvec <- (y[, jay] > 0)
+ size.init[, jay] <-
+ grid.search(size.grid, objfun = posnegbinomial.Loglikfun,
+ y = y[TFvec, jay], # x = x[TFvec, ],
+ w = w[TFvec, jay],
+ extraargs = munb.init[TFvec, jay])
+ }
}
- kay.init <-
- if ( is.Numeric( .isize )) {
- matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
+
+
+ if (length( .ionempstr0 )) {
+ onempstr0.init <- matrix( .ionempstr0 , n, ncoly, byrow = TRUE)
} else {
- zinegbin.Loglikfun <- function(kval, y, x, w, extraargs) {
- index0 <- (y == 0)
- pstr0vec <- extraargs$pstr0
- muvec <- extraargs$mu
-
- ans1 <- 0.0
- if (any( index0))
- ans1 <- ans1 + sum(w[ index0] *
- dzinegbin(x = y[ index0], size = kval,
- munb = muvec[ index0],
- pstr0 = pstr0vec[ index0], log = TRUE))
- if (any(!index0))
- ans1 <- ans1 + sum(w[!index0] *
- dzinegbin(x = y[!index0], size = kval,
- munb = muvec[!index0],
- pstr0 = pstr0vec[!index0], log = TRUE))
- ans1
- }
- k.grid <- 2^((-6):6)
- kay.init <- matrix(0, nrow = n, ncol = NOS)
- for (spp. in 1:NOS) {
- kay.init[, spp.] <-
- grid.search(k.grid, objfun = zinegbin.Loglikfun,
- y = y[, spp.], x = x, w = w[, spp.],
- extraargs = list(pstr0 = 1 - onempstr0.init[, spp.],
- mu = mum.init[, spp.]))
- }
- kay.init
+ onempstr0.init <- matrix(0, n, ncoly)
+ ipstr0.small <- .ipstr0.small # Easily represented exactly
+ for (jay in 1:NOS) {
+ Phi.init <- pmax(ipstr0.small,
+ weighted.mean(y[, jay] == 0, w[, jay]) -
+ dnbinom(0, mu = munb.init[, jay],
+ size = size.init[, jay]))
+ if (mean(Phi.init == ipstr0.small) > 0.95)
+ warning("from the initial values only, the data appears to ",
+ "have little or no 0-inflation")
+ onempstr0.init[, jay] <- 1 - Phi.init
+ } # for (jay)
}
+
+
+
+
etastart <-
- cbind(theta2eta(mum.init, .lmunb , earg = .emunb ),
- theta2eta(kay.init, .lsize , earg = .esize ),
+ cbind(theta2eta(munb.init, .lmunb , earg = .emunb ),
+ theta2eta(size.init, .lsize , earg = .esize ),
theta2eta(onempstr0.init, .lonempstr0 ,
earg = .eonempstr0 ))
etastart <-
- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
- .ionempstr0 = ionempstr0, .isize = isize,
+ .ionempstr0 = ionempstr0, .imunb = imunb, .isize = isize,
+ .gsize.mux = gsize.mux,
.type.fitted = type.fitted,
- .ishrinkage = ishrinkage,
+ .ipstr0.small = ipstr0.small,
+ .ishrinkage = ishrinkage, .probs.y = probs.y,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -3835,23 +4276,35 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "munb", "pobs0", "pstr0", "onempstr0"))[1]
M1 <- 3
- NOS <- extra$NOS
- if (type.fitted %in% c("mean", "pobs0"))
+ NOS <- ncol(eta) / M1
+ if (type.fitted %in% c("mean", "munb", "pobs0"))
munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
- if (type.fitted %in% c("pobs0"))
+
+ if (type.fitted %in% c("pobs0")) {
kmat <- eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lsize , earg = .esize )
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ prob0 <- tempk^kmat # p(0) from negative binomial
+
+ smallval <- 1e-3 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ prob0[big.size] <- exp(-munb[big.size]) # The limit as kmat --> Inf
+ }
+ }
+
onempstr0 <- eta2theta(eta[, M1*(1:NOS) , drop = FALSE],
.lonempstr0 , earg = .eonempstr0 )
+
ans <- switch(type.fitted,
- "mean" = (onempstr0) * munb,
- "pobs0" = 1 - onempstr0 + (onempstr0) *
- (kmat / (kmat + munb))^kmat, # P(Y=0)
+ "mean" = onempstr0 * munb,
+ "munb" = munb,
+ "pobs0" = 1 - onempstr0 + onempstr0 * prob0, # P(Y=0)
"pstr0" = 1 - onempstr0,
"onempstr0" = onempstr0)
if (length(extra$dimnamesy) &&
@@ -3874,12 +4327,11 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
misc$link <-
c(rep( .lmunb , length = NOS),
rep( .lsize , length = NOS),
- rep( .lonempstr0 , length = NOS))[interleave.VGAM(M1*NOS,
- M = M1)]
+ rep( .lonempstr0 , length = NOS))[interleave.VGAM(M1*NOS, M1 = M1)]
temp.names <-
c(mynames1,
mynames2,
- mynames3)[interleave.VGAM(M1*NOS, M = M1)]
+ mynames3)[interleave.VGAM(M1*NOS, M1 = M1)]
names(misc$link) <- temp.names
misc$earg <- vector("list", M1*NOS)
@@ -3954,7 +4406,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
deriv = eval(substitute(expression({
M1 <- 3
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1
munb <- eta2theta(eta[, M1*(1:NOS)-2, drop = FALSE],
.lmunb , earg = .emunb )
@@ -3965,20 +4417,46 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
donempstr0.deta <- dtheta.deta(onempstr0, .lonempstr0 ,
earg = .eonempstr0 )
- dmunb.deta <- dtheta.deta(munb , .lmunb , earg = .emunb )
- dsize.deta <- dtheta.deta(kmat , .lsize , earg = .esize )
+ dmunb.deta <- dtheta.deta(munb , .lmunb , earg = .emunb )
+ dsize.deta <- dtheta.deta(kmat , .lsize , earg = .esize )
dthetas.detas <-
(cbind(dmunb.deta,
dsize.deta,
- donempstr0.deta))[, interleave.VGAM(M1*NOS,
- M = M1)]
+ donempstr0.deta))[, interleave.VGAM(M1*NOS, M1 = M1)]
+
+
+
+
+ smallval <- 1e-2 # Something like this is needed
+ if (any(big.size <- munb / kmat < smallval)) {
+ warning("parameter 'size' has very large values; ",
+ "try fitting a zero-inflated Poisson ",
+ "model instead")
+ kmat[big.size] <- munb[big.size] / smallval
+ }
+
+
+
+ tempk <- 1 / (1 + munb / kmat) # kmat / (kmat + munb)
+ tempm <- munb / (kmat + munb)
+ prob0 <- tempk^kmat
+ oneminusf0 <- 1 - prob0
+ AA16 <- tempm + log(tempk)
+ df0.dmunb <- -tempk * prob0
+ df0.dkmat <- cbind(prob0 * AA16)
+ df02.dmunb2 <- prob0 * tempk * (1 + 1/kmat) / (1 + munb/kmat)
+ df02.dkmat2 <- prob0 * ((tempm^2) / kmat + AA16^2)
+ df02.dkmat.dmunb <- -prob0 * (tempm/kmat + AA16) / (1 + munb/kmat)
- dl.dmunb <- y / munb - (y + kmat) / (munb + kmat)
+ pstr0 <- 1 - onempstr0
+ AA <- pobs0 <- cbind(pstr0 + (onempstr0) * prob0)
+
+
+ dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat)
dl.dsize <- digamma(y + kmat) - digamma(kmat) -
- (y + kmat) / (munb + kmat) + 1 +
- log(kmat / (kmat + munb))
+ (y - munb) / (munb + kmat) + log(tempk)
dl.donempstr0 <- +1 / (onempstr0)
@@ -4009,114 +4487,195 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
dl.dthetas <-
cbind(dl.dmunb,
dl.dsize,
- dl.donempstr0)[, interleave.VGAM(M1*NOS, M = M1)]
+ dl.donempstr0)[, interleave.VGAM(M1*NOS, M1 = M1)]
c(w) * dl.dthetas * dthetas.detas
}), list( .lonempstr0 = lonempstr0, .lmunb = lmunb, .lsize = lsize,
.eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize ))),
+
+
+
weight = eval(substitute(expression({
- wz <- matrix(0, n, M1*M - M1)
+ wz <- matrix(0, n, M + M-1 + M-2)
+ mymu <- munb / oneminusf0 # Is the same as 'mu', == E(Y)
- ind3 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
+ max.support <- .max.support
+ max.chunk.MB <- .max.chunk.MB
- run.varcov <- array(0.0, c(n, length(ind3$row.index), NOS))
- for (ii in 1:( .nsimEIM )) {
- ysim <- rzinegbin(n = n*NOS, pstr0 = 1 - onempstr0,
- size = kmat, mu = munb)
- dim(ysim) <- c(n, NOS)
- index0 <- (ysim[, spp.] == 0)
- dl.dmunb <- ysim / munb - (ysim + kmat) / (munb + kmat)
- dl.dsize <- digamma(ysim + kmat) - digamma(kmat) -
- (ysim + kmat) / (munb + kmat) + 1 +
- log(kmat / (kmat + munb))
- dl.donempstr0 <- +1 / (onempstr0)
+
+ ind2 <- matrix(FALSE, n, NOS) # Used for SFS
+ for (jay in 1:NOS) {
+ eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+ Q.mins <- 1
+ Q.maxs <- qposnegbin(p = eff.p[2] ,
+ munb = munb[, jay],
+ size = kmat[, jay]) + 10
- for (spp. in 1:NOS) {
- index0 <- (ysim[, spp.] == 0)
- if (all(index0) || all(!index0)) {
- repeat {
- ysim[, spp.] <- rzinegbin(n = n,
- pstr0 = 1 - onempstr0[, spp.],
- size = kmat[, spp.],
- mu = munb[, spp.])
- index0 <- (ysim[, spp.] == 0)
- if (any(!index0) && any(index0))
- break
- }
- }
- munb. <- munb[index0, spp.]
- kmat. <- kmat[index0, spp.]
- onempstr0. <- onempstr0[index0, spp.]
+ eps.trig <- .eps.trig
+ Q.MAXS <- pmax(10, ceiling(1 / sqrt(eps.trig)))
+ Q.maxs <- pmin(Q.maxs, Q.MAXS)
- tempk. <- kmat. / (kmat. + munb.)
- tempm. <- munb. / (kmat. + munb.)
- prob0. <- tempk.^kmat.
- df0.dmunb. <- -tempk.* prob0.
- df0.dkmat. <- prob0. * (tempm. + log(tempk.))
- denom. <- 1 - onempstr0. + (onempstr0.) * prob0.
- dl.donempstr0[index0, spp.] <- -(1 - prob0.) / denom. # note "-"
- dl.dmunb[index0, spp.] <- (onempstr0.) * df0.dmunb. / denom.
- dl.dsize[index0, spp.] <- (onempstr0.) * df0.dkmat. / denom.
- sdl.dthetas <- cbind(dl.dmunb[, spp.],
- dl.dsize[, spp.],
- dl.donempstr0[, spp.])
+ ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+ if ((NN <- sum(ind1)) > 0) {
+ Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+ n.chunks <- if (intercept.only) 1 else
+ max(1, ceiling( Object.Size / max.chunk.MB))
+ chunk.rows <- ceiling(NN / n.chunks)
+ ind2[, jay] <- ind1 # Save this
+ wind2 <- which(ind1)
- temp3 <- sdl.dthetas
- run.varcov[,, spp.] <- run.varcov[,, spp.] +
- temp3[, ind3$row.index] *
- temp3[, ind3$col.index]
+ upr.ptr <- 0
+ lwr.ptr <- upr.ptr + 1
+ while (lwr.ptr <= NN) {
+ upr.ptr <- min(upr.ptr + chunk.rows, NN)
+ sind2 <- wind2[lwr.ptr:upr.ptr]
+ wz[sind2, M1*jay - 1] <-
+ EIM.posNB.specialp(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only,
+ second.deriv = FALSE)
+ if (FALSE)
+ wz2[sind2, M1*jay - 1] <-
+ EIM.posNB.speciald(munb = munb[sind2, jay],
+ size = kmat[sind2, jay],
+ y.min = min(Q.mins2[sind2]),
+ y.max = max(Q.maxs[sind2]),
+ cutoff.prob = .cutoff.prob ,
+ prob0 = prob0[sind2, jay],
+ df0.dkmat = df0.dkmat[sind2, jay],
+ df02.dkmat2 = df02.dkmat2[sind2, jay],
+ intercept.only = intercept.only,
+ second.deriv = FALSE)
- } # End of for (spp.) loop
- } # End of ii nsimEIM loop
- run.varcov <- run.varcov / .nsimEIM
- wz1 <- if (intercept.only) {
- for (spp. in 1:NOS) {
- for (jay in 1:length(ind3$row.index)) {
- run.varcov[, jay, spp.] <- mean(run.varcov[, jay, spp.])
- }
+
+
+
+ wz[sind2, M1*jay - 1] <-
+ wz[sind2, M1*jay - 1] * (1 - AA[sind2, jay]) -
+ (1-pstr0[sind2, jay]) * (df02.dkmat2[sind2, jay] -
+ (1-pstr0[sind2, jay]) * (df0.dkmat[sind2, jay]^2) / AA[sind2, jay])
+
+
+
+ if (any(eim.kk.TF <- wz[sind2, M1*jay - 1] <= 0 |
+ is.na(wz[sind2, M1*jay - 1]))) {
+ ind2[sind2[eim.kk.TF], jay] <- FALSE
+ }
+
+
+
+ lwr.ptr <- upr.ptr + 1
+ } # while
+
}
- run.varcov
- } else {
- run.varcov
- }
+ } # end of for (jay in 1:NOS)
- for (spp. in 1:NOS) {
- wz1[,, spp.] <- wz1[,, spp.] *
- dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
- dthetas.detas[, M1 * (spp. - 1) + ind3$col]
- }
- for (spp. in 1:NOS) {
- for (jay in 1:M1) {
- for (kay in jay:M1) {
- cptr <- iam((spp. - 1) * M1 + jay,
- (spp. - 1) * M1 + kay, M = M)
- temp.wz1 <- wz1[,, spp.]
- wz[, cptr] <- temp.wz1[, iam(jay, kay, M = M1)]
- }
+
+
+
+
+
+
+
+ for (jay in 1:NOS) {
+ run.varcov <- 0
+ ii.TF <- !ind2[, jay] # Not assigned above
+ if (any(ii.TF)) {
+ kkvec <- kmat[ii.TF, jay]
+ muvec <- munb[ii.TF, jay]
+ PSTR0 <- pstr0[ii.TF, jay]
+ for (ii in 1:( .nsimEIM )) {
+ ysim <- rzinegbin(sum(ii.TF), pstr0 = PSTR0,
+ mu = muvec, size = kkvec)
+
+ index0 <- (ysim == 0)
+
+
+ dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+ (ysim - muvec) / (muvec + kkvec) +
+ log1p(-muvec / (kkvec + muvec)) # +
+
+ ans0 <- (1 - PSTR0) *
+ df0.dkmat[ii.TF , jay] / AA[ii.TF , jay]
+ dl.dk[index0] <- ans0[index0]
+
+ run.varcov <- run.varcov + dl.dk^2
+ } # end of for loop
+
+ run.varcov <- c(run.varcov / .nsimEIM )
+ ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+ wz[ii.TF, M1*jay - 1] <- ned2l.dk2 # * (dsize.deta[ii.TF, jay])^2
}
}
- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
+
+ wz[, M1*(1:NOS) - 1] <- wz[, M1*(1:NOS) - 1] * dsize.deta^2
+
+
+
+
+ save.weights <- !all(ind2)
+
+
+ ned2l.donempstr02 <- oneminusf0 / (AA * (onempstr0))
+ wz[, M1*(1:NOS) ] <- ned2l.donempstr02 * donempstr0.deta^2
+
+ ned2l.donempstr0.dmunb <- -df0.dmunb / AA # Negated (1/2)
+ wz[, M + M-1 + M1*(1:NOS) - 2] <- ned2l.donempstr0.dmunb *
+ donempstr0.deta * dmunb.deta
+
+ ned2l.donempstr0.dsize <- -df0.dkmat / AA # Negated (2/2)
+ wz[, M + M1*(1:NOS) - 1] <- ned2l.donempstr0.dsize *
+ donempstr0.deta * dsize.deta
+
+ ned2l.dmunb2 <-
+ (1 - AA) * (mymu / munb^2 -
+ ((1 + mymu/kmat) / kmat) / (1 + munb/kmat)^2) -
+ (1-pstr0) * (df02.dmunb2 -
+ (1 - pstr0) * (df0.dmunb^2) / AA)
+ wz[, M1*(1:NOS) - 2] <- ned2l.dmunb2 * dmunb.deta^2
+
+
+ dAA.dmunb <- (onempstr0) * df0.dmunb
+ ned2l.dmunbsize <-
+ (1 - AA) * (munb - mymu) / (munb + kmat)^2 -
+ (onempstr0) * (df02.dkmat.dmunb -
+ df0.dkmat * dAA.dmunb / AA)
+
+ wz[, M + M1*(1:NOS) - 2] <- ned2l.dmunbsize * dmunb.deta *
+ dsize.deta
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lonempstr0 = lonempstr0,
- .eonempstr0 = eonempstr0, .nsimEIM = nsimEIM ))))
+ .eonempstr0 = eonempstr0, .nsimEIM = nsimEIM,
+ .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+ .max.support = max.support,
+ .max.chunk.MB = max.chunk.MB ))))
} # End of zinegbinomialff
@@ -4130,13 +4689,16 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
zipoissonff <-
function(llambda = "loge", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ilambda = NULL, ionempstr0 = NULL, imethod = 1,
- ishrinkage = 0.8, zero = -2) {
+ type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
+ ilambda = NULL, ionempstr0 = NULL,
+ gonempstr0 = NULL, # (1:9) / 10,
+ imethod = 1,
+ ishrinkage = 0.95, probs.y = 0.35,
+ zero = "onempstr0") {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
@@ -4148,6 +4710,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
eonempstr0 <- link2list(lonempstr0)
lonempstr0 <- attr(eonempstr0, "function.name")
+ ipstr0.small <- 1/64 # A number easily represented exactly
if (length(ilambda))
@@ -4159,18 +4722,6 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
stop("'ionempstr0' values must be inside the interval (0,1)")
- if (!is.Numeric(imethod, length.arg = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.Numeric(ishrinkage, length.arg = 1) ||
- ishrinkage < 0 ||
- ishrinkage > 1)
- stop("bad input for argument 'ishrinkage'")
-
-
-
new("vglmff",
blurb = c("Zero-inflated Poisson\n\n",
"Links: ",
@@ -4178,14 +4729,17 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
"Mean: onempstr0 * lambda"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("lambda", "onempstr0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -4193,13 +4747,14 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
))),
initialize = eval(substitute(expression({
-
+ M1 <- 2
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -4210,79 +4765,68 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
ncoly <- ncol(y)
- M1 <- 2
extra$ncoly <- ncoly
extra$M1 <- M1
M <- M1 * ncoly
extra$type.fitted <- .type.fitted
extra$dimnamesy <- dimnames(y)
-
- mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("onempstr0", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- param.names("lambda", ncoly)
+ mynames2 <- param.names("onempstr0", ncoly)
predictors.names <-
c(namesof(mynames1, .llambda , earg = .elambda , tag = FALSE),
namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
if (!length(etastart)) {
+ matL <- Init.mu(y = y, w = w, imethod = .imethod , # x = x,
+ imu = .ilambda , ishrinkage = .ishrinkage ,
+ pos.only = TRUE,
+ probs.y = .probs.y )
- matL <- matrix(if (length( .ilambda )) .ilambda else 0,
- n, ncoly, byrow = TRUE)
matP <- matrix(if (length( .ionempstr0 )) .ionempstr0 else 0,
n, ncoly, byrow = TRUE)
+ phi0.grid <- .gonempstr0
+ ipstr0.small <- .ipstr0.small # A number easily represented exactly
+ if (!length( .ionempstr0 ))
for (jay in 1:ncoly) {
- yjay <- y[, jay]
-
- Phi0.init <- 1 - 0.85 * sum(w[yjay > 0]) / sum(w)
- Phi0.init[Phi0.init <= 0.02] <- 0.02 # Last resort
- Phi0.init[Phi0.init >= 0.98] <- 0.98 # Last resort
-
- if ( length(mustart)) {
- mustart <- matrix(mustart, n, ncoly) # Make sure right size
- Lambda.init <- mustart / (1 - Phi0.init)
- } else if ( .imethod == 2) {
- mymean <- weighted.mean(yjay[yjay > 0],
- w[yjay > 0]) + 1/16
- Lambda.init <- (1 - .ishrinkage ) * (yjay + 1/8) + .ishrinkage * mymean
- } else {
- use.this <- median(yjay[yjay > 0]) + 1 / 16
- Lambda.init <- (1 - .ishrinkage ) * (yjay + 1/8) + .ishrinkage * use.this
- }
-
zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
sum(c(w) * dzipois(x = y, pstr0 = phival,
lambda = extraargs$lambda,
log = TRUE))
}
- phi0.grid <- seq(0.02, 0.98, len = 21)
- Phi0mat.init <- grid.search(phi0.grid,
- objfun = zipois.Loglikfun,
- y = y, x = x, w = w,
- extraargs = list(lambda = Lambda.init))
- if (length(mustart)) {
- Lambda.init <- Lambda.init / (1 - Phi0mat.init)
- }
+ Phi0.init <- if (length(phi0.grid)) {
+ grid.search(phi0.grid,
+ objfun = zipois.Loglikfun,
+ y = y[, jay], x = x, w = w[, jay],
+ extraargs = list(lambda = matL[, jay]))
+ } else {
+ pmax(ipstr0.small,
+ weighted.mean(y[, jay] == 0, w[, jay]) -
+ dpois(0, matL[, jay]))
+ }
+ if (mean(Phi0.init == ipstr0.small) > 0.95)
+ warning("from the initial values only, the data appears to ",
+ "have little or no 0-inflation")
- if (!length( .ilambda ))
- matL[, jay] <- Lambda.init
- if (!length( .ionempstr0 ))
- matP[, jay] <- Phi0mat.init
- }
+ matP[, jay] <- Phi0.init
+ } # for (jay)
etastart <-
cbind(theta2eta( matL, .llambda , earg = .elambda ),
theta2eta(1 - matP, .lonempstr0 , earg = .eonempstr0 ))[,
- interleave.VGAM(M, M = M1)]
+ interleave.VGAM(M, M1 = M1)]
mustart <- NULL # Since etastart has been computed.
}
}), list( .lonempstr0 = lonempstr0, .llambda = llambda,
.eonempstr0 = eonempstr0, .elambda = elambda,
.ionempstr0 = ionempstr0, .ilambda = ilambda,
- .type.fitted = type.fitted,
+ .gonempstr0 = gonempstr0,
+ .type.fitted = type.fitted, .probs.y = probs.y,
+ .ipstr0.small = ipstr0.small,
.imethod = imethod, .ishrinkage = ishrinkage ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -4293,10 +4837,10 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
M1 <- 2
- ncoly <- extra$ncoly
+ ncoly <- ncol(eta) / M1
lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
earg = .elambda )
onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
@@ -4305,6 +4849,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
ans <- switch(type.fitted,
"mean" = onempstr0 * lambda,
+ "lambda" = lambda,
"pobs0" = 1 + onempstr0 * expm1(-lambda), # P(Y=0)
"pstr0" = 1 - onempstr0,
"onempstr0" = onempstr0)
@@ -4327,8 +4872,8 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
M1 <- extra$M1
misc$link <-
c(rep( .llambda , length = ncoly),
- rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M = M1)]
- temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+ rep( .lonempstr0 , length = ncoly))[interleave.VGAM(M, M1 = M1)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
names(misc$link) <- temp.names
@@ -4360,11 +4905,9 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
function(mu, y, w, residuals = FALSE, eta,
extra = NULL,
summation = TRUE) {
- M1 <- 2
- ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
earg = .elambda )
- onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
earg = .eonempstr0 )
@@ -4406,10 +4949,10 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
deriv = eval(substitute(expression({
M1 <- 2
- ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
- earg = .elambda )
- onempstr0 <- eta2theta(eta[, M1*(1:ncoly) ], .lonempstr0 ,
+ ncoly <- ncol(eta) / M1 # extra$ncoly
+ lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
+ earg = .elambda )
+ onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
earg = .eonempstr0 )
@@ -4427,7 +4970,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
dl.donempstr0 * donempstr0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
if ( .llambda == "loge" && is.empty.list( .elambda ) &&
@@ -4595,7 +5138,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
function(
lpstr0 = "logit",
lprob = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
ipstr0 = NULL, iprob = NULL,
imethod = 1,
bias.red = 0.5,
@@ -4616,7 +5159,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
lprob <- attr(eprob, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
if (length(ipstr0))
@@ -4651,23 +5194,23 @@ rzigeom <- function(n, prob, pstr0 = 0) {
"Mean: (1 - pstr0) * (1 - prob) / prob"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("pstr0", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
-
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
@@ -4685,15 +5228,12 @@ rzigeom <- function(n, prob, pstr0 = 0) {
extra$dimnamesy <- dimnames(y)
- mynames1 <- if (ncoly == 1) "pstr0" else
- paste("pstr0", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "prob" else
- paste("prob", 1:ncoly, sep = "")
-
+ mynames1 <- param.names("pstr0", ncoly)
+ mynames2 <- param.names("prob", ncoly)
predictors.names <-
c(namesof(mynames1, .lpstr0, earg = .epstr0, tag = FALSE),
namesof(mynames2, .lprob, earg = .eprob, tag = FALSE))[
- interleave.VGAM(M1 * NOS, M = M1)]
+ interleave.VGAM(M1 * NOS, M1 = M1)]
if (!length(etastart)) {
@@ -4736,7 +5276,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
etastart <-
cbind(theta2eta(psze.init, .lpstr0, earg = .epstr0),
theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0,
@@ -4755,10 +5295,11 @@ rzigeom <- function(n, prob, pstr0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
"mean" = (1 - pstr0) * (1 - prob) / prob,
+ "prob" = prob,
"pobs0" = pstr0 + (1 - pstr0) * prob, # P(Y=0)
"pstr0" = pstr0,
"onempstr0" = 1 - pstr0)
@@ -4779,14 +5320,14 @@ rzigeom <- function(n, prob, pstr0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lpstr0 , len = NOS),
rep( .lprob , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
for (ii in 1:NOS) {
misc$earg[[M1*ii-1]] <- .epstr0
@@ -4880,7 +5421,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
dl.deta12 <- c(w) * cbind(dl.dpstr0 * dpstr0.deta,
dl.dprob * dprob.deta)
- dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = M1)]
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M1 = M1)]
dl.deta12
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
@@ -4930,11 +5471,11 @@ rzigeom <- function(n, prob, pstr0 = 0) {
zigeometricff <-
function(lprob = "logit",
lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
iprob = NULL, ionempstr0 = NULL,
imethod = 1,
bias.red = 0.5,
- zero = -2) {
+ zero = "onempstr0") {
expected <- TRUE
@@ -4951,7 +5492,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
if (length(iprob))
@@ -4986,23 +5527,23 @@ rzigeom <- function(n, prob, pstr0 = 0) {
"Mean: onempstr0 * (1 - prob) / prob"),
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("prob", "onempstr0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
.type.fitted = type.fitted ))),
initialize = eval(substitute(expression({
-
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
@@ -5020,15 +5561,12 @@ rzigeom <- function(n, prob, pstr0 = 0) {
extra$dimnamesy <- dimnames(y)
- mynames1 <- if (ncoly == 1) "prob" else
- paste("prob", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "onempstr0" else
- paste("onempstr0", 1:ncoly, sep = "")
-
+ mynames1 <- param.names("prob", ncoly)
+ mynames2 <- param.names("onempstr0", ncoly)
predictors.names <-
c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
namesof(mynames2, .lonempstr0 , earg = .eonempstr0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
@@ -5071,7 +5609,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
etastart <-
cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
theta2eta(1 - psze.init, .lonempstr0 , earg = .eonempstr0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lprob = lprob, .lonempstr0 = lonempstr0,
.eprob = eprob, .eonempstr0 = eonempstr0,
@@ -5092,10 +5630,11 @@ rzigeom <- function(n, prob, pstr0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "pstr0", "onempstr0"))[1]
+ c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
ans <- switch(type.fitted,
"mean" = onempstr0 * (1 - prob) / prob,
+ "prob" = prob,
"pobs0" = 1 - onempstr0 + onempstr0 * prob, # P(Y=0)
"pstr0" = 1 - onempstr0,
"onempstr0" = onempstr0)
@@ -5116,14 +5655,14 @@ rzigeom <- function(n, prob, pstr0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lprob , len = NOS),
rep( .lonempstr0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
for (ii in 1:NOS) {
misc$earg[[M1*ii-1]] <- .eprob
@@ -5226,7 +5765,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
dl.deta12 <- c(w) * cbind(dl.dprob * dprob.deta,
dl.donempstr0 * donempstr0.deta)
- dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M = M1)]
+ dl.deta12 <- dl.deta12[, interleave.VGAM(ncol(dl.deta12), M1 = M1)]
dl.deta12
}), list( .lprob = lprob, .lonempstr0 = lonempstr0,
.eprob = eprob, .eonempstr0 = eonempstr0 ))),
@@ -5319,6 +5858,10 @@ pzageom <- function(q, prob, pobs0 = 0) {
pposgeom(q[q > 0], prob = prob[q > 0])
ans[q < 0] <- 0
ans[q == 0] <- pobs0[q == 0]
+
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+
ans
}
@@ -5413,6 +5956,10 @@ pzabinom <- function(q, size, prob, pobs0 = 0) {
pposbinom(q[q > 0], size = size[q > 0], prob = prob[q > 0])
ans[q < 0] <- 0
ans[q == 0] <- pobs0[q == 0]
+
+ ans <- pmax(0, ans)
+ ans <- pmin(1, ans)
+
ans
}
@@ -5460,7 +6007,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
zabinomial <-
function(lpobs0 = "logit",
lprob = "logit",
- type.fitted = c("mean", "pobs0"),
+ type.fitted = c("mean", "prob", "pobs0"),
ipobs0 = NULL, iprob = NULL,
imethod = 1,
zero = NULL # Was zero = 2 prior to 20130917
@@ -5478,7 +6025,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0"))[1]
+ c("mean", "prob", "pobs0"))[1]
if (length(ipobs0))
if (!is.Numeric(ipobs0, positive = TRUE) ||
@@ -5509,11 +6056,17 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
namesof("prob" , lprob, earg = eprob), "\n",
"Mean: (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("pobs0", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5624,7 +6177,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0"))[1]
+ c("mean", "prob", "pobs0"))[1]
phi0 <- eta2theta(eta[, 1], .lpobs0, earg = .epobs0 )
prob <- eta2theta(eta[, 2], .lprob, earg = .eprob )
@@ -5634,6 +6187,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- switch(type.fitted,
"mean" = (1 - phi0) * prob / (1 - (1 - prob)^Size),
+ "prob" = prob,
"pobs0" = phi0) # P(Y=0)
if (length(extra$dimnamesy) &&
is.matrix(ans) &&
@@ -5688,8 +6242,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
vfamily = c("zabinomial"),
deriv = eval(substitute(expression({
- NOS <- if (length(extra$NOS)) extra$NOS else 1
M1 <- 2
+ NOS <- if (length(extra$NOS)) extra$NOS else 1
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
@@ -5767,10 +6321,10 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
zabinomialff <-
function(lprob = "logit",
lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
iprob = NULL, ionempobs0 = NULL,
imethod = 1,
- zero = 2) {
+ zero = "onempobs0") {
lprob <- as.list(substitute(lprob))
@@ -5783,7 +6337,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
if (length(iprob))
if (!is.Numeric(iprob, positive = TRUE) ||
@@ -5813,11 +6367,17 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
"Mean: onempobs0 * prob / (1 - (1 - prob)^size)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
+ Q1 = NA,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("prob", "onempobs0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -5926,7 +6486,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
@@ -5936,6 +6496,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- switch(type.fitted,
"mean" = onempobs0 * prob / (1 - (1 - prob)^Size),
+ "prob" = prob,
"pobs0" = 1 - onempobs0, # P(Y=0)
"onempobs0" = onempobs0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -5991,8 +6552,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
vfamily = c("zabinomialff"),
deriv = eval(substitute(expression({
- NOS <- if (length(extra$NOS)) extra$NOS else 1
M1 <- 2
+ NOS <- if (length(extra$NOS)) extra$NOS else 1
orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
new.w <- if (length(extra$new.w)) extra$new.w else 1
@@ -6073,11 +6634,12 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
- zageometric <- function(lpobs0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- imethod = 1,
- ipobs0 = NULL, iprob = NULL,
- zero = NULL) {
+ zageometric <-
+ function(lpobs0 = "logit", lprob = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ imethod = 1,
+ ipobs0 = NULL, iprob = NULL,
+ zero = NULL) {
@@ -6090,7 +6652,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
lprob <- attr(eprob, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
if (!is.Numeric(imethod, length.arg = 1,
@@ -6117,14 +6679,17 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = FALSE,
+ parameters.names = c("pobs0", "prob"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -6133,14 +6698,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
initialize = eval(substitute(expression({
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -6158,14 +6722,12 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
extra$type.fitted <- .type.fitted
- mynames1 <- if (ncoly == 1) "pobs0" else
- paste("pobs0", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "prob" else
- paste("prob", 1:ncoly, sep = "")
+ mynames1 <- param.names("pobs0", ncoly)
+ mynames2 <- param.names("prob", ncoly)
predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
@@ -6193,7 +6755,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
etastart <- cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
theta2eta(prob.init, .lprob , earg = .eprob ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob,
@@ -6208,10 +6770,9 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
-
- NOS <- extra$NOS
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
M1 <- 2
+ NOS <- ncol(eta) / M1
phi0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
.lpobs0 , earg = .epobs0 ))
@@ -6221,6 +6782,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- switch(type.fitted,
"mean" = (1 - phi0) / prob,
+ "prob" = prob,
"pobs0" = phi0, # P(Y=0)
"onempobs0" = 1 - phi0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -6239,14 +6801,14 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lpobs0 , len = NOS),
rep( .lprob , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
for (ii in 1:NOS) {
misc$earg[[M1*ii-1]] <- .epobs0
@@ -6314,7 +6876,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
deriv = eval(substitute(expression({
M1 <- 2
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1 # extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
@@ -6338,7 +6900,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
dl.dprob * dprob.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
@@ -6362,7 +6924,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
wz[, 1:NOS] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
wz
@@ -6373,11 +6935,12 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
- zageometricff <- function(lprob = "logit", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- imethod = 1,
- iprob = NULL, ionempobs0 = NULL,
- zero = -2) {
+ zageometricff <-
+ function(lprob = "logit", lonempobs0 = "logit",
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ imethod = 1,
+ iprob = NULL, ionempobs0 = NULL,
+ zero = "onempobs0") {
lprob <- as.list(substitute(lprob))
@@ -6389,7 +6952,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
lonempobs0 <- attr(eonempobs0, "function.name")
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
if (!is.Numeric(imethod, length.arg = 1,
@@ -6418,14 +6981,17 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
constraints = eval(substitute(expression({
- dotzero <- .zero
- M1 <- 2
- eval(negzero.expression.VGAM)
+ constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+ predictors.names = predictors.names,
+ M1 = 2)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(M1 = 2,
Q1 = 1,
+ expected = TRUE,
+ multipleResponses = TRUE,
+ parameters.names = c("prob", "onempobs0"),
type.fitted = .type.fitted ,
zero = .zero )
}, list( .zero = zero,
@@ -6434,14 +7000,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
initialize = eval(substitute(expression({
M1 <- 2
- if (any(y < 0))
- stop("the response must not have negative values")
temp5 <-
w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
ncol.w.max = Inf,
ncol.y.max = Inf,
- Is.integer.y = TRUE,
out.wy = TRUE,
colsyperw = 1,
maximize = TRUE)
@@ -6459,14 +7024,12 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
extra$type.fitted <- .type.fitted
- mynames1 <- if (ncoly == 1) "prob" else
- paste("prob", 1:ncoly, sep = "")
- mynames2 <- if (ncoly == 1) "onempobs0" else
- paste("onempobs0", 1:ncoly, sep = "")
+ mynames1 <- param.names("prob", ncoly)
+ mynames2 <- param.names("onempobs0", ncoly)
predictors.names <-
c(namesof(mynames1, .lprob , earg = .eprob , tag = FALSE),
namesof(mynames2, .lonempobs0 , earg = .eonempobs0 , tag = FALSE))[
- interleave.VGAM(M1*NOS, M = M1)]
+ interleave.VGAM(M1*NOS, M1 = M1)]
if (!length(etastart)) {
@@ -6496,7 +7059,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
cbind(theta2eta( prob.init, .lprob , earg = .eprob ),
theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = M1)]
+ etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
}
}), list( .lonempobs0 = lonempobs0, .lprob = lprob,
.eonempobs0 = eonempobs0, .eprob = eprob,
@@ -6511,7 +7074,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
}
type.fitted <- match.arg(type.fitted,
- c("mean", "pobs0", "onempobs0"))[1]
+ c("mean", "prob", "pobs0", "onempobs0"))[1]
NOS <- extra$NOS
M1 <- 2
@@ -6523,7 +7086,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- switch(type.fitted,
- "mean" = (onempobs0) / prob,
+ "mean" = onempobs0 / prob,
+ "prob" = prob,
"pobs0" = 1 - onempobs0, # P(Y=0)
"onempobs0" = onempobs0) # P(Y>0)
if (length(extra$dimnamesy) &&
@@ -6542,14 +7106,14 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
last = eval(substitute(expression({
temp.names <- c(rep( .lprob , len = NOS),
rep( .lonempobs0 , len = NOS))
- temp.names <- temp.names[interleave.VGAM(M1*NOS, M = M1)]
+ temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
misc$link <- temp.names
misc$earg <- vector("list", M1 * NOS)
names(misc$link) <-
names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M = M1)]
+ c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
for (ii in 1:NOS) {
misc$earg[[M1*ii-1]] <- .eprob
@@ -6618,7 +7182,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
deriv = eval(substitute(expression({
M1 <- 2
- NOS <- extra$NOS
+ NOS <- ncol(eta) / M1 # extra$NOS
y0 <- extra$y0
skip <- extra$skip.these
@@ -6644,7 +7208,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
ans <- c(w) * cbind(dl.dprob * dprob.deta,
dl.donempobs0 * donempobs0.deta)
- ans <- ans[, interleave.VGAM(ncol(ans), M = M1)]
+ ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
ans
}), list( .lonempobs0 = lonempobs0, .lprob = lprob,
.eonempobs0 = eonempobs0, .eprob = eprob ))),
@@ -6671,7 +7235,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
wz[, NOS+(1:NOS)] <- tmp200
- wz <- wz[, interleave.VGAM(ncol(wz), M = M1)]
+ wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
wz
diff --git a/R/links.q b/R/links.q
index f1672ad..783da5d 100644
--- a/R/links.q
+++ b/R/links.q
@@ -277,12 +277,7 @@ care.exp <- function(x,
}
if (inverse) {
switch(deriv+1, {
- yy <- theta
- Neg <- (theta < 0) & !is.na(theta)
- yy[ Neg] <- exp(theta[Neg]) / (1 + exp(theta[Neg]))
- Pos <- (theta >= 0) & !is.na(theta)
- yy[Pos] <- 1 / (1 + exp(-theta[Pos]))
- yy
+ plogis(theta)
},
1 / Recall(theta = theta,
bvalue = bvalue,
@@ -291,10 +286,7 @@ care.exp <- function(x,
stop("argument 'deriv' unmatched"))
} else {
switch(deriv+1, {
- temp2 <- log(theta) - log1p(-theta)
- if (any(near0.5 <- (abs(theta - 0.5) < 0.000125) & !is.na(theta)))
- temp2[near0.5] <- log(theta[near0.5] / (1 - theta[near0.5]))
- temp2
+ qlogis(theta)
},
exp(-log(theta) - log1p(-theta)),
(2 * theta - 1) / (exp(log(theta) + log1p(-theta)))^2,
@@ -1627,7 +1619,7 @@ warning("20150711; this function has not been updated")
nbcanlink <- function(theta,
size = NULL,
- wrt.eta = NULL,
+ wrt.param = NULL,
bvalue = NULL,
inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
@@ -1656,8 +1648,8 @@ warning("20150711; this function has not been updated")
if (deriv > 0) {
- if (!(wrt.eta %in% 1:2))
- stop("argument 'wrt.eta' should be 1 or 2")
+ if (!(wrt.param %in% 1:2))
+ stop("argument 'wrt.param' should be 1 or 2")
}
@@ -1673,21 +1665,21 @@ warning("20150711; this function has not been updated")
ans
},
- if (wrt.eta == 1) (theta * (theta + kmatrix)) / kmatrix else
+ if (wrt.param == 1) (theta * (theta + kmatrix)) / kmatrix else
-(theta + kmatrix),
- if (wrt.eta == 1)
+ if (wrt.param == 1)
(2 * theta + kmatrix) * theta * (theta + kmatrix) / kmatrix^2 else
theta + kmatrix)
} else {
ans <-
switch(deriv+1,
- log(theta / (theta + kmatrix)) ,
+ log(theta / (theta + kmatrix)),
- if (wrt.eta == 1) kmatrix / (theta * (theta + kmatrix)) else
+ if (wrt.param == 1) kmatrix / (theta * (theta + kmatrix)) else
-1 / (theta + kmatrix),
- if (wrt.eta == 1)
+ if (wrt.param == 1)
(2 * theta + kmatrix) *
(-kmatrix) / (theta * (theta + kmatrix))^2 else
1 / (theta + kmatrix)^2)
@@ -1750,6 +1742,57 @@ setMethod("linkfun", "vglm", function(object, ...)
+ logitoffsetlink <-
+ function(theta,
+ offset = 0,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("logitoffsetlink(",
+ theta,
+ ", ", offset[1],
+ ")", sep = "") else
+ paste("log(",
+ as.char.expression(theta),
+ "/(1-",
+ as.char.expression(theta),
+ ")",
+ " - ", offset[1],
+ ")", sep = "")
+ if (tag)
+ string <- paste("Logit-with-offset:", string)
+ return(string)
+ }
+
+
+
+
+ if (inverse) {
+ switch(deriv+1, {
+ exp.eta <- exp(theta)
+ (exp.eta + offset) / (1 + exp.eta + offset)
+ },
+ 1 / Recall(theta = theta,
+ offset = offset,
+ inverse = FALSE, deriv = deriv),
+ theta * (1 - theta) * (1 - 2 * theta),
+ stop("argument 'deriv' unmatched"))
+ } else {
+ switch(deriv+1, {
+ temp2 <- log(theta / (1 - theta) - offset)
+ temp2
+ },
+ 1 / ((1 - theta) * (theta - (1-theta) * offset)),
+ (2 * (theta - offset * (1-theta)) - 1) / (
+ (theta - (1-theta)*offset) * (1-theta))^2,
+ stop("argument 'deriv' unmatched"))
+ }
+}
+
+
+
+
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index d1e5efb..df2b0f1 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -199,7 +199,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
}
}
- rval <- matrix(rep(as.numeric(NA), 5 * nmodels), ncol = 5)
+ rval <- matrix(rep(NA_real_, 5 * nmodels), ncol = 5)
colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
rownames(rval) <- 1:nmodels
@@ -374,7 +374,7 @@ lrtest.default <- function(object, ..., name = NULL) {
}
}
- rval <- matrix(rep(as.numeric(NA), 5 * nmodels), ncol = 5)
+ rval <- matrix(rep(NA_real_, 5 * nmodels), ncol = 5)
colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
rownames(rval) <- 1:nmodels
@@ -549,7 +549,7 @@ waldtest_default <- function(object, ..., vcov = NULL,
stop("to compare more than 2 models `vcov.' needs to be a function")
test <- match.arg(test)
- rval <- matrix(rep(as.numeric(NA), 4 * nmodels), ncol = 4)
+ rval <- matrix(rep(NA_real_, 4 * nmodels), ncol = 4)
colnames(rval) <- c("Res.Df", "Df", test,
paste("Pr(>", test, ")", sep = ""))
rownames(rval) <- 1:nmodels
diff --git a/R/mux.q b/R/mux.q
index 051288f..30ab1ed 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -62,7 +62,7 @@ mux2 <- function(cc, xmat) {
M <- d[1]
if (d[2] != p || d[3] != n)
stop("dimension size inconformable")
- ans <- rep(as.numeric(NA), n*M)
+ ans <- rep(NA_real_, n*M)
fred <- .C("mux2", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(p), as.integer(n),
as.integer(M), NAOK = TRUE)
@@ -81,7 +81,7 @@ mux22 <- function(cc, xmat, M, upper = FALSE, as.matrix = FALSE) {
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
dimm.value <- nrow(cc) # Usually M or M(M+1)/2
- ans <- rep(as.numeric(NA), n*M)
+ ans <- rep(NA_real_, n*M)
fred <- .C("mux22", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(dimm.value),
as.integer(index$row), as.integer(index$col),
@@ -212,7 +212,7 @@ mux9 <- function(cc, xmat) {
M <- dimcc[1]
n <- dimcc[3]
- ans <- matrix(as.numeric(NA), n, M)
+ ans <- matrix(NA_real_, n, M)
fred <- .C("mux9", as.double(cc), as.double(xmat),
ans = as.double(ans),
as.integer(M), as.integer(n), NAOK = TRUE)
@@ -280,7 +280,7 @@ mux15 <- function(cc, xmat) {
if (max(abs(t(cc)-cc))>0.000001)
stop("argument 'cc' is not symmetric")
- ans <- rep(as.numeric(NA), n*M*M)
+ ans <- rep(NA_real_, n*M*M)
fred <- .C("mux15", as.double(cc), as.double(t(xmat)),
ans = as.double(ans), as.integer(M),
as.integer(n), NAOK = TRUE)
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 8870903..2181ccf 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -838,7 +838,7 @@ vvplot.factor <-
about <- function(ux, M, Delta = 1 / M) {
if (M == 1) return(cbind(ux))
- ans <- matrix(as.numeric(NA), length(ux), M)
+ ans <- matrix(NA_real_, length(ux), M)
grid <- seq(-Delta, Delta, len = M)
for (ii in 1:M) {
ans[, ii] <- ux + grid[ii]
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 2a13df6..cc946b3 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -6,6 +6,7 @@
+
predictvglm <-
function(object,
newdata = NULL,
@@ -21,6 +22,7 @@ predictvglm <-
if (missing(extra)) {
}
+
if (deriv != 0)
stop("'deriv' must be 0 for predictvglm()")
@@ -36,11 +38,11 @@ predictvglm <-
- pred <-
+ predn <-
if (se.fit) {
switch(type,
response = {
- warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
+ warning("'type='response' and 'se.fit=TRUE' are not valid ",
"together; setting 'se.fit = FALSE'")
se.fit <- FALSE
predictor <- predict.vlm(object, newdata = newdata,
@@ -122,19 +124,41 @@ predictvglm <-
deriv = deriv, dispersion = dispersion, ...)
}) # End of switch
}
+ } # End of se.fit == FALSE
+
+
+
+
+ try.this <- findFirstMethod("predictvglmS4VGAM", object at family@vfamily)
+ if (length(try.this)) {
+ predn <-
+ predictvglmS4VGAM(object = object,
+ VGAMff = new(try.this),
+ predn = predn, # This is 'new'
+ newdata = newdata,
+ type = type,
+ se.fit = se.fit,
+ deriv = deriv,
+ dispersion = dispersion,
+ untransform = untransform,
+ ...)
+ } else {
}
+
+
+
if (!length(newdata) && length(na.act)) {
if (se.fit) {
- pred$fitted.values <- napredict(na.act[[1]], pred$fitted.values)
- pred$se.fit <- napredict(na.act[[1]], pred$se.fit)
+ predn$fitted.values <- napredict(na.act[[1]], predn$fitted.values)
+ predn$se.fit <- napredict(na.act[[1]], predn$se.fit)
} else {
- pred <- napredict(na.act[[1]], pred)
+ predn <- napredict(na.act[[1]], predn)
}
}
- if (untransform) untransformVGAM(object, pred) else pred
-} # predictvglm
+ if (untransform) untransformVGAM(object, predn) else predn
+} # predictvglm
@@ -147,13 +171,15 @@ setMethod("predict", "vglm", function(object, ...)
-predict.rrvglm <- function(object,
- newdata = NULL,
- type = c("link", "response", "terms"),
- se.fit = FALSE,
- deriv = 0,
- dispersion = NULL,
- extra = object at extra, ...) {
+
+predict.rrvglm <-
+ function(object,
+ newdata = NULL,
+ type = c("link", "response", "terms"),
+ se.fit = FALSE,
+ deriv = 0,
+ dispersion = NULL,
+ extra = object at extra, ...) {
if (se.fit) {
stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet")
@@ -219,6 +245,9 @@ setMethod("predict", "rrvglm", function(object, ...)
+
+
+
untransformVGAM <- function(object, pred) {
@@ -291,9 +320,11 @@ untransformVGAM <- function(object, pred) {
upred[, ii] <- Theta
}
- dmn2 <- if (length(names(object at misc$link)))
- names(object at misc$link) else {
- if (length(object at misc$parameters)) object at misc$parameters else NULL
+ dmn2 <- if (length(names(object at misc$link))) {
+ names(object at misc$link)
+ } else {
+ if (length(object at misc$parameters))
+ object at misc$parameters else NULL
}
dimnames(upred) <- list(dimnames(upred)[[1]], dmn2)
upred
@@ -304,3 +335,36 @@ untransformVGAM <- function(object, pred) {
+setMethod("predictvglmS4VGAM", signature(VGAMff = "binom2.or"),
+ function(object,
+ VGAMff,
+ predn,
+ newdata = NULL,
+ type = c("link", "response", "terms"), # "parameters",
+ se.fit = FALSE,
+ deriv = 0,
+ dispersion = NULL,
+ untransform = FALSE,
+ extra = object at extra,
+ n.ahead = 1,
+ ...) {
+ # object at post <-
+ # callNextMethod(VGAMff = VGAMff,
+ # object = object,
+ # ...)
+ #object at post$reverse <- object at misc$reverse
+
+
+ if (se.fit) {
+ predn$junk.component <- rep(coef(object), length = n.ahead)
+ predn$se.fit.junk.component <- rep(diag(vcov(object)), length = n.ahead)
+ } else {
+ could.return.this.instead.of.predn <-
+ predn2 <- rep(coef(object), length = n.ahead)
+ }
+ predn
+})
+
+
+
+
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 6759cd3..218beb2 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -7,6 +7,7 @@
+
predict.vlm <- function(object,
newdata = NULL,
type = c("response", "terms"),
@@ -77,7 +78,7 @@ predict.vlm <- function(object,
}
offset <- if (!is.null(off.num <- attr(ttob, "offset"))) {
- eval(attr(ttob, "variables")[[off.num+1]], newdata)
+ eval(attr(ttob, "variables")[[off.num + 1]], newdata)
} else if (!is.null(object at offset))
eval(object at call$offset, newdata)
@@ -164,8 +165,9 @@ predict.vlm <- function(object,
if (se.fit) {
object <- as(object, "vlm") # Coerce
fit.summary <- summaryvlm(object, dispersion=dispersion)
- sigma <- if (is.numeric(fit.summary at sigma)) fit.summary at sigma else
- sqrt(deviance(object) / object at df.residual) # was @ResSS
+ sigma <- if (is.numeric(fit.summary at sigma))
+ fit.summary at sigma else
+ sqrt(deviance(object) / object at df.residual) # was @ResSS
pred <- Build.terms.vlm(x = X_vlm, coefs = coefs,
cov = sigma^2 * fit.summary at cov.unscaled,
assign = vasgn,
@@ -239,7 +241,7 @@ predict.vlm <- function(object,
if (raw) {
kindex <- NULL
for (ii in 1:pp)
- kindex <- c(kindex, (ii-1)*M + (1:ncolHlist[ii]))
+ kindex <- c(kindex, (ii-1) * M + (1:ncolHlist[ii]))
if (se.fit) {
pred$fitted.values <- pred$fitted.values[, kindex, drop = FALSE]
pred$se.fit <- pred$se.fit[, kindex, drop = FALSE]
@@ -352,6 +354,8 @@ predict.vglm.se <- function(fit, ...) {
+
+
subconstraints <- function(assign, constraints) {
@@ -365,6 +369,7 @@ subconstraints <- function(assign, constraints) {
}
+
is.linear.term <- function(ch) {
lchar <- length(ch)
ans <- rep(FALSE, len = lchar)
@@ -379,6 +384,7 @@ is.linear.term <- function(ch) {
}
+
canonical.Hlist <- function(Hlist) {
for (ii in 1:length(Hlist)) {
temp <- Hlist[[ii]] * 0
diff --git a/R/print.vglm.q b/R/print.vglm.q
index daaab0f..09d6de1 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -9,6 +9,7 @@
show.vglm <- function(object) {
+
if (!is.null(cl <- object at call)) {
cat("Call:\n")
dput(cl)
@@ -51,6 +52,20 @@ show.vglm <- function(object) {
format(object at criterion[[ii]]), "\n")
}
+
+
+
+
+ try.this <- findFirstMethod("showvglmS4VGAM", object at family@vfamily)
+ if (length(try.this)) {
+ showvglmS4VGAM(object = object,
+ VGAMff = new(try.this))
+ } else {
+ }
+
+
+
+
invisible(object)
}
@@ -101,6 +116,18 @@ show.vgam <- function(object) {
cat(paste(criterion, ":", sep = ""),
format(object[[criterion]]), "\n")
+
+
+
+ try.this <- findFirstMethod("showvgamS4VGAM", object at family@vfamily)
+ if (length(try.this)) {
+ showvgamS4VGAM(object = object,
+ VGAMff = new(try.this))
+ } else {
+ }
+
+
+
invisible(object)
}
diff --git a/R/qtplot.q b/R/qtplot.q
index c08644f..3c5e38c 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -19,7 +19,7 @@ qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
eta = NULL, yoffset = 0) {
lp <- length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ answer <- matrix(NA_real_, nrow(eta), lp,
dimnames = list(dimnames(eta)[[1]],
paste(as.character(percentiles), "%", sep = "")))
for (ii in 1:lp) {
@@ -38,7 +38,7 @@ qtplot.lms.bcg <- function(percentiles = c(25,50,75),
cc <- percentiles
lp <- length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ answer <- matrix(NA_real_, nrow(eta), lp,
dimnames = list(dimnames(eta)[[1]],
paste(as.character(percentiles), "%", sep = "")))
lambda <- eta[, 1]
@@ -60,7 +60,7 @@ qtplot.lms.yjn <- function(percentiles = c(25,50,75),
cc <- percentiles
lp <- length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ answer <- matrix(NA_real_, nrow(eta), lp,
dimnames = list(dimnames(eta)[[1]],
paste(as.character(percentiles), "%", sep = "")))
lambda <- eta[, 1]
@@ -852,7 +852,7 @@ explot.lms.bcn <- function(percentiles = c(25, 50, 75),
eta = NULL, yoffset = 0) {
lp <- length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ answer <- matrix(NA_real_, nrow(eta), lp,
dimnames = list(dimnames(eta)[[1]],
paste(as.character(percentiles), "%", sep = "")))
for (ii in 1:lp) {
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index b5b1e90..72f1c13 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -34,7 +34,7 @@ residualsvlm <-
if (pooled.weight) return(NULL)
n <- object at misc$n
M <- object at misc$M
- wz <- weights(object, type = "w") # $weights
+ wz <- weights(object, type = "work") # $weights
if (!length(wz))
wz <- if (M == 1) rep(1, n) else matrix(1, n, M)
@@ -99,7 +99,7 @@ residualsvglm <-
n <- object at misc$n
M <- object at misc$M
- wz <- weights(object, type = "w") # $weights
+ wz <- weights(object, type = "work") # $weights
if (M == 1) {
if (any(wz < 0))
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 2961d66..643f2cc 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -510,7 +510,7 @@ rrvglm.fit <-
stop("rrvglm only handles full-rank models (currently)")
if (nice31) {
- R <- matrix(as.numeric(NA), 5, 5)
+ R <- matrix(NA_real_, 5, 5)
} else {
R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
R[lower.tri(R)] <- 0
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index d985002..6d78325 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -13,6 +13,7 @@
+
yformat <- function(x, digits = options()$digits) {
format(ifelse(abs(x) < 0.001, signif(x, digits), round(x, digits)))
}
@@ -21,12 +22,14 @@ yformat <- function(x, digits = options()$digits) {
+
summaryvglm <-
function(object, correlation = FALSE,
dispersion = NULL, digits = NULL,
presid = TRUE,
signif.stars = getOption("show.signif.stars"),
- nopredictors = FALSE
+ nopredictors = FALSE,
+ ... # Added 20151211
) {
@@ -48,6 +51,8 @@ summaryvglm <-
stuff <- summaryvlm(
object,
+ presid = FALSE,
+
correlation = correlation,
dispersion = dispersion)
@@ -79,6 +84,7 @@ summaryvglm <-
df = stuff at df,
sigma = stuff at sigma)
+
if (presid) {
Presid <- resid(object, type = "pearson")
if (length(Presid))
@@ -95,6 +101,23 @@ summaryvglm <-
if (is.numeric(stuff at dispersion))
slot(answer, "dispersion") <- stuff at dispersion
+
+
+
+
+
+ try.this <- findFirstMethod("summaryvglmS4VGAM", object at family@vfamily)
+ if (length(try.this)) {
+ new.postslot <-
+ summaryvglmS4VGAM(object = object,
+ VGAMff = new(try.this),
+ ...)
+ answer at post <- new.postslot
+ } else {
+ }
+
+
+
answer
}
@@ -104,10 +127,104 @@ summaryvglm <-
+
+setMethod("summaryvglmS4VGAM", signature(VGAMff = "cumulative"),
+ function(object,
+ VGAMff,
+ ...) {
+ object at post <-
+ callNextMethod(VGAMff = VGAMff,
+ object = object,
+ ...)
+ object at post$reverse <- object at misc$reverse
+
+
+ cfit <- coef(object, matrix = TRUE)
+ M <- ncol(cfit)
+ if (rownames(cfit)[1] == "(Intercept)")
+ object at post$expcoeffs <- exp(coef(object)[-(1:M)])
+
+
+ object at post
+})
+
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "cumulative"),
+ function(object,
+ VGAMff,
+ ...) {
+
+ if (length(object at post$expcoeffs)) {
+ cat("\nExponentiated coefficients:\n")
+ print(object at post$expcoeffs)
+ }
+ if (FALSE) {
+ if (object at post$reverse)
+ cat("Reversed\n\n") else
+ cat("Not reversed\n\n")
+ }
+})
+
+
+
+
+
+
+setMethod("summaryvglmS4VGAM", signature(VGAMff = "multinomial"),
+ function(object,
+ VGAMff,
+ ...) {
+ object at post <-
+ callNextMethod(VGAMff = VGAMff,
+ object = object,
+ ...)
+ object at post$refLevel <- object at misc$refLevel
+ object at post
+})
+
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "multinomial"),
+ function(object,
+ VGAMff,
+ ...) {
+ cat("\nReference group is level ", object at post$refLevel,
+ " of the response\n")
+ callNextMethod(VGAMff = VGAMff,
+ object = object,
+ ...)
+})
+
+
+
+setMethod("summaryvglmS4VGAM", signature(VGAMff = "VGAMcategorical"),
+ function(object,
+ VGAMff,
+ ...) {
+ object at post
+})
+
+
+setMethod("showsummaryvglmS4VGAM", signature(VGAMff = "VGAMcategorical"),
+ function(object,
+ VGAMff,
+ ...) {
+})
+
+
+
+
+
+
+
+
+
setMethod("logLik", "summary.vglm", function(object, ...)
logLik.vlm(object, ...))
+
show.summary.vglm <-
function(x,
digits = max(3L, getOption("digits") - 3L), # Same as glm()
@@ -115,7 +232,8 @@ show.summary.vglm <-
prefix = "",
presid = TRUE,
signif.stars = NULL, # Use this if logical; 20140728
- nopredictors = NULL # Use this if logical; 20150831
+ nopredictors = NULL, # Use this if logical; 20150831
+ ... # Added 20151214
) {
M <- x at misc$M
@@ -230,6 +348,7 @@ show.summary.vglm <-
cat("\nNumber of iterations:", format(trunc(x at iter)), "\n")
+
if (!is.null(correl)) {
ncol.X.vlm <- dim(correl)[2]
if (ncol.X.vlm > 1) {
@@ -241,11 +360,27 @@ show.summary.vglm <-
digits = digits)
}
}
+
+
+
+
+
+ try.this <- findFirstMethod("showsummaryvglmS4VGAM", x at family@vfamily)
+ if (length(try.this)) {
+ showsummaryvglmS4VGAM(object = x,
+ VGAMff = new(try.this),
+ ...)
+ } else {
+ }
+
+
+
invisible(NULL)
}
+
setMethod("summary", "vglm",
function(object, ...)
summaryvglm(object, ...))
@@ -268,6 +403,33 @@ setMethod("show", "summary.vglm",
+if (FALSE)
+show.summary.binom2.or <-
+ function(x,
+ digits = max(3L, getOption("digits") - 3L) # Same as glm()
+ ) {
+
+ if (length(x at post$oratio) == 1 &&
+ is.numeric(x at post$oratio)) {
+ cat("\nOdds ratio: ", round(x at post$oratio, digits), "\n")
+ }
+}
+
+
+
+
+if (FALSE)
+setMethod("show", "summary.binom2.or",
+ function(object)
+ show.summary.vglm(object))
+
+
+
+
+
+
+
+
vcovdefault <- function(object, ...) {
if (is.null(object at vcov))
stop("no default")
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 71a5b1b..00945d8 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -33,8 +33,11 @@ summaryvlm <-
Coefs <- object at coefficients
cnames <- names(Coefs)
- if (presid) {
+ Presid <- if (presid) {
Presid <- residualsvlm(object, type = "pearson") # NULL if pooled.weight
+ Presid
+ } else {
+ NULL
}
if (any(is.na(Coefs))) {
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 7f198b1..a5e8954 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -102,7 +102,7 @@ vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
trivc <- trivial.constraints(constraints)
- ans <- rep(as.numeric(NA), length = ncol(smomat))
+ ans <- rep(NA_real_, length = ncol(smomat))
Uderiv <- vbacksub(U, t(deriv), M = M, n = n) # \bU_i^{-1} \biu_i
diff --git a/R/vglm.R b/R/vglm.R
index 4e4c121..a080dfb 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -115,7 +115,6 @@ vglm <- function(formula,
family = family,
control = control,
constraints = constraints,
- criterion = control$criterion,
extra = extra,
qr.arg = qr.arg,
Terms = mt, function.name = function.name, ...)
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 3f15876..1f61120 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -125,21 +125,22 @@ vcontrol.expression <- expression({
control <- control # First one, e.g., vgam.control(...)
mylist <- family at vfamily
- for (i in length(mylist):1) {
+ for (jay in length(mylist):1) {
for (ii in 1:2) {
temp <- paste(if (ii == 1) "" else
paste(function.name, ".", sep = ""),
- mylist[i], ".control", sep = "")
+ mylist[jay], ".control", sep = "")
if (exists(temp, envir = VGAMenv)) {
temp <- get(temp)
temp <- temp(...)
- for (k in names(temp))
- control[[k]] <- temp[[k]]
+ for (kk in names(temp))
+ control[[kk]] <- temp[[kk]]
}
}
}
+
orig.criterion <- control$criterion
if (control$criterion != "coefficients") {
try.crit <- c(names(.min.criterion.VGAM), "coefficients")
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index bbd8efb..817c67f 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -14,12 +14,14 @@ vglm.fit <-
etastart = NULL, mustart = NULL, coefstart = NULL,
offset = 0, family,
control = vglm.control(),
- criterion = "coefficients",
qr.arg = FALSE,
constraints = NULL,
extra = NULL,
Terms = Terms, function.name = "vglm", ...) {
+ if (is.null(criterion <- control$criterion))
+ criterion <- "coefficients"
+
eff.n <- nrow(x) # + sum(abs(w[1:nrow(x)]))
specialCM <- NULL
@@ -62,6 +64,11 @@ vglm.fit <-
eval(slot(family, "initialize")) # Initialize mu & M (& optionally w)
+
+
+
+
+
if (length(etastart)) {
eta <- etastart
mu <- if (length(mustart)) mustart else
@@ -71,6 +78,8 @@ vglm.fit <-
"but there is no 'linkinv' slot to use it")
}
+
+
if (length(mustart)) {
mu <- mustart
if (length(body(slot(family, "linkfun")))) {
@@ -82,6 +91,24 @@ vglm.fit <-
}
+ validparams <- if (length(body(slot(family, "validparams")))) {
+ slot(family, "validparams")(eta, extra = extra)
+ } else {
+ TRUE
+ }
+ validfitted <- if (length(body(slot(family, "validfitted")))) {
+ slot(family, "validfitted")(mu, extra = extra)
+ } else {
+ TRUE
+ }
+ if (!(validparams && validfitted))
+ stop("could not obtain valid initial values. ",
+ "Try using 'etastart', 'coefstart' or 'mustart', else ",
+ "family-specific arguments such as 'imethod'.")
+
+
+
+
M <- if (is.matrix(eta)) ncol(eta) else 1
@@ -231,6 +258,29 @@ vglm.fit <-
new.crit < old.crit)))
if (!is.logical(take.half.step))
take.half.step <- TRUE
+
+
+ if (!take.half.step && length(old.coeffs)) {
+ validparams <- if (length(body(slot(family, "validparams")))) {
+ slot(family, "validparams")(eta, extra = extra)
+ } else {
+ TRUE
+ }
+ validfitted <- if (length(body(slot(family, "validfitted")))) {
+ slot(family, "validfitted")(mu, extra = extra)
+ } else {
+ TRUE
+ }
+ take.half.step <- !(validparams && validfitted)
+
+
+ if (take.half.step) {
+ stepsize <- orig.stepsize / 4
+ }
+ }
+
+
+
if (take.half.step) {
stepsize <- 2 * min(orig.stepsize, 2*stepsize)
new.coeffs.save <- new.coeffs
@@ -242,7 +292,7 @@ vglm.fit <-
flush.console()
}
stepsize <- stepsize / 2
- if (too.small <- stepsize < 0.001)
+ if (too.small <- stepsize < 1e-6)
break
new.coeffs <- (1-stepsize) * old.coeffs +
stepsize * new.coeffs.save
@@ -266,9 +316,23 @@ vglm.fit <-
tfun(mu = mu, y = y, w = w,
res = FALSE, eta = eta, extra))
- if ((criterion == "coefficients") ||
- ( minimize.criterion && new.crit < old.crit) ||
- (!minimize.criterion && new.crit > old.crit))
+
+ validparams <- if (length(body(slot(family, "validparams")))) {
+ slot(family, "validparams")(eta, extra = extra)
+ } else {
+ TRUE
+ }
+ validfitted <- if (length(body(slot(family, "validfitted")))) {
+ slot(family, "validfitted")(mu, extra = extra)
+ } else {
+ TRUE
+ }
+
+
+ if (validparams && validfitted &&
+ (criterion == "coefficients" ||
+ (( minimize.criterion && new.crit < old.crit) ||
+ (!minimize.criterion && new.crit > old.crit))))
break
} # of repeat
@@ -331,6 +395,7 @@ vglm.fit <-
old.coeffs <- new.coeffs
} # End of while()
+
if (maxit > 1 && iter >= maxit && !control$noWarning)
warning("convergence not obtained in ", maxit, " iterations")
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index e6e8844..839b51e 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -127,7 +127,7 @@ vlm.wfit <-
dx2 <- if (is.vlmX) NULL else dimnames(xmat)[[2]]
- B <- matrix(as.numeric(NA),
+ B <- matrix(NA_real_,
nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
if (is.null(Hlist)) {
Hlist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index 6fc0e65..dfc0f24 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -283,12 +283,12 @@ vsmooth.spline <-
if (all(!nonlin)) {
junk.fill <- new("vsmooth.spline.fit",
- "Bcoefficients" = matrix(as.numeric(NA), 1, 1),
+ "Bcoefficients" = matrix(NA_real_, 1, 1),
"knots" = numeric(0),
"xmin" = numeric(0),
"xmax" = numeric(0)) # 8/11/03
- dratio <- as.numeric(NA)
+ dratio <- NA_real_
object <-
new("vsmooth.spline",
@@ -634,7 +634,7 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
good <- !(bad.left | bad.right)
ncb <- ncol(object at Bcoefficients)
- y <- matrix(as.numeric(NA), length(xs), ncb)
+ y <- matrix(NA_real_, length(xs), ncb)
if (ngood <- sum(good)) {
junk <- .C("Yee_vbvs", as.integer(ngood),
as.double(object at knots), as.double(object at Bcoefficients),
diff --git a/build/vignette.rds b/build/vignette.rds
index 109ed29..e76231e 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index 4473871..fc819bc 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index 1004846..9aa7bc5 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 2c3e528..77f6197 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index e9fcf0d..e67e446 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index b053803..bd45d80 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index 38527f9..6ce3225 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
index 81e7ca8..646f791 100644
Binary files a/data/beggs.rda and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 93a0348..7647963 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
index 54e60bf..6c833f2 100644
Binary files a/data/cfibrosis.rda and b/data/cfibrosis.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
index 18d0b14..88ceb38 100644
Binary files a/data/corbet.rda and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index c66a522..0e040a3 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 5051a1d..8f11ae9 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 08a6913..b8f8947 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 8e1f2c5..8a20da6 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 26d272a..723d7be 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 88df8ba..b1ab63c 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
index 2c3e32e..40e314b 100644
Binary files a/data/deermice.rda and b/data/deermice.rda differ
diff --git a/data/ducklings.rda b/data/ducklings.rda
index 8fe331c..491263f 100644
Binary files a/data/ducklings.rda and b/data/ducklings.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 18e5657..21ae1b2 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/flourbeetle.rda b/data/flourbeetle.rda
index b84be12..ead5249 100644
Binary files a/data/flourbeetle.rda and b/data/flourbeetle.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 78b3f91..82ad0dc 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
index 14982c5..794caa5 100644
Binary files a/data/lakeO.rda and b/data/lakeO.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index a306ba2..6634af0 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index 476e299..b883ec3 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/melbmaxtemp.rda b/data/melbmaxtemp.rda
index 69c7442..7a6f3f3 100644
Binary files a/data/melbmaxtemp.rda and b/data/melbmaxtemp.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index affea33..02da66d 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index 5055c0b..4d28a68 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index f96920c..c4c2033 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index f9179eb..8081ef2 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index 3c06750..e21d790 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 9900e20..92d185a 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wine.rda b/data/wine.rda
index e80e170..877f503 100644
Binary files a/data/wine.rda and b/data/wine.rda differ
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index d3093fb..498bcfa 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/inst/doc/crVGAM.pdf b/inst/doc/crVGAM.pdf
index 188fb99..f81f86a 100644
Binary files a/inst/doc/crVGAM.pdf and b/inst/doc/crVGAM.pdf differ
diff --git a/man/AR1.Rd b/man/AR1.Rd
index 541d880..ee9a1f1 100644
--- a/man/AR1.Rd
+++ b/man/AR1.Rd
@@ -1,5 +1,6 @@
\name{AR1}
\alias{AR1}
+\alias{AR1.control}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Autoregressive Process with Order-1 Family Function }
\description{
@@ -10,10 +11,13 @@
\usage{
AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
lrho = "rhobit", idrift = NULL,
- isd = NULL, ivar = NULL, irho = NULL,
- ishrinkage = 0.9, type.likelihood = c("exact", "conditional"),
- var.arg = FALSE, nodrift = FALSE, almost1 = 0.99, zero = c(-2, -3))
+ isd = NULL, ivar = NULL, irho = NULL, imethod = 1,
+ ishrinkage = 1, type.likelihood = c("exact", "conditional"),
+ var.arg = FALSE, nodrift = FALSE, almost1 = 0.99,
+ zero = c(if (var.arg) "var" else "sd", "rho"))
+ AR1.control(half.stepsizing = FALSE, ...)
}
+% zero = c(-2, -3)
% deviance.arg = FALSE,
@@ -41,8 +45,12 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
- \item{ishrinkage, zero}{
+ \item{ishrinkage, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+ The default for \code{zero} assumes there is a drift parameter to
+ be estimated (the default for that argument), so if a drift parameter
+ is suppressed and there are covariates, then \code{zero} will need
+ to be assigned the value 1 or 2 or \code{NULL}.
}
@@ -81,6 +89,21 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
+
+\item{half.stepsizing, \ldots}{
+ A logical value, overwriting that of \code{\link{vglm.control}}.
+ Currently this setting is potentially dangerous, and is used
+ for aesthetics at the solution---no jittering occurs.
+ This can often be seen by setting \code{trace = TRUE} when the
+ value is set to \code{TRUE}.
+ The jittering is due to some heuristics applied to handle the
+ first observation---either by setting its prior weight to a value
+ very close to 0, else adjust for its EIM which is not of full rank.
+
+
+
+}
+
}
\details{
The AR-1 model implemented here has
@@ -163,18 +186,18 @@ AR1(ldrift = "identitylink", lsd = "loge", lvar = "loge",
}
\examples{
# Example 1: using arima.sim() to generate a stationary time series
-nn <- 100; set.seed(1)
+nn <- 1000; set.seed(1)
tsdata <- data.frame(x2 = runif(nn))
+ar.coef.1 <- rhobit(-2, inverse = TRUE) # Approx -0.8
+ar.coef.2 <- rhobit( 1, inverse = TRUE) # Approx 0.5
tsdata <- transform(tsdata,
index = 1:nn,
- TS1 = arima.sim(nn, model = list(ar = -0.80),
+ TS1 = arima.sim(nn, model = list(ar = ar.coef.1),
sd = exp(1.0)),
- TS2 = arima.sim(nn, model = list(ar = 0.50),
+ TS2 = arima.sim(nn, model = list(ar = ar.coef.2),
sd = exp(1.0 + 2 * x2)))
-fit1a <- vglm(cbind(TS1, TS2) ~ x2, AR1(zero = c(1:4, 6)),
- data = tsdata, trace = TRUE)
-rhobit(-0.8)
-rhobit( 0.5)
+fit1a <- vglm(cbind(TS1, TS2) ~ x2, AR1(zero = "rho", nodrift = TRUE),
+ data = tsdata, trace = TRUE)
coef(fit1a, matrix = TRUE)
summary(fit1a) # SEs are useful to know
@@ -203,3 +226,10 @@ head(weights(fit2a, type= "working")) # Ditto
+%fit1a <- vglm(cbind(TS1, TS2) ~ x2, AR1(zero = c(1:4, 6)),
+% data = tsdata, trace = TRUE)
+
+
+
+
+
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index e482eb8..78a6b43 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -270,8 +270,12 @@ except for \eqn{X_2}.
}
\item{zero}{
- An integer specifying which linear/additive predictor is modelled
- as intercept-only. That is, the regression coefficients are
+ Either an integer vector, or a vector of character strings.
+
+
+ If an integer, then it specifies which
+ linear/additive predictor is modelled as \emph{intercept-only}.
+ That is, the regression coefficients are
set to zero for all covariates except for the intercept.
If \code{zero} is specified then it may be a vector with values
from the set \eqn{\{1,2,\ldots,M\}}.
@@ -313,6 +317,42 @@ except for \eqn{X_2}.
would be equivalent to \code{zero = c(2, 3, 5, 8, 11)}.
+
+ The argument \code{zero} also
+ accepts a character vector (for \pkg{VGAM} 1.0-1 onwards).
+ Each value is fed into \code{\link[base]{grep}} with
+ \code{fixed = TRUE}, meaning that wildcards \code{"*"} are not useful.
+ See the example below---all the variants work;
+ those with \code{LOCAT} issue a warning that that value is unmatched.
+Importantly, the parameter names
+are \code{c("location1", "scale1", "location2", "scale2")}
+because there are 2 responses.
+Yee (2015) described \code{zero} for only numerical input.
+Allowing character input is particularly important when the
+number of parameters cannot be determined without having the actual
+data first. For example, with time series data, an ARMA(\eqn{p},\eqn{q}) process
+might have parameters \eqn{\theta_1,\ldots,\theta_p} which should
+be intercept-only by default. Then specifying a numerical default
+value for \code{zero} would be too difficult (there are the drift
+and scale parameters too).
+However, it is possible with the character representation:
+\code{zero = "theta"} would achieve this.
+In the future, most \pkg{VGAM} family functions might be converted
+ to the character representation---the advantage being that it
+ is more readable.
+ When programming a \pkg{VGAM} family function that allows character
+ input, the variable \code{predictors.names}
+ must be assigned correctly.
+
+
+%Note that \code{zero} accepts wildcards (cf. the Linux operating system):
+%\code{"location*"} means that \emph{all} location parameters
+%are intercept-only.
+% When programming a \pkg{VGAM} family function that allows character
+% input, the variables \code{parameters.names}
+% and \code{Q1}
+
+
}
\item{ishrinkage}{
Shrinkage factor \eqn{s} used for obtaining initial values.
@@ -451,6 +491,16 @@ except for \eqn{X_2}.
\references{
+
+
+Yee, T. W. (2015)
+Vector Generalized Linear and Additive Models:
+With an Implementation in R.
+New York, USA: \emph{Springer}.
+
+
+
+
Kosmidis, I. and Firth, D. (2009)
Bias reduction in exponential family nonlinear models.
\emph{Biometrika},
@@ -468,6 +518,7 @@ Bias reduction in exponential family nonlinear models.
\seealso{
\code{\link{Links}},
\code{\link{vglmff-class}},
+ \code{\link{UtilitiesVGAM}},
\code{\link{normal.vcm}},
\code{\link{multilogit}}.
@@ -538,6 +589,29 @@ fit2 <- vglm(cbind(normal, mild, severe) ~ let,
sratio(whitespace = TRUE, parallel = TRUE), data = pneumo)
head(predict(fit1), 2) # No white spaces
head(predict(fit2), 2) # Uses white spaces
+
+# Example 7 ('zero' argument with character input)
+set.seed(123); n <- 1000
+ldata <- data.frame(x2 = runif(n))
+ldata <- transform(ldata, y1 = rlogis(n, loc = 0+5*x2, scale = exp(2)))
+ldata <- transform(ldata, y2 = rlogis(n, loc = 0+5*x2, scale = exp(0+1*x2)))
+ldata <- transform(ldata, w1 = runif(n))
+ldata <- transform(ldata, w2 = runif(n))
+fit7 <- vglm(cbind(y1, y2) ~ x2,
+# logistic(zero = "location1"), # location1 is intercept-only
+# logistic(zero = "location2"),
+# logistic(zero = "location*"), # Not okay... all is unmatched
+# logistic(zero = "scale1"),
+# logistic(zero = "scale2"),
+# logistic(zero = "scale"), # Both scale parameters are matched
+ logistic(zero = c("location", "scale2")), # All but scale1
+# logistic(zero = c("LOCAT", "scale2")), # Only scale2 is matched
+# logistic(zero = c("LOCAT")), # Nothing is matched
+# trace = TRUE,
+# weights = cbind(w1, w2),
+ weights = w1,
+ data = ldata)
+coef(fit7, matrix = TRUE)
}
\keyword{models}
diff --git a/man/UtilitiesVGAM.Rd b/man/UtilitiesVGAM.Rd
new file mode 100644
index 0000000..13b98cf
--- /dev/null
+++ b/man/UtilitiesVGAM.Rd
@@ -0,0 +1,146 @@
+\name{UtilitiesVGAM}
+\alias{UtilitiesVGAM}
+\alias{param.names}
+\alias{dimm}
+\alias{interleave.VGAM}
+\title{Utility Functions for the VGAM Package }
+\description{
+ A set of common utility functions used by
+ \pkg{VGAM} family functions.
+
+}
+\usage{
+param.names(string, S)
+dimm(M, hbw = M)
+interleave.VGAM(.M, M1, inverse = FALSE)
+}
+\arguments{
+ \item{string}{
+ Character.
+ Name of the parameter.
+
+
+ }
+ \item{M, .M}{
+ Numeric. The total number of linear/additive predictors, called
+ \eqn{M}.
+ By total, it is meant summed over the number of responses.
+ Often, \eqn{M} is the total number of parameters to be estimated (but
+ this is not the same as the number of regression coefficients, unless
+ the RHS of the formula is an intercept-only).
+ The use of \code{.M} is unfortunate, but it is a compromise solution
+ to what is presented in Yee (2015).
+ Ideally, \code{.M} should be just \code{M}.
+
+
+ }
+ \item{M1}{
+ Numeric. The number of linear/additive predictors for one response, called
+ \eqn{M_1}.
+ This argument used to be called \code{M}, but is now renamed properly.
+
+
+ }
+ \item{inverse}{
+ Logical. Useful for the inverse function of \code{interleave.VGAM()}.
+
+
+
+ }
+ \item{S}{
+ Numeric. The number of responses.
+
+
+ }
+ \item{hbw}{
+ Numeric. The half-bandwidth, which measures the number
+ of bands emanating from the central diagonal band.
+
+
+ }
+}
+\value{
+ For \code{param.names()}, this function returns the parameter names
+ for \eqn{S} responses,
+ i.e., \code{string} is returned unchanged if \eqn{S=1},
+ else \code{paste(string, 1:S, sep = "")}.
+
+
+ For \code{dimm()}, this function returns the number of elements
+ to be stored for each of the working weight matrices.
+ They are represented as columns in the matrix \code{wz} in
+ e.g., \code{vglm.fit()}.
+ See the \emph{matrix-band} format described in
+ Section 18.3.5 of Yee (2015).
+
+
+
+ For \code{interleave.VGAM()}, this function returns a reordering
+ of the linear/additive predictors depending on the number of responses.
+ The arguments presented in Table 18.5 may not be valid
+ in your version of Yee (2015).
+
+
+}
+%\section{Warning }{
+% The \code{zero} argument is supplied for convenience but conflicts
+%}
+
+\details{
+ See Yee (2015) for some details about some of these functions.
+
+
+
+}
+
+\references{
+
+
+
+Yee, T. W. (2015)
+Vector Generalized Linear and Additive Models:
+With an Implementation in R.
+New York, USA: \emph{Springer}.
+
+
+
+}
+
+\seealso{
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{VGAM-package}}.
+
+
+}
+\author{T. W. Yee.
+ Victor Miranda added the \code{inverse} argument to \code{interleave.VGAM()}.
+
+
+}
+
+%\note{
+% See \code{\link{Links}} regarding a major change in
+%
+%}
+
+\examples{
+param.names("shape", 1) # "shape"
+param.names("shape", 3) # c("shape1", "shape2", "shape3")
+
+dimm(3, hbw = 1) # Diagonal matrix; the 3 elements need storage.
+dimm(3) # A general 3 x 3 symmetric matrix has 6 unique elements.
+dimm(3, hbw = 2) # Tridiagonal matrix; the 3-3 element is 0 and unneeded.
+
+M1 <- 2; ncoly <- 3; M <- ncoly * M1
+mynames1 <- param.names("location", ncoly)
+mynames2 <- param.names("scale", ncoly)
+(parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)])
+# The following is/was in Yee (2015) and has a poor/deceptive style:
+(parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)])
+parameters.names[interleave.VGAM(M, M1 = M1, inverse = TRUE)]
+}
+\keyword{distribution}
+\keyword{regression}
+\keyword{programming}
+\keyword{models}
+
diff --git a/man/acat.Rd b/man/acat.Rd
index 158062c..d433556 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -119,6 +119,7 @@ The \pkg{VGAM} package for categorical data analysis.
response is a matrix;
see \code{\link[base:factor]{ordered}}.
+
}
\seealso{
@@ -126,8 +127,10 @@ The \pkg{VGAM} package for categorical data analysis.
\code{\link{cratio}},
\code{\link{sratio}},
\code{\link{multinomial}},
+ \code{\link{margeff}},
\code{\link{pneumo}}.
+
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index cdca950..1242dbf 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -24,11 +24,11 @@ alaplace2(tau = NULL, llocation = "identitylink", lscale = "loge",
ishrinkage = 0.95,
parallel.locat = TRUE ~ 0,
parallel.scale = FALSE ~ 0,
- digt = 4, idf.mu = 3, imethod = 1, zero = -2)
+ digt = 4, idf.mu = 3, imethod = 1, zero = "scale")
alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
ilocation = NULL, iscale = NULL, ikappa = 1,
- imethod = 1, zero = 2:3)
+ imethod = 1, zero = c("scale", "kappa"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/betaII.Rd b/man/betaII.Rd
index f09feba..9c72543 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -10,9 +10,10 @@
betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
iscale = NULL, ishape2.p = NULL, ishape3.q = NULL, imethod = 1,
gscale = exp(-5:5), gshape2.p = exp(-5:5), gshape3.q = exp(-5:5),
- probs.y = c(0.25, 0.5, 0.75), zero = -(2:3))
+ probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
+% probs.y = c(0.25, 0.5, 0.75), zero = -(2:3)
\arguments{
\item{lscale, lshape2.p, lshape3.q}{
Parameter link functions applied to the
diff --git a/man/betaR.Rd b/man/betaR.Rd
index 75c96cb..334c69a 100644
--- a/man/betaR.Rd
+++ b/man/betaR.Rd
@@ -9,8 +9,8 @@
}
\usage{
betaR(lshape1 = "loge", lshape2 = "loge",
- i1 = NULL, i2 = NULL, trim = 0.05,
- A = 0, B = 1, parallel = FALSE, zero = NULL)
+ i1 = NULL, i2 = NULL, trim = 0.05,
+ A = 0, B = 1, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index 9741559..ebf2cf6 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -8,10 +8,20 @@
\alias{pbetabinom.ab}
%\alias{qbetabinom.ab}
\alias{rbetabinom.ab}
+%\alias{Ozibetabinom}
+\alias{dozibetabinom}
+\alias{pozibetabinom}
+%\alias{qozibetabinom}
+\alias{rozibetabinom}
+\alias{dozibetabinom.ab}
+\alias{pozibetabinom.ab}
+%\alias{qozibetabinom.ab}
+\alias{rozibetabinom.ab}
\title{The Beta-Binomial Distribution}
\description{
Density, distribution function, and random
- generation for the beta-binomial distribution.
+ generation for the beta-binomial distribution
+ and the inflated beta-binomial distribution.
}
@@ -22,6 +32,14 @@ rbetabinom(n, size, prob, rho = 0)
dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = 1e6)
pbetabinom.ab(q, size, shape1, shape2, log.p = FALSE)
rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
+dozibetabinom(x, size, prob, rho = 0, pstr0 = 0, pstrsize = 0, log = FALSE)
+pozibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0,
+ lower.tail = TRUE, log.p = FALSE)
+rozibetabinom(n, size, prob, rho = 0, pstr0 = 0, pstrsize = 0)
+dozibetabinom.ab(x, size, shape1, shape2, pstr0 = 0, pstrsize = 0, log = FALSE)
+pozibetabinom.ab(q, size, shape1, shape2, pstr0 = 0, pstrsize = 0,
+ lower.tail = TRUE, log.p = FALSE)
+rozibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
}
@@ -58,9 +76,8 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
}
- \item{log, log.p}{
- Logical.
- If \code{TRUE} then all probabilities \code{p} are given as \code{log(p)}.
+ \item{log, log.p, lower.tail}{
+ Same meaning as \code{\link[stats]{runif}}.
}
@@ -83,6 +100,20 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
}
+
+ \item{pstr0}{
+ Probability of a structural zero (i.e., ignoring the beta-binomial distribution).
+ The default value of \code{pstr0} corresponds to the response having a
+ beta-binomial distribution inflated only at \code{size}.
+
+ }
+
+ \item{pstrsize}{
+ Probability of a structural maximum value \code{size}. The default value of
+ \code{pstrsize} corresponds to the response having a beta-binomial distribution
+ inflated only at 0.
+
+ }
}
@@ -94,9 +125,13 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
% \code{qbetabinom} and \code{qbetabinom.ab} gives the quantile function, and
+ \code{dozibetabinom} and \code{dozibetabinom.ab} give the inflated density,
+ \code{pozibetabinom} and \code{pozibetabinom.ab} give the inflated distribution function, and
+ \code{rozibetabinom} and \code{rozibetabinom.ab} generate random inflated deviates.
+
}
-\author{ T. W. Yee }
+\author{ T. W. Yee and Xiangjie Xue}
\details{
The beta-binomial distribution is a binomial distribution whose
probability of success is not a constant but it is generated from a
@@ -111,9 +146,26 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
estimating the parameters, for the formula of the probability density
function and other details.
+ For the inflated beta-binomial distribution, the probability mass
+ function is
+ \deqn{P(Y = y) =(1 - pstr0 - pstrsize) \times BB(y) + pstr0 \times I[y = 0] +
+ pstrsize \times I[y = size]}{%
+ F(Y = y) =(1 - pstr0 - pstrsize) * BB(y) + pstr0 * I[y = 0] +
+ pstrsize * I[y = size]}
+
+ where \eqn{BB(y)} is the probability mass function
+ of the beta-binomial distribution with the same shape parameters
+ (\code{\link[VGAM]{pbetabinom.ab}}),
+ \code{pstr0} is the inflated probability at 0
+ and \code{pstrsize} is the inflated probability at \code{size}.
+ The default values of \code{pstr0} and \code{pstrsize} mean that these
+ functions behave like the ordinary \code{\link[VGAM]{Betabinom}}
+ when only the essential arguments are inputted.
+
}
\note{
+ \code{pozibetabinom}, \code{pozibetabinom.ab},
\code{pbetabinom} and \code{pbetabinom.ab} can be particularly slow.
The functions here ending in \code{.ab} are called from those
functions which don't.
@@ -127,7 +179,8 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
}
\seealso{
\code{\link{betabinomial}},
- \code{\link{betabinomialff}}.
+ \code{\link{betabinomialff}},
+ \code{\link{Ozibeta}}.
}
@@ -155,7 +208,21 @@ barplot(rbind(dy, ty / sum(ty)),
", shape2=", s2, ") (blue) vs\n",
" Random generated beta-binomial(size=", N, ", prob=", s1/(s1+s2),
") (orange)", sep = ""), cex.main = 0.8,
- names.arg = as.character(xx)) }
+ names.arg = as.character(xx))
+
+set.seed(208); N <- 1000000; size = 20;
+pstr0 <- 0.2; pstrsize <- 0.2
+k <- rozibetabinom.ab(N, size, s1, s2, pstr0, pstrsize)
+hist(k, probability = TRUE, border = "blue",
+ main = "Blue = inflated; orange = ordinary beta-binomial",
+ breaks = -0.5 : (size + 0.5))
+sum(k == 0) / N # Proportion of 0
+sum(k == size) / N # Proportion of size
+lines(0 : size,
+ dbetabinom.ab(0 : size, size, s1, s2), col = "orange")
+lines(0 : size, col = "blue",
+ dozibetabinom.ab(0 : size, size, s1, s2, pstr0, pstrsize))
+}
}
\keyword{distribution}
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index f12a697..11a82fc 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -10,9 +10,10 @@
}
\usage{
betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
- ishrinkage = 0.95, nsimEIM = NULL, zero = 2)
+ ishrinkage = 0.95, nsimEIM = NULL, zero = "rho")
}
%- maybe also 'usage' for other objects documented here.
+% ishrinkage = 0.95, nsimEIM = NULL, zero = 2
\arguments{
\item{lmu, lrho}{
Link functions applied to the two parameters.
@@ -40,15 +41,16 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
}
\item{zero}{
- An integer specifying which
+ Specifies which
linear/additive predictor is to be modelled as an intercept only.
- If assigned, the single value should be either \code{1} or \code{2}.
+ If assigned, the single value can be either \code{1} or \code{2}.
The default is to have a single correlation parameter.
To model both parameters as functions of the covariates assign
\code{zero = NULL}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{ishrinkage, nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for more information.
@@ -166,7 +168,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
\section{Warning }{
- If the estimated rho parameter is close to zero then it pays to
+ If the estimated rho parameter is close to 0 then it pays to
try \code{lrho = "rhobit"}. One day this may become the default
link function.
diff --git a/man/betabinomialff.Rd b/man/betabinomialff.Rd
index ef334b8..67c8ea9 100644
--- a/man/betabinomialff.Rd
+++ b/man/betabinomialff.Rd
@@ -33,11 +33,13 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
}
\item{zero}{
- An integer specifying which linear/additive predictor is to be modelled
+ Can be
+ an integer specifying which linear/additive predictor is to be modelled
as an intercept only. If assigned, the single value should be either
\code{1} or \code{2}. The default is to model both shape parameters
as functions of the covariates. If a failure to converge occurs,
try \code{zero = 2}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/betaff.Rd b/man/betaff.Rd
index ec37305..35caaab 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -119,6 +119,7 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
\seealso{
\code{\link{betaR}},
\code{\link[stats:Beta]{Beta}},
+ \code{\link{dozibeta}},
\code{\link{genbetaII}},
\code{\link{betaII}},
\code{\link{betabinomialff}},
diff --git a/man/bigamma.mckay.Rd b/man/bigamma.mckay.Rd
index a84f4ef..c62c06e 100644
--- a/man/bigamma.mckay.Rd
+++ b/man/bigamma.mckay.Rd
@@ -10,7 +10,7 @@
\usage{
bigamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
iscale = NULL, ishape1 = NULL, ishape2 = NULL,
- imethod = 1, zero = 2:3)
+ imethod = 1, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/bilogistic.Rd b/man/bilogistic.Rd
index 5277ef8..6bb81f3 100644
--- a/man/bilogistic.Rd
+++ b/man/bilogistic.Rd
@@ -53,7 +53,8 @@ bilogistic(llocation = "identitylink", lscale = "loge",
\item{zero}{ An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
The default is none of them.
- If used, choose values from the set \{1,2,3,4\}.
+ If used, one can choose values from the set \{1,2,3,4\}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index e8b25f9..484f929 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -13,7 +13,7 @@
}
\usage{
binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
- imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = 3,
+ imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = "oratio",
exchangeable = FALSE, tol = 0.001, more.robust = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -47,7 +47,10 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
}
\item{zero}{
Which linear/additive predictor is modelled as an intercept only?
+ The default is for the odds ratio.
A \code{NULL} means none.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+
}
@@ -92,7 +95,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
The model is fitted by maximum likelihood estimation since the full
likelihood is specified.
The two binary responses are independent if and only if the odds ratio
- is unity, or equivalently, the log odds ratio is zero. Fisher scoring
+ is unity, or equivalently, the log odds ratio is 0. Fisher scoring
is implemented.
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index b22336c..5529a3e 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -10,24 +10,31 @@
}
\usage{
-binom2.rho(lrho = "rhobit", lmu = "probit", imu1 = NULL, imu2 = NULL,
- irho = NULL, imethod = 1, zero = 3,
+binom2.rho(lmu = "probit", lrho = "rhobit", imu1 = NULL, imu2 = NULL,
+ irho = NULL, imethod = 1, zero = "rho",
exchangeable = FALSE, grho = seq(-0.95, 0.95, by = 0.05),
nsimEIM = NULL)
binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
exchangeable = FALSE, nsimEIM = NULL)
}
%- maybe also 'usage' for other objects documented here.
+% binom2.rho(lrho = "rhobit", lmu = "probit", imu1 = NULL, imu2 = NULL,...)
\arguments{
+ \item{lmu}{
+ Link function applied to the marginal probabilities.
+ Should be left alone.
+
+
+ }
\item{lrho}{
Link function applied to the \eqn{\rho}{rho} association parameter.
See \code{\link{Links}} for more choices.
}
- \item{lmu}{
- Link function applied to the marginal probabilities.
- Should be left alone.
+ \item{imu1, imu2}{
+ Optional initial values for the two marginal probabilities.
+ May be a vector.
}
@@ -38,17 +45,12 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
}
- \item{imu1, imu2}{
- Optional initial values for the two marginal probabilities.
- May be a vector.
-
-
- }
\item{zero}{
- Which linear/additive predictor is modelled as an intercept only?
+ Specifies which linear/additive predictors are modelled as intercept-only.
A \code{NULL} means none.
Numerically, the \eqn{\rho}{rho} parameter is easiest modelled as
an intercept only, hence the default.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -156,11 +158,12 @@ Freedman, D. A. and Sekhon, J. S. (2010)
should have.
- By default, a constant \eqn{\rho}{rho} is fitted because \code{zero = 3}.
- Set \code{zero = NULL} if you want the \eqn{\rho}{rho} parameter to
- be modelled as a function of the explanatory variables. The value
- \eqn{\rho}{rho} lies in the interval \eqn{(-1,1)}{(-1,1)}, therefore
- a \code{\link{rhobit}} link is default.
+ By default, a constant \eqn{\rho}{rho} is fitted because
+ \code{zero = "rho"}. Set \code{zero = NULL} if you want
+ the \eqn{\rho}{rho} parameter to be modelled as a function
+ of the explanatory variables. The value \eqn{\rho}{rho}
+ lies in the interval \eqn{(-1,1)}{(-1,1)}, therefore a
+ \code{\link{rhobit}} link is default.
Converge problems can occur.
diff --git a/man/binormal.Rd b/man/binormal.Rd
index c8d91bc..4f12763 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -15,7 +15,7 @@ binormal(lmean1 = "identitylink", lmean2 = "identitylink",
isd1 = NULL, isd2 = NULL,
irho = NULL, imethod = 1,
eq.mean = FALSE, eq.sd = FALSE,
- zero = 3:5)
+ zero = c("sd", "rho"))
}
diff --git a/man/bisa.Rd b/man/bisa.Rd
index dd61346..88defe3 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -8,8 +8,8 @@
}
\usage{
-bisa(lscale = "loge", lshape = "loge",
- iscale = 1, ishape = NULL, imethod = 1, zero = NULL, nowarning = FALSE)
+bisa(lscale = "loge", lshape = "loge", iscale = 1,
+ ishape = NULL, imethod = 1, zero = "shape", nowarning = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -44,10 +44,14 @@ bisa(lscale = "loge", lshape = "loge",
}
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The default is none of them.
+ Specifies which linear/additive predictor is modelled as intercept-only.
If used, choose one value from the set \{1,2\}.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+
+
+
+% The default is none of them.
+
}
@@ -128,7 +132,8 @@ New York: Wiley.
\seealso{
\code{\link{pbisa}},
- \code{\link{inv.gaussianff}}.
+ \code{\link{inv.gaussianff}},
+ \code{\link{CommonVGAMffArguments}}.
}
diff --git a/man/bistudentt.Rd b/man/bistudentt.Rd
index 492b936..376820d 100644
--- a/man/bistudentt.Rd
+++ b/man/bistudentt.Rd
@@ -11,7 +11,7 @@
\usage{
bistudentt(ldf = "loglog", lrho = "rhobit",
idf = NULL, irho = NULL, imethod = 1,
- parallel = FALSE, zero = -1)
+ parallel = FALSE, zero = "rho")
}
%- maybe also 'usage' for other objects documented here.
%apply.parint = TRUE,
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 381d3c0..5cb17d9 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -12,7 +12,7 @@
cauchy(llocation = "identitylink", lscale = "loge",
ilocation = NULL, iscale = NULL,
iprobs = seq(0.2, 0.8, by = 0.2),
- imethod = 1, nsimEIM = NULL, zero = 2)
+ imethod = 1, nsimEIM = NULL, zero = "scale")
cauchy1(scale.arg = 1, llocation = "identitylink",
ilocation = NULL, imethod = 1)
}
@@ -48,7 +48,7 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
}
\item{zero, nsimEIM}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/cens.gumbel.Rd b/man/cens.gumbel.Rd
index 47b0252..00df388 100644
--- a/man/cens.gumbel.Rd
+++ b/man/cens.gumbel.Rd
@@ -10,8 +10,8 @@
}
\usage{
-cens.gumbel(llocation = "identitylink", lscale = "loge",
- iscale = NULL, mean = TRUE, percentiles = NULL, zero = 2)
+cens.gumbel(llocation = "identitylink", lscale = "loge", iscale = NULL,
+ mean = TRUE, percentiles = NULL, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/cens.normal.Rd b/man/cens.normal.Rd
index 357e35f..f705e9e 100644
--- a/man/cens.normal.Rd
+++ b/man/cens.normal.Rd
@@ -12,7 +12,7 @@
}
\usage{
-cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
+cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -32,11 +32,12 @@ cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
}
\item{zero}{
- An integer vector, containing the value 1 or 2. If so,
+ A vector, e.g., containing the value 1 or 2; if so,
the mean or standard deviation respectively are modelled
as an intercept only.
Setting \code{zero = NULL} means both linear/additive predictors
are modelled as functions of the explanatory variables.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -69,8 +70,8 @@ cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
\author{ T. W. Yee }
\note{
- This function is an alternative to \code{\link{tobit}}
- but cannot handle a matrix response
+ This function, which is an alternative to \code{\link{tobit}},
+ cannot handle a matrix response
and uses different working weights.
If there are no censored observations then \code{\link{uninormal}}
is recommended instead.
@@ -100,13 +101,14 @@ cdata <- transform(cdata, L = runif(nn, 80, 90), # Lower censoring points
cdata <- transform(cdata, y = pmax(L, ystar)) # Left censored
cdata <- transform(cdata, y = pmin(U, y)) # Right censored
with(cdata, hist(y))
-Extra <- list(leftcensored = with(cdata, ystar < L),
+Extra <- list(leftcensored = with(cdata, ystar < L),
rightcensored = with(cdata, ystar > U))
fit1 <- vglm(y ~ x2, cens.normal, data = cdata, crit = "c", extra = Extra)
fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
data = cdata, crit = "c", trace = TRUE)
coef(fit1, matrix = TRUE)
-max(abs(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE))) # Should be 0
+max(abs(coef(fit1, matrix = TRUE) -
+ coef(fit2, matrix = TRUE))) # Should be 0
names(fit1 at extra)
}
}
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index abab645..a7a5c62 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -87,6 +87,7 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
\seealso{
\code{\link{Links}},
+ \code{\link{logitoffsetlink}},
\code{\link{logit}},
\code{\link{probit}},
\code{\link{cauchit}}.
diff --git a/man/coefvgam.Rd b/man/coefvgam.Rd
new file mode 100644
index 0000000..7116c97
--- /dev/null
+++ b/man/coefvgam.Rd
@@ -0,0 +1,89 @@
+\name{coefvgam}
+\alias{coefvgam}
+\alias{coef,vgam-method}
+\alias{coefficients,vgam-method}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Extract Model Coefficients of a vgam() Object}
+\description{
+ Extracts the estimated
+ coefficients from vgam() objects.
+
+
+}
+\usage{
+coefvgam(object, type = c("linear", "nonlinear"), ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{object}{ A
+ \code{\link{vgam}} object.
+
+
+ }
+ \item{type}{ Character.
+ The default is the first choice.
+
+
+ }
+ \item{\ldots}{
+ Optional arguments fed into
+ \code{\link{coefvlm}}.
+
+
+ }
+}
+\details{
+ For VGAMs, because modified backfitting is performed,
+ each fitted function is decomposed into a linear and nonlinear
+ (smooth) part.
+ The argument \code{type} is used to return which one is wanted.
+
+
+
+}
+\value{
+ A vector if \code{type = "linear"}.
+ A list if \code{type = "nonlinear"}, and each component of
+ this list corresponds to an \code{\link{s}} term;
+ the component contains an S4 object with slot names such as
+ \code{"Bcoefficients"},
+ \code{"knots"},
+ \code{"xmin"},
+ \code{"xmax"}.
+
+
+}
+%\references{
+%
+%
+%}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+%\section{Warning }{
+
+%}
+
+\seealso{
+ \code{\link{vgam}},
+ \code{\link{coefvlm}},
+ \code{\link[stats]{coef}}.
+
+
+% \code{\link{coef-method}},
+
+
+}
+\examples{
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
+coef(fit) # Same as coef(fit, type = "linear")
+(ii <- coef(fit, type = "nonlinear"))
+is.list(ii)
+names(ii)
+slotNames(ii[[1]])
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/coefvlm.Rd b/man/coefvlm.Rd
index 8455b31..eaa72e6 100644
--- a/man/coefvlm.Rd
+++ b/man/coefvlm.Rd
@@ -77,12 +77,15 @@ Reduced-rank vector generalized linear models.
\seealso{
\code{\link{vglm}},
+ \code{\link{coefvgam}},
\code{\link[stats]{coef}}.
+
% \code{\link{coef-method}},
+
}
\examples{
zdata <- data.frame(x2 = runif(nn <- 200))
diff --git a/man/cratio.Rd b/man/cratio.Rd
index 6d403d7..0590a9b 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -5,6 +5,7 @@
\description{
Fits a continuation ratio logit/probit/cloglog/cauchit/...
regression model to an ordered (preferably) factor response.
+
}
\usage{
cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
@@ -81,11 +82,6 @@ Agresti, A. (2013)
3rd ed. Hoboken, NJ, USA: Wiley.
-Simonoff, J. S. (2003)
-\emph{Analyzing Categorical Data},
-New York: Springer-Verlag.
-
-
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
@@ -135,6 +131,7 @@ The \pkg{VGAM} package for categorical data analysis.
\code{\link{acat}},
\code{\link{cumulative}},
\code{\link{multinomial}},
+ \code{\link{margeff}},
\code{\link{pneumo}},
\code{\link{logit}},
\code{\link{probit}},
@@ -152,7 +149,12 @@ coef(fit, matrix = TRUE)
constraints(fit)
predict(fit)
predict(fit, untransform = TRUE)
+margeff(fit)
}
\keyword{models}
\keyword{regression}
+%Simonoff, J. S. (2003)
+%\emph{Analyzing Categorical Data},
+%New York: Springer-Verlag.
+
diff --git a/man/dagum.Rd b/man/dagum.Rd
index 7975b68..e81df31 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -10,9 +10,10 @@
dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, imethod = 1,
lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5), gshape2.p = exp(-5:5),
- probs.y = c(0.25, 0.5, 0.75), zero = ifelse(lss, -(2:3), -c(1, 3)))
+ probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
+% zero = ifelse(lss, -(2:3), -c(1, 3))
\arguments{
\item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
diff --git a/man/double.cens.normal.Rd b/man/double.cens.normal.Rd
index 0f3164f..560a205 100644
--- a/man/double.cens.normal.Rd
+++ b/man/double.cens.normal.Rd
@@ -9,7 +9,7 @@
}
\usage{
double.cens.normal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
- imu = NULL, isd = NULL, zero = 2)
+ imu = NULL, isd = NULL, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -37,17 +37,20 @@ double.cens.normal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
\code{r1} or \code{r2} are positive.
+
By default, the mean is the first linear/additive predictor and
the log of the standard deviation is the second linear/additive
predictor.
+
} \value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Harter, H. L. and Moore, A. H. (1966)
@@ -95,7 +98,8 @@ c(sd(mu.save), sd(sd.save))
# Data from Sarhan and Greenberg (1962); MLEs are mu = 9.2606, sd = 1.3754
strontium90 <- data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
-fit <- vglm(y ~ 1, double.cens.normal(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
+fit <- vglm(y ~ 1, double.cens.normal(r1 = 2, r2 = 3, isd = 6),
+ data = strontium90, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/double.expbinomial.Rd b/man/double.expbinomial.Rd
index 5336994..7e5b1cb 100644
--- a/man/double.expbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -11,8 +11,9 @@
}
\usage{
double.expbinomial(lmean = "logit", ldispersion = "logit",
- idispersion = 0.25, zero = 2)
+ idispersion = 0.25, zero = "dispersion")
}
+% idispersion = 0.25, zero = 2
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{lmean, ldispersion}{
@@ -31,12 +32,13 @@ double.expbinomial(lmean = "logit", ldispersion = "logit",
}
\item{zero}{
- An integer specifying which
- linear/additive predictor is to be modelled as an intercept only.
- If assigned, the single value should be either \code{1} or \code{2}.
+ A vector specifying which
+ linear/additive predictor is to be modelled as intercept-only.
+ If assigned, the single value can be either \code{1} or \code{2}.
The default is to have a single dispersion parameter value.
To model both parameters as functions of the covariates assign
\code{zero = NULL}.
+ See \code{\link{CommonVGAMffArguments}} for more details.
}
diff --git a/man/expint.Rd b/man/expint3.Rd
similarity index 67%
rename from man/expint.Rd
rename to man/expint3.Rd
index 5b9cad1..c443bed 100644
--- a/man/expint.Rd
+++ b/man/expint3.Rd
@@ -10,14 +10,14 @@ The Exponential Integral and Variants
\description{
Computes the exponential integral \eqn{Ei(x)} for real values,
as well as \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} and
- \eqn{E_1(x)}.
+ \eqn{E_1(x)} and their derivatives (up to the 3rd derivative).
}
\usage{
-expint(x)
-expexpint(x)
-expint.E1(x)
+expint(x, deriv = 0)
+expexpint(x, deriv = 0)
+expint.E1(x, deriv = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -26,21 +26,33 @@ expint.E1(x)
}
+
+\item{deriv}{Integer. Either 0, 1, 2 or 3.
+
+
+}
+
}
\details{
The exponential integral \eqn{Ei(x)} function is the integral of
- \eqn{exp(t) / t}
+ \eqn{\exp(t) / t}{exp(t) / t}
from 0 to \eqn{x}, for positive real \eqn{x}.
The function \eqn{E_1(x)} is the integral of
- \eqn{exp(-t) / t}
+ \eqn{\exp(-t) / t}{exp(-t) / t}
from \eqn{x} to infinity, for positive real \eqn{x}.
+
+
}
\value{
- Function \code{expint(x)} returns \eqn{Ei(x)},
- function \code{expexpint(x)} returns \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)},
- function \code{expint.E1(x)} returns \eqn{E_1(x)}.
+ Function \code{expint(x, deriv = n)} returns the
+ \eqn{n}th derivative of \eqn{Ei(x)} (up to the 3rd),
+ function \code{expexpint(x, deriv = n)} returns the
+ \eqn{n}th derivative of
+ \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} (up to the 3rd),
+ function \code{expint.E1(x, deriv = n)} returns the \eqn{n}th derivative of
+ \eqn{E_1(x)}(up to the 3rd).
}
@@ -53,12 +65,16 @@ expint.E1(x)
}
\author{
T. W. Yee has simply written a small wrapper function to call the
-above FORTRAN code.
+NETLIB FORTRAN code.
+Xiangjie Xue modified the functions to calculate derivatives.
+Higher derivatives can actually be calculated---please let me
+know if you need it.
+
}
-\note{
-This function has not been tested thoroughly.
+\section{Warning }{
+These functions have not been tested thoroughly.
}
@@ -92,3 +108,5 @@ abline(h = 0, v = 0, lty = "dashed", col = "blue")
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{math}
+
+
diff --git a/man/fisk.Rd b/man/fisk.Rd
index c3ecf2f..f90ebba 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -10,8 +10,7 @@
\usage{
fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
- gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = ifelse(lss,
- -2, -1))
+ gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index c1484b1..9c720b3 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -123,7 +123,7 @@ zdata <- data.frame(x2 = runif(nn <- 1000))
zdata <- transform(zdata, pstr0.3 = logit(-0.5 , inverse = TRUE),
lambda.3 = loge(-0.5 + 2*x2, inverse = TRUE))
zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda.3, pstr0 = pstr0.3))
-fit3 <- vglm(y1 ~ x2, zipoisson (zero = NULL), data = zdata, crit = "coef")
+fit3 <- vglm(y1 ~ x2, zipoisson(zero = NULL), data = zdata, trace = TRUE)
head(fitted(fit3, type.fitted = "mean" )) # E(Y), which is the default
head(fitted(fit3, type.fitted = "pobs0")) # P(Y = 0)
head(fitted(fit3, type.fitted = "pstr0")) # Prob of a structural 0
diff --git a/man/freund61.Rd b/man/freund61.Rd
index c54a069..9a7a22a 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -39,10 +39,11 @@ freund61(la = "loge", lap = "loge", lb = "loge", lbp = "loge",
}
\item{zero}{
- An integer-valued vector specifying which
+ A vector specifying which
linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2,3,4\}.
+ The values can be from the set \{1,2,3,4\}.
The default is none of them.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 76e316f..b0b74dc 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -10,7 +10,7 @@
\usage{
gamma2(lmu = "loge", lshape = "loge",
imethod = 1, ishape = NULL,
- parallel = FALSE, deviance.arg = FALSE, zero = -2)
+ parallel = FALSE, deviance.arg = FALSE, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
% apply.parint = FALSE,
@@ -51,21 +51,24 @@ gamma2(lmu = "loge", lshape = "loge",
}
\item{zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
% An integer specifying which
% linear/additive predictor is to be modelled as an intercept only.
% If assigned, the single value should be either 1 or 2 or \code{NULL}.
% The default is to model \eqn{shape} as an intercept only.
% A value \code{NULL} means neither 1 or 2.
- Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if
- used at all. Specifies which of the two linear/additive predictors
- are modelled as an intercept only. By default, the shape parameter
- (after \code{lshape} is applied) is modelled as a single unknown
- number that is estimated. It can be modelled as a function of
- the explanatory variables by setting \code{zero = NULL}. A negative
- value means that the value is recycled, so setting \eqn{-2} means
- all shape parameters are intercept only.
- See \code{\link{CommonVGAMffArguments}} for more information.
+% Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if
+% used at all. Specifies which of the two linear/additive predictors
+% are modelled as an intercept only. By default, the shape parameter
+% (after \code{lshape} is applied) is modelled as a single unknown
+% number that is estimated. It can be modelled as a function of
+% the explanatory variables by setting \code{zero = NULL}. A negative
+% value means that the value is recycled, so setting \eqn{-2} means
+% all shape parameters are intercept only.
+% See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/gammaR.Rd b/man/gammaR.Rd
index c4eb840..69cdd52 100644
--- a/man/gammaR.Rd
+++ b/man/gammaR.Rd
@@ -7,8 +7,9 @@
}
\usage{
gammaR(lrate = "loge", lshape = "loge", irate = NULL,
- ishape = NULL, lss = TRUE, zero = ifelse(lss, -2, -1))
+ ishape = NULL, lss = TRUE, zero = "shape")
}
+% zero = ifelse(lss, -2, -1)
%- maybe also 'usage' for other objects documented here.
\arguments{
% \item{nowarning}{ Logical. Suppress a warning? }
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index c976050..c322ce6 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -13,9 +13,10 @@ genbetaII(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
ishape2.p = NULL, ishape3.q = NULL, lss = TRUE,
gscale = exp(-5:5), gshape1.a = exp(-5:5),
gshape2.p = exp(-5:5), gshape3.q = exp(-5:5),
- zero = ifelse(lss, -(2:4), -c(1, 3:4)))
+ zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
+% zero = ifelse(lss, -(2:4), -c(1, 3:4))
\arguments{
\item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
@@ -54,10 +55,14 @@ genbetaII(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
% }
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
The default is to set all the shape parameters to be
intercept-only.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+
+% An integer-valued vector specifying which
+% linear/additive predictors are modelled as intercepts only.
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index 9a9b264..5e0556e 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -35,11 +35,13 @@ gengamma.stacy(lscale = "loge", ld = "loge", lk = "loge",
}
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2,3\}.
- The default value means none are modelled as intercept-only terms.
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+% An integer-valued vector specifying which
+% linear/additive predictors are modelled as intercepts only.
+% The values must be from the set \{1,2,3\}.
+% The default value means none are modelled as intercept-only terms.
}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 400ec9c..e61db0a 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -9,7 +9,8 @@
\usage{
genpoisson(llambda = "rhobit", ltheta = "loge",
ilambda = NULL, itheta = NULL,
- use.approx = TRUE, imethod = 1, ishrinkage = 0.95, zero = -1)
+ use.approx = TRUE, imethod = 1, ishrinkage = 0.95,
+ zero = "lambda")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/geometric.Rd b/man/geometric.Rd
index bd2d2b2..6b75090 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -32,7 +32,7 @@ truncgeometric(upper.limit = Inf,
}
\item{iprob, imethod, zero}{
- See \code{\link{CommonVGAMffArguments}} for more details.
+ See \code{\link{CommonVGAMffArguments}} for details.
}
diff --git a/man/gev.Rd b/man/gev.Rd
index b4d82ed..c4ba363 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -12,11 +12,13 @@
gev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"), giveWarning = TRUE, zero = 2:3)
+ type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
+ zero = c("scale", "shape"))
egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
- type.fitted = c("percentiles", "mean"), giveWarning = TRUE, zero = 2:3)
+ type.fitted = c("percentiles", "mean"), giveWarning = TRUE,
+ zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -128,13 +130,14 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
}
\item{zero}{
- An integer-valued vector specifying which
+ A vector specifying which
linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2,3\} corresponding
+ The values can be from the set \{1,2,3\} corresponding
respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi}.
If \code{zero = NULL} then all linear/additive predictors are modelled as
a linear combination of the explanatory variables.
For many data sets having \code{zero = 3} is a good idea.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
@@ -185,7 +188,7 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
}
\section{Warning }{
- Currently, if an estimate of \eqn{\xi}{xi} is too close to zero then
+ Currently, if an estimate of \eqn{\xi}{xi} is too close to 0 then
an error will occur for \code{gev()} with multivariate responses.
In general, \code{egev()} is more reliable than \code{gev()}.
diff --git a/man/gpd.Rd b/man/gpd.Rd
index 39cfb98..052111e 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -11,7 +11,7 @@
gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(90, 95), iscale = NULL, ishape = NULL,
tolshape0 = 0.001, type.fitted = c("percentiles", "mean"),
- giveWarning = TRUE, imethod = 1, zero = -2)
+ giveWarning = TRUE, imethod = 1, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -123,7 +123,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
}
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
For one response, the value should be from the set \{1,2\} corresponding
respectively to \eqn{\sigma}{sigma} and \eqn{\xi}{xi}.
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index b6f93c4..0bd7746 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -66,8 +66,8 @@ egumbel(llocation = "identitylink", lscale = "loge",
% }
\item{zero}{
- An integer-valued vector specifying which linear/additive predictors
- are modelled as intercepts only. The value (possibly values) must
+ A vector specifying which linear/additive predictors
+ are modelled as intercepts only. The value (possibly values) can
be from the set \{1, 2\} corresponding respectively to \eqn{\mu}{mu}
and \eqn{\sigma}{sigma}. By default all linear/additive predictors
are modelled as a linear combination of the explanatory variables.
diff --git a/man/gumbelII.Rd b/man/gumbelII.Rd
index a93a863..3bff665 100644
--- a/man/gumbelII.Rd
+++ b/man/gumbelII.Rd
@@ -12,9 +12,10 @@
\usage{
gumbelII(lscale = "loge", lshape = "loge", iscale = NULL, ishape = NULL,
probs.y = c(0.2, 0.5, 0.8), perc.out = NULL, imethod = 1,
- zero = -1, nowarning = FALSE)
+ zero = "shape", nowarning = FALSE)
}
%- maybe also 'usage' for other objects documented here.
+% zero = "scale", nowarning = FALSE 20151128
\arguments{
\item{nowarning}{ Logical. Suppress a warning? }
diff --git a/man/huber.Rd b/man/huber.Rd
index 67e0cb5..e3ba3a3 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -12,7 +12,7 @@
\usage{
huber1(llocation = "identitylink", k = 0.862, imethod = 1)
huber2(llocation = "identitylink", lscale = "loge",
- k = 0.862, imethod = 1, zero = 2)
+ k = 0.862, imethod = 1, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -31,7 +31,7 @@ huber2(llocation = "identitylink", lscale = "loge",
\item{imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for information.
The default value of \code{zero} means the scale parameter is
- modelled as an intercept-only.
+ modelled as intercept-only.
}
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 893bc18..da9efcc 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -31,7 +31,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
}
\item{imethod, ishrinkage, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/inv.lomax.Rd b/man/inv.lomax.Rd
index 9da37e5..eb3277d 100644
--- a/man/inv.lomax.Rd
+++ b/man/inv.lomax.Rd
@@ -7,9 +7,9 @@
inverse Lomax distribution.
}
\usage{
-inv.lomax(lscale = "loge", lshape2.p = "loge", iscale = NULL,
- ishape2.p = NULL, imethod = 1, gscale = exp(-5:5), gshape2.p = exp(-5:5),
- probs.y = c(0.25, 0.5, 0.75), zero = -2)
+inv.lomax(lscale = "loge", lshape2.p = "loge", iscale = NULL,
+ ishape2.p = NULL, imethod = 1, gscale = exp(-5:5),
+ gshape2.p = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape2.p")
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/inv.paralogistic.Rd b/man/inv.paralogistic.Rd
index b0d84d4..cf14cb1 100644
--- a/man/inv.paralogistic.Rd
+++ b/man/inv.paralogistic.Rd
@@ -10,7 +10,7 @@
inv.paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
- zero = ifelse(lss, -2, -1))
+ zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/laplace.Rd b/man/laplace.Rd
index 121ebc2..9ed8de4 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -9,7 +9,7 @@
}
\usage{
laplace(llocation = "identitylink", lscale = "loge",
- ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -35,7 +35,7 @@ laplace(llocation = "identitylink", lscale = "loge",
}
\item{zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/lerch.Rd b/man/lerch.Rd
index fbf5509..f649aec 100644
--- a/man/lerch.Rd
+++ b/man/lerch.Rd
@@ -56,7 +56,8 @@ lerch(x, s, v, tolerance = 1.0e-10, iter = 100)
}
\references{
- \url{http://aksenov.freeshell.org/lerchphi/source/lerchphi.c}.
+ Originally the code was found at
+ \code{http://aksenov.freeshell.org/lerchphi/source/lerchphi.c}.
Bateman, H. (1953)
diff --git a/man/levy.Rd b/man/levy.Rd
index e9e552a..e18f422 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -81,8 +81,9 @@ levy(location = 0, lscale = "loge", iscale = NULL)
\seealso{
- The Nolan article is at
- \url{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
+ The Nolan article was at
+ \code{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
+
% \code{\link{dlevy}}.
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 61a6f09..5d0dd73 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -11,7 +11,8 @@
\usage{
lgamma1(lshape = "loge", ishape = NULL)
lgamma3(llocation = "identitylink", lscale = "loge", lshape = "loge",
- ilocation = NULL, iscale = NULL, ishape = 1, zero = 2:3)
+ ilocation = NULL, iscale = NULL, ishape = 1,
+ zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/lino.Rd b/man/lino.Rd
index 008f9fc..d0cc96e 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -32,10 +32,12 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
}
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
Here, the values must be from the set \{1,2,3\} which correspond to
\eqn{a}, \eqn{b}, \eqn{\lambda}{lambda}, respectively.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 1484e17..6f68878 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -7,7 +7,7 @@
to the gamma distribution.
}
\usage{
-lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
+lms.bcg(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
idf.mu = 4, idf.sigma = 2, ilambda = 1, isigma = NULL)
}
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 860ce77..3f52f7b 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -8,7 +8,7 @@
}
\usage{
-lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
+lms.bcn(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
idf.mu = 4, idf.sigma = 2, ilambda = 1,
isigma = NULL, tol0 = 0.001)
@@ -32,7 +32,7 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
}
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
The values must be from the set \{1,2,3\}.
The default value usually increases the chance of successful convergence.
@@ -148,7 +148,7 @@ Of the three functions, it is often a good idea to allow
\eqn{\lambda(x)}{lambda(x)} and \eqn{\sigma(x)}{sigma(x)}
usually vary more smoothly with \eqn{x}. This is somewhat
reflected in the default value for the argument \code{zero},
-viz. \code{zero = c(1,3)}.
+viz. \code{zero = c(1, 3)}.
}
@@ -199,10 +199,11 @@ Quantile regression via vector generalized additive models.
In general, the lambda and sigma functions should be more smoother
than the mean function.
- Having \code{zero = 1}, \code{zero = 3} or \code{zero = c(1,3)}
+ Having \code{zero = 1}, \code{zero = 3} or \code{zero = c(1, 3)}
is often a good idea. See the example below.
+
% While it is usual to regress the response against a single
% covariate, it is possible to add other explanatory variables,
% e.g., gender.
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index ec5ee15..7f504a9 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -8,12 +8,12 @@
to normality.
}
\usage{
-lms.yjn(percentiles = c(25, 50, 75), zero = c(1,3),
+lms.yjn(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
llambda = "identitylink", lsigma = "loge",
idf.mu = 4, idf.sigma = 2,
ilambda = 1, isigma = NULL, rule = c(10, 5),
yoffset = NULL, diagW = FALSE, iters.diagW = 6)
-lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
+lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
idf.mu = 4, idf.sigma = 2, ilambda = 1.0,
isigma = NULL, yoffset = NULL, nsimEIM = 250)
diff --git a/man/log1mexp.Rd b/man/log1mexp.Rd
new file mode 100644
index 0000000..7697d24
--- /dev/null
+++ b/man/log1mexp.Rd
@@ -0,0 +1,90 @@
+\name{log1mexp}
+\alias{log1mexp}
+\alias{log1pexp}
+
+\title{
+ Logarithms with a Unit Offset and Exponential Term
+}
+\description{
+Computes \code{log(1 + exp(x))} and \code{log(1 - exp(-x))} accurately.
+
+}
+\usage{
+log1mexp(x)
+log1pexp(x)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x}{
+ A vector of reals (numeric). Complex numbers not allowed since
+ \code{\link[base]{expm1}} and \code{\link[base]{log1p}} do not handle these.
+
+
+}
+}
+\details{
+%% ~~ If necessary, more details than the description above ~~
+Computes \code{log(1 + exp(x))} and \code{log(1 - exp(-x))}
+accurately. An adjustment is made when \eqn{x} is away from 0
+in value.
+
+
+}
+\value{
+
+\code{log1mexp(x)} gives the value of \eqn{\log(1-\exp(-x))}{log(1-exp(-x))}.
+
+
+
+\code{log1pexp(x)} gives the value of \eqn{\log(1+\exp(x))}{log(1+exp(x))}.
+
+
+
+}
+\references{
+
+Maechler, Martin (2012).
+Accurately Computing log(1-exp(-|a|)).
+Accessed from the \pkg{Rmpfr} package.
+
+
+}
+\author{
+This is a direct translation of the function in Martin Maechler's
+(2012) paper by Xiangjie Xue
+and T. W. Yee.
+
+
+}
+\note{
+If \code{NA} or \code{NaN} is present in the input, the
+corresponding output will be \code{NA}.
+
+
+}
+
+%% ~Make other sections like Warning with \section{Warning }{....} ~
+
+\seealso{
+ \code{\link[base]{log1p}},
+ \code{\link[base]{expm1}},
+ \code{\link[base]{exp}},
+ \code{\link[base]{log}}
+
+
+
+}
+\examples{
+x <- c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf, NA)
+log1pexp(x)
+log(1 + exp(x)) # Naive; suffers from overflow
+log1mexp(x)
+log(1 - exp(-x))
+y <- -x
+log1pexp(y)
+log(1 + exp(y)) # Naive; suffers from inaccuracy
+}
+
+
+
+
diff --git a/man/log1pexp.Rd b/man/log1pexp.Rd
deleted file mode 100644
index e588800..0000000
--- a/man/log1pexp.Rd
+++ /dev/null
@@ -1,66 +0,0 @@
-\name{log1pexp}
-\alias{log1pexp}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{
-Logarithms with an Unit Offset and Exponential Term
-
-
-}
-\description{
-Computes \code{log(1 + exp(x))} accurately.
-
-}
-\usage{
-log1pexp(x)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
- \item{x}{
-A vector of reals (numeric).
-Complex numbers not allowed since \code{\link{log1p}} does
-not handle these.
-
-
-}
-}
-\details{
- Computes \code{log(1 + exp(x))} accurately.
- An adjustment is made when \code{x} is positive and large in value.
-
-
-}
-\value{
- Returns \code{log(1 + exp(x))}.
-
-
-}
-%\references{
-%
-%}
-%\author{
-%T. W. Yee
-%
-%}
-%\note{
-%
-%}
-
-%% ~Make other sections like Warning with \section{Warning }{....} ~
-
-\seealso{
- \code{\link[base:log]{log1p}},
- \code{\link[base:log]{exp}}.
-
-
-}
-\examples{
-x <- c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf)
-log1pexp(x)
-log(1 + exp(x)) # Naive; suffers from overflow
-x <- -c(10, 50, 100, 200, 400, 500, 800, 1000, 1e4, 1e5, 1e20, Inf)
-log1pexp(x)
-log(1 + exp(x)) # Naive; suffers from inaccuracy
-}
-% Add one or more standard keywords, see file 'KEYWORDS' in the
-% R documentation directory.
-\keyword{math}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index 52c1f6f..7d37c5b 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -13,7 +13,7 @@
\usage{
logistic1(llocation = "identitylink", scale.arg = 1, imethod = 1)
logistic(llocation = "identitylink", lscale = "loge",
- ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -31,12 +31,12 @@ logistic(llocation = "identitylink", lscale = "loge",
}
\item{ilocation, iscale}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
\item{imethod, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
@@ -117,6 +117,7 @@ A Note on Deriving the Information Matrix for a Logistic Distribution,
\seealso{
\code{\link[stats:Logistic]{rlogis}},
+ \code{\link{CommonVGAMffArguments}},
\code{\link{logit}},
\code{\link{cumulative}},
\code{\link{bilogistic}},
diff --git a/man/logit.Rd b/man/logit.Rd
index b50ac12..16ea190 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -117,11 +117,13 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
\seealso{
\code{\link{Links}},
+ \code{\link{logitoffsetlink}},
\code{\link{probit}},
\code{\link{cloglog}},
\code{\link{cauchit}},
\code{\link{logistic1}},
\code{\link{loge}},
+ \code{\link[stats]{plogis}},
\code{\link{multilogit}}.
diff --git a/man/logitoffsetlink.Rd b/man/logitoffsetlink.Rd
new file mode 100644
index 0000000..394b6e2
--- /dev/null
+++ b/man/logitoffsetlink.Rd
@@ -0,0 +1,106 @@
+\name{logitoffsetlink}
+\alias{logitoffsetlink}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Logit-with-an-Offset Link Function }
+\description{
+ Computes the logitoffsetlink transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+logitoffsetlink(theta, offset = 0, inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+
+ }
+ \item{offset}{
+ The offset value(s), which must be non-negative.
+ It is called \eqn{K} below.
+
+
+ }
+
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
+
+ }
+
+}
+\details{
+ This link function allows for some asymmetry compared to the
+ ordinary \code{\link{logit}} link.
+ The formula is
+ \deqn{\log(\theta/(1-\theta) - K)}{%
+ log(theta/(1-theta) - K)}
+ and the default value for the offset \eqn{K} corresponds to the
+ ordinary \code{\link{logit}} link.
+ When \code{inverse = TRUE}, the value will
+ lie in the interval \eqn{(K / (1+K), 1)}.
+
+
+}
+\value{
+ For \code{logitoffsetlink} with \code{deriv = 0}, the
+ logitoffsetlink of \code{theta}, i.e.,
+ \code{log(theta/(1-theta) - K)} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{(K + exp(theta))/(1 + exp(theta) + K)}.
+
+
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+
+}
+\references{
+ Komori, O. and Eguchi, S. et al., 2016.
+ An asymmetric logistic model for ecological data.
+ \emph{Methods in Ecology and Evolution},
+ \bold{7}.
+
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ This function is numerically less stable than
+ \code{\link{logit}}.
+
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{logit}}.
+
+
+}
+\examples{
+p <- seq(0.05, 0.99, by = 0.01); myoff <- 0.05
+logitoffsetlink(p, myoff)
+max(abs(logitoffsetlink(logitoffsetlink(p, myoff),
+ myoff, inverse = TRUE) - p)) # Should be 0
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
+
+
+
+
+
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index cab41ed..5483a6e 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -7,9 +7,9 @@
}
\usage{
-loglinb2(exchangeable = FALSE, zero = 3)
-
+loglinb2(exchangeable = FALSE, zero = "u12")
}
+%loglinb2(exchangeable = FALSE, zero = 3)
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{exchangeable}{ Logical.
@@ -17,8 +17,11 @@ loglinb2(exchangeable = FALSE, zero = 3)
be equal. Should be set \code{TRUE} for ears, eyes, etc. data.
}
- \item{zero}{ Which linear/additive predictor is modelled as an
- intercept only? A \code{NULL} means none of them.
+ \item{zero}{ Which linear/additive predictors are modelled as
+ intercept-only?
+ A \code{NULL} means none of them.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
}
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
index fca6d61..1a81ed8 100644
--- a/man/loglinb3.Rd
+++ b/man/loglinb3.Rd
@@ -7,7 +7,7 @@
}
\usage{
-loglinb3(exchangeable = FALSE, zero = 4:6)
+loglinb3(exchangeable = FALSE, zero = c("u12", "u13", "u23"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,8 +16,10 @@ loglinb3(exchangeable = FALSE, zero = 4:6)
be equal.
}
- \item{zero}{ Which linear/additive predictor is modelled as an
- intercept only? A \code{NULL} means none.
+ \item{zero}{ Which linear/additive predictors are modelled as
+ intercept-only?
+ A \code{NULL} means none.
+ See \code{\link{CommonVGAMffArguments}} for further information.
}
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index b7cb034..bb3c72a 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -9,7 +9,7 @@
}
\usage{
-lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
+lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = "sdlog")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -32,10 +32,10 @@ lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
+ Specifies which
+ linear/additive predictor is modelled as intercept-only.
For \code{lognormal()},
- the values must be from the set \{1,2\} which correspond to
+ the values can be from the set \{1,2\} which correspond to
\code{mu}, \code{sigma}, respectively.
See \code{\link{CommonVGAMffArguments}} for more information.
diff --git a/man/lomax.Rd b/man/lomax.Rd
index b8797f2..51e222e 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -10,7 +10,7 @@
\usage{
lomax(lscale = "loge", lshape3.q = "loge", iscale = NULL,
ishape3.q = NULL, imethod = 1, gscale = exp(-5:5),
- gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = -2)
+ gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 5f34a07..7a44c09 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -58,6 +58,7 @@ makeham(lscale = "loge", lshape = "loge", lepsilon = "loge",
See \code{\link{CommonVGAMffArguments}}.
Argument \code{probs.y} is used only when \code{imethod = 2}.
+
}
\item{oim.mean}{
To be currently ignored.
diff --git a/man/margeff.Rd b/man/margeff.Rd
index 97fa023..f3b16d1 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -1,23 +1,34 @@
\name{margeff}
\alias{margeff}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Marginal effects for the multinomial logit and cumulative models }
+\title{ Marginal effects for several categorical response models }
\description{
Marginal effects for the multinomial logit model and
- cumulative logit/probit/... models: the derivative
- of the fitted probabilities with respect to each explanatory
- variable.
+ cumulative logit/probit/... models and
+ continuation ratio models and
+ stopping ratio models and
+ adjacent categories models:
+ the derivative of the fitted probabilities with respect to
+ each explanatory variable.
+
}
\usage{
-margeff(object, subset = NULL)
+margeff(object, subset = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
- A \code{\link{vglm}} \code{\link{multinomial}}
+ A \code{\link{vglm}} object,
+ with one of the following family functions:
+ \code{\link{multinomial}},
+ \code{\link{cumulative}},
+ \code{\link{cratio}},
+ \code{\link{sratio}}
or
- \code{\link{cumulative}} object.
+ \code{\link{acat}}.
+
+
}
\item{subset}{
@@ -25,15 +36,35 @@ margeff(object, subset = NULL)
Recycling is used if possible.
The default means all observations.
+
}
+ \item{\dots}{
+ further arguments passed into the other methods functions.
+% e.g., \code{subset}.
+
+
+ }
}
\details{
Computes the derivative of the fitted probabilities
- of a multinomial logit model
- or
- cumulative logit/probit/... model
+ of the categorical response model
with respect to each explanatory variable.
+ Formerly one big function, this function now uses S4
+ dispatch to break up the computations.
+
+
+
+% 20151215
+ The function \code{margeff()} is \emph{not} generic. However, it
+ calls the function \code{margeffS4VGAM()} which \emph{is}.
+ This is based on the class of the \code{VGAMff} argument, and
+ it uses the S4 function \code{\link[methods]{setMethod}} to
+ correctly dispatch to the required methods function.
+ The inheritance is given by the \code{vfamily} slot of the
+ \pkg{VGAM} family function.
+
+
}
\value{
@@ -42,7 +73,7 @@ margeff(object, subset = NULL)
\eqn{M+1} levels, and there are \eqn{n} observations.
- If
+ In general, if
\code{is.numeric(subset)}
and
\code{length(subset) == 1} then a
@@ -51,7 +82,13 @@ margeff(object, subset = NULL)
}
% \references{ ~put references to the literature/web site here ~ }
-\author{ T. W. Yee }
+\author{ T. W. Yee,
+with some help and motivation from Stasha Rmandic.
+
+
+
+}
+
\section{Warning }{
Care is needed in interpretation, e.g., the change is not
universally accurate for a unit change in each explanatory
@@ -71,6 +108,15 @@ margeff(object, subset = NULL)
of the form \code{ ~ x2 + x3 + x4}, etc.
+
+ Some numerical problems may occur if the fitted values are
+ close to 0 or 1 for the
+ \code{\link{cratio}} and
+ \code{\link{sratio}} models.
+ Models with offsets may result in an incorrect answer.
+
+
+
}
\note{
@@ -82,19 +128,33 @@ margeff(object, subset = NULL)
nor \code{\link{vgam}} objects.
- For \code{\link{multinomial}}
- if \code{subset} is numeric then the function uses a \code{for} loop over
- the observations (slow).
- The default computations use vectorization; this uses more memory than a
- \code{for} loop but is faster.
+
+% 20151211; this is now false, so can delete this:
+% For \code{\link{multinomial}},
+% if \code{subset} is numeric then the function uses a \code{for} loop over
+% the observations (slow).
+% The default computations use vectorization; this uses more memory than a
+% \code{for} loop but is faster.
+
+
+
+ Some other limitations are imposed, e.g.,
+ for \code{\link{acat}} models
+ only a \code{\link{loge}} link is allowed.
+
}
\seealso{
\code{\link{multinomial}},
\code{\link{cumulative}},
+ \code{\link{propodds}},
+ \code{\link{acat}},
+ \code{\link{cratio}},
+ \code{\link{sratio}},
\code{\link{vglm}}.
+
}
\examples{
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index a474747..ae2e0d1 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -26,10 +26,8 @@ mccullagh89(ltheta = "rhobit", lnu = logoff(offset = 0.5),
}
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The default is none of them.
- If used, choose one value from the set \{1,2\}.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
}
}
diff --git a/man/micmen.Rd b/man/micmen.Rd
index 80434e7..914d301 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -48,24 +48,16 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
}
\item{imethod, probs.x}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
- \item{nsimEIM}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
}
\item{oim}{
Use the OIM?
- See \code{\link{CommonVGAMffArguments}} for more information.
-
- }
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2\}.
- A \code{NULL} means none.
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
}
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index 1bf94cc..30abe0c 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -10,7 +10,7 @@
}
\usage{
mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
- il2 = NULL, qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
+ il2 = NULL, qmu = c(0.8, 0.2), nsimEIM = 100, zero = "phi")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/mix2normal.Rd b/man/mix2normal.Rd
index d2d07d0..0e95155 100644
--- a/man/mix2normal.Rd
+++ b/man/mix2normal.Rd
@@ -10,7 +10,7 @@
\usage{
mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
- qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = 1)
+ qmu = c(0.2, 0.8), eq.sd = TRUE, nsimEIM = 100, zero = "phi")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -73,8 +73,9 @@ mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
}
\item{zero}{
- An integer specifying which linear/additive predictor is modelled as
- intercepts only. If given, the value or values must be from the
+ May be an integer vector
+ specifying which linear/additive predictors are modelled as
+ intercept-only. If given, the value or values can be from the
set \eqn{\{1,2,\ldots,5\}}{1,2,...,5}.
The default is the first one only, meaning \eqn{\phi}{phi}
is a single parameter even when there are explanatory variables.
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index 22d5201..4a56ea6 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -10,7 +10,7 @@
\usage{
mix2poisson(lphi = "logit", llambda = "loge",
iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
+ qmu = c(0.2, 0.8), nsimEIM = 100, zero = "phi")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index d4318ee..14a9802 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -15,13 +15,14 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\arguments{
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
The default value means none are modelled as intercept-only terms.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{parallel}{
A logical, or formula specifying which terms have
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 2b06596..862b705 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -8,7 +8,7 @@
}
\usage{
-nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
+nbcanlink(theta, size = NULL, wrt.param = NULL, bvalue = NULL,
inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,13 +20,13 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
}
- \item{size, wrt.eta}{
+ \item{size, wrt.param}{
\code{size} contains the \eqn{k} matrix which
must be of a conformable dimension as \code{theta}.
- Also, if \code{deriv > 0} then \code{wrt.eta}
+ Also, if \code{deriv > 0} then \code{wrt.param}
is either 1 or 2 (1 for with respect to the first
- linear predictor, and 2 for with respect to the second
- linear predictor (a function of \eqn{k})).
+ parameter, and 2 for with respect to the second
+ parameter (\code{size})).
}
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index 95402be..c25ad10 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -10,18 +10,28 @@
}
\usage{
-negbinomial(lmu = "loge", lsize = "loge",
- imu = NULL, isize = NULL, probs.y = 0.75,
- nsimEIM = 250, cutoff.prob = 0.995,
- max.qnbinom = 1000, max.chunk.MB = 20,
- deviance.arg = FALSE, imethod = 1, gsize = exp((-4):4),
- parallel = FALSE, ishrinkage = 0.95, zero = -2)
-polya(lprob = "logit", lsize = "loge",
- iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
- imethod = 1, ishrinkage = 0.95, zero = -2)
-polyaR(lsize = "loge", lprob = "logit",
- isize = NULL, iprob = NULL, probs.y = 0.75, nsimEIM = 100,
- imethod = 1, ishrinkage = 0.95, zero = -1)
+negbinomial(zero = "size", parallel = FALSE, deviance.arg = FALSE,
+ mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
+ max.support = 4000, max.chunk.MB = 30,
+ lmu = "loge", lsize = "loge",
+ imethod = 1, imu = NULL, probs.y = 0.35,
+ ishrinkage = 0.95, isize = NULL, gsize.mux = exp((-12:6)/2))
+polya(zero = "size", type.fitted = c("mean", "prob"),
+ mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lprob = "logit", lsize = "loge",
+ imethod = 1, iprob = NULL,
+ probs.y = 0.35, ishrinkage = 0.95,
+ isize = NULL, gsize.mux = exp((-12:6)/2),
+ imunb = NULL)
+polyaR(zero = "size", type.fitted = c("mean", "prob"),
+ mds.min = 1e-04, nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lsize = "loge", lprob = "logit",
+ imethod = 1, isize = NULL,
+ iprob = NULL, probs.y = 0.35,
+ ishrinkage = 0.95, gsize.mux = exp((-12:6)/2),
+ imunb = NULL)
}
% deviance.arg = FALSE,
@@ -43,13 +53,13 @@ polyaR(lsize = "loge", lprob = "logit",
}
- \item{imu, isize, iprob}{
+ \item{imu, imunb, isize, iprob}{
Optional initial values for the mean and \eqn{k} and \eqn{p}.
For \eqn{k}, if failure to converge occurs then try different values
(and/or use \code{imethod}).
For a \eqn{S}-column response, \code{isize} can be of length \eqn{S}.
A value \code{NULL} means an initial value for each response is
- computed internally using a gridsearch based on \code{gsize}.
+ computed internally using a gridsearch based on \code{gsize.mux}.
The last argument is ignored if used within \code{\link{cqo}}; see
the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead.
@@ -75,7 +85,12 @@ polyaR(lsize = "loge", lprob = "logit",
of \code{\link[stats:NegBinomial]{qnbinom}}
in order to obtain an upper limit for the approximate
support of the distribution, called \code{Qmax}, say.
- Hence the approximate support is \code{0:Qmax}.
+ Similarly, the value \code{1-p} is
+ fed into the \code{p} argument
+ of \code{\link[stats:NegBinomial]{qnbinom}}
+ in order to obtain a lower limit for the approximate
+ support of the distribution, called \code{Qmin}, say.
+ Hence the approximate support is \code{Qmin:Qmax}.
This argument should be
a numeric and close to 1 but never exactly 1.
Used to specify how many terms of the infinite series
@@ -83,6 +98,7 @@ polyaR(lsize = "loge", lprob = "logit",
EIM are actually used.
The closer this argument is to 1, the more accurate the
standard errors of the regression coefficients will be.
+ If this argument is too small, convergence will take longer.
@@ -94,8 +110,8 @@ polyaR(lsize = "loge", lprob = "logit",
}
- \item{max.chunk.MB, max.qnbinom}{
- \code{max.qnbinom} is used to describe the eligibility of
+ \item{max.chunk.MB, max.support}{
+ \code{max.support} is used to describe the eligibility of
individual observations
to have their EIM computed by the \emph{exact method}.
Here, we are concerned about
@@ -103,23 +119,30 @@ polyaR(lsize = "loge", lprob = "logit",
The exact method algorithm operates separately on each response
variable,
and it constructs a large matrix provided that the number of columns
- is less than \code{max.qnbinom}.
+ is less than \code{max.support}.
If so, then the computations are done in chunks, so
that no more than about \code{max.chunk.MB} megabytes
of memory is used at a time (actually, it is proportional to this amount).
Regarding eligibility of this algorithm, each observation must
- have the \code{cutoff.prob} quantile less than \code{max.qnbinom}
- as its approximate support.
+ have the length of the vector, starting from
+ the \code{1-cutoff.prob} quantile
+ and finishing up at the \code{cutoff.prob} quantile,
+ less than \code{max.support}
+ (as its approximate support).
If you have abundant memory then you might try setting
\code{max.chunk.MB = Inf}, but then the computations might take
a very long time.
- Setting \code{max.chunk.MB = 0} or \code{max.qnbinom = 0}
+ Setting \code{max.chunk.MB = 0} or \code{max.support = 0}
will force the EIM to be computed using the SFS algorithm only
(this \emph{used to be} the default method for \emph{all} the observations).
When the fitted values of the model are large and \eqn{k} is small,
the computation of the EIM will be costly with respect to time
and memory if the exact method is used. Hence the argument
- \code{max.qnbinom} limits the cost in terms of time.
+ \code{max.support} limits the cost in terms of time.
+ For intercept-only models \code{max.support} is multiplied by
+ a number (such as 10) because only one inner product needs be computed.
+ Note: \code{max.support} is an upper bound and limits the number of
+ terms dictated by the \code{eps.trig} argument.
% Thus the number of columns of the matrix can be controlled by
@@ -128,8 +151,46 @@ polyaR(lsize = "loge", lprob = "logit",
}
-\item{gsize}{
+\item{mds.min}{
+Numeric.
+Minimum value of the NBD mean divided by \code{size} parameter.
+The closer this ratio is to 0, the closer the distribution is
+to a Poisson.
+Iterations will stop when an estimate of \eqn{k} is so large,
+relative to the mean, that it is below this threshold.
+
+
+
+ }
+
+
+\item{eps.trig}{
+Numeric.
+A small positive value used in the computation of the EIMs.
+It focusses on the denominator of the terms of a series.
+Each term in the series (that is used to approximate an infinite series)
+has a value greater than \code{size / sqrt(eps.trig)},
+thus very small terms are ignored.
+It's a good idea to set a smaller value that will result in more accuracy,
+but it will require a greater computing time (when \eqn{k} is close to 0).
+An adjustment to \code{max.support} may be needed.
+In particular, the quantity computed by special means
+is \eqn{\psi(k) - E[\psi(Y+k)]}{trigamma(k) - E[trigamma(Y+k)]},
+which is the difference between two
+\code{\link[base]{trigamma}}
+functions. It is part of the calculation of the EIM with
+respect to the \code{size} parameter.
+
+
+
+}
+\item{gsize.mux}{
Similar to \code{gsigma} in \code{\link{CommonVGAMffArguments}}.
+ However, this grid is multiplied by the initial
+ estimates of the NBD mean parameter.
+ That is, it is on a relative scale rather than on an
+ absolute scale.
+
}
@@ -208,20 +269,25 @@ polyaR(lsize = "loge", lprob = "logit",
}
\item{zero}{
- Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if used
- at all. Specifies which of the two linear/additive predictors are
- modelled as an intercept only. By default, the \eqn{k} parameter
- (after \code{lsize} is applied) is modelled as a single unknown
- number that is estimated. It can be modelled as a function of the
- explanatory variables by setting \code{zero = NULL}; this has been
- called a NB-H model by Hilbe (2011). A negative value
- means that the value is recycled, so setting \eqn{-2} means all \eqn{k}
- are intercept-only.
+ Can be an integer-valued vector, usually assigned \eqn{-2}
+ or \eqn{2} if used at all. Specifies which of the two
+ linear/additive predictors are modelled as an intercept
+ only. By default, the \eqn{k} parameter (after \code{lsize}
+ is applied) is modelled as a single unknown number that
+ is estimated. It can be modelled as a function of the
+ explanatory variables by setting \code{zero = NULL}; this
+ has been called a NB-H model by Hilbe (2011). A negative
+ value means that the value is recycled, so setting \eqn{-2}
+ means all \eqn{k} are intercept-only.
See \code{\link{CommonVGAMffArguments}} for more information.
}
+ \item{type.fitted}{
+ See \code{\link{CommonVGAMffArguments}} for details.
+
+ }
}
\details{
The negative binomial distribution can be motivated in several ways,
@@ -309,11 +375,31 @@ polyaR(lsize = "loge", lprob = "logit",
\section{Warning}{
Poisson regression corresponds to \eqn{k} equalling
infinity. If the data is Poisson or close to Poisson,
- numerical problems will occur. Possibly choosing a
- log-log link may help in such cases, otherwise try
- \code{\link{poissonff}} or \code{\link{quasipoissonff}}.
- It is possible to fit a NBD that has a similar variance
- function as a quasi-Poisson; see the NB-1 example below.
+ numerical problems will occur.
+ Some corrective measures are taken, e.g.,
+ \eqn{k} is capped during estimation
+ to some large value and a warning is issued.
+ Note that \code{dnbinom(0, mu, size = Inf)}
+ currently
+ is a \code{NaN} (a bug),
+ therefore if the data has some 0s then
+ setting \code{crit = "coef"} will avoid the problem that
+ the log-likelihood will be undefined during the last
+ stages of estimation.
+ And setting \code{stepsize = 0.5} for half stepping is
+ probably a good idea too.
+ Possibly setting \code{crit = "coef"} is a good idea because
+ the log-likelihood is often a \code{NaN} when the \code{size}
+ value is very large.
+
+
+
+% Possibly choosing a log-log link may help in such cases,
+% otherwise try \code{\link{poissonff}} or
+% \code{\link{quasipoissonff}}. It is possible to fit a NBD
+% that has a similar variance function as a quasi-Poisson; see
+% the NB-1 example below.
+
These functions are fragile; the maximum likelihood
@@ -327,14 +413,17 @@ polyaR(lsize = "loge", lprob = "logit",
over large values when using this argument.
+
If one wants to force SFS
to be used on all observations, then
- set \code{max.qnbinom = 0} or \code{max.chunk.MB = 0}.
+ set \code{max.support = 0} or \code{max.chunk.MB = 0}.
If one wants to force the exact method
to be used for all observations, then
- set \code{max.qnbinom = Inf}.
+ set \code{max.support = Inf}.
If the computer has \emph{much} memory, then trying
- \code{max.chunk.MB = Inf} may provide a small speed increase.
+ \code{max.chunk.MB = Inf} and
+ \code{max.support = Inf}
+ may provide a small speed increase.
If SFS is used at all, then the \code{@weights} slot of the
fitted object will be a matrix;
otherwise that slot will be a \code{0 x 0} matrix.
@@ -353,6 +442,7 @@ polyaR(lsize = "loge", lprob = "logit",
and \code{\link{vgam}}.
+
}
\references{
Lawless, J. F. (1987)
@@ -431,7 +521,7 @@ Fitting the negative binomial distribution to biological data.
outliers or is large in magnitude.
If convergence failure occurs, try using arguments
(in recommended decreasing order)
- \code{max.qnbinom},
+ \code{max.support},
\code{nsimEIM},
\code{cutoff.prob},
\code{ishrinkage},
@@ -532,21 +622,25 @@ Coef(fit) # For intercept-only models
deviance(fit) # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above
# Example 2: simulated data with multiple responses
-ndata <- data.frame(x2 = runif(nn <- 300))
+\dontrun{
+ndata <- data.frame(x2 = runif(nn <- 200))
ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
coef(fit1, matrix = TRUE)
+}
# Example 3: large counts implies SFS is used
+\dontrun{
ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(10+x2), size = exp(1)))
with(ndata, range(y3)) # Large counts
fit2 <- vglm(y3 ~ x2, negbinomial, data = ndata, trace = TRUE)
coef(fit2, matrix = TRUE)
head(fit2 at weights) # Non-empty; SFS was used
+}
# Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
-nn <- 500 # Number of observations
+nn <- 200 # Number of observations
phi0 <- 10 # Specify this; should be greater than unity
delta0 <- 1 / (phi0 - 1)
mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
@@ -580,3 +674,21 @@ summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper # cf. moment estimator
+%lmu = "loge", lsize = "loge",
+% imu = NULL, isize = NULL,
+% nsimEIM = 250, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% deviance.arg = FALSE, imethod = 1,
+% probs.y = 0.75, ishrinkage = 0.95,
+% gsize = exp((-4):4),
+% parallel = FALSE, ishrinkage = 0.95, zero = "size")
+
+
+
+%polya(lprob = "logit", lsize = "loge",
+% iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
+% imethod = 1, ishrinkage = 0.95, zero = "size")
+%polyaR(lsize = "loge", lprob = "logit",
+% isize = NULL, iprob = NULL, probs.y = 0.75, nsimEIM = 100,
+% imethod = 1, ishrinkage = 0.95, zero = "size")
+
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index 8b308e7..1b61dfb 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -9,7 +9,7 @@
}
\usage{
negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
- probs.y = 0.75, imethod = 1,
+ probs.y = 0.35, imethod = 1,
ishrinkage = 0.95, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
index 2c51f40..e3e6b5a 100644
--- a/man/normal.vcm.Rd
+++ b/man/normal.vcm.Rd
@@ -16,7 +16,7 @@ normal.vcm(link.list = list("(Default)" = "identitylink"),
lsd = "loge", lvar = "loge",
esd = list(), evar = list(),
var.arg = FALSE, imethod = 1,
- icoefficients = NULL, isd = NULL, zero = "M")
+ icoefficients = NULL, isd = NULL, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -63,11 +63,12 @@ normal.vcm(link.list = list("(Default)" = "identitylink"),
\item{zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
The default applies to the last one,
- viz. the standard deviation/variance.
+ viz. the standard deviation/variance parameter.
}
+
}
\details{
This function allows all the usual LM regression coefficients to be
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index f326f8b..1da8c9e 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -3,6 +3,27 @@
%
%
%
+% 201602:
+\alias{Init.mu}
+\alias{.min.criterion.VGAM}
+\alias{predictvglmS4VGAM}
+% 201601:
+\alias{EIM.NB.speciald}
+\alias{EIM.NB.specialp}
+\alias{EIM.posNB.speciald}
+\alias{EIM.posNB.specialp}
+\alias{showvglmS4VGAM}
+\alias{showvgamS4VGAM}
+%\alias{coefvgam}
+%
+% 201512:
+\alias{margeffS4VGAM}
+\alias{showsummaryvglmS4VGAM}
+\alias{summaryvglmS4VGAM}
+\alias{findFirstMethod}
+\alias{cratio.derivs}
+\alias{subsetarray3}
+\alias{tapplymat1}
% 201509, for a bug in car::linearHypothesis() and car:::Anova():
\alias{as.char.expression}
\alias{coef.vlm}
@@ -20,8 +41,8 @@
\alias{qlms.bcn}
\alias{dlms.bcn}
\alias{dbetaII}
-\alias{AR1.control}
-\alias{param.names}
+% \alias{AR1.control}
+% \alias{param.names} % 20151105
%\alias{is.buggy}
%\alias{is.buggy.vlm}
%
@@ -36,7 +57,7 @@
\alias{grid.search}
\alias{expected.betabin.ab}
% 201406;
-\alias{interleave.VGAM}
+% \alias{interleave.VGAM} DONE 20151204
\alias{interleave.cmat} % 201506;
\alias{marcumQ}
\alias{QR.Q}
@@ -367,7 +388,7 @@
\alias{deviance.qrrvglm}
%\alias{df.residual}
%\alias{df.residual_vlm}
-\alias{dimm}
+% \alias{dimm} % 20151105
% \alias{dneg.binomial}
\alias{dnorm2}
%\alias{dotC}
@@ -631,7 +652,7 @@
\alias{vglm.multinomial.control}
\alias{vglm.multinomial.deviance.control}
\alias{dmultinomial}
-\alias{vglm.vcategorical.control}
+\alias{vglm.VGAMcategorical.control}
% \alias{vindex}
% \alias{vlabel}
\alias{vlm}
diff --git a/man/ozibetaUC.Rd b/man/ozibetaUC.Rd
new file mode 100644
index 0000000..299c637
--- /dev/null
+++ b/man/ozibetaUC.Rd
@@ -0,0 +1,121 @@
+\name{Ozibeta}
+\alias{Ozibeta}
+\alias{dozibeta}
+\alias{pozibeta}
+\alias{qozibeta}
+\alias{rozibeta}
+\title{The Zero/One-Inflated Beta Distribution}
+\description{
+ Density, distribution function, and random
+ generation for the zero/one-inflated beta distribution.
+
+
+}
+\usage{
+dozibeta(x, shape1, shape2, pobs0 = 0, pobs1 = 0, log = FALSE,
+ tol = .Machine$double.eps)
+pozibeta(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps)
+qozibeta(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ lower.tail = TRUE, log.p = FALSE, tol = .Machine$double.eps)
+rozibeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
+ tol = .Machine$double.eps)
+}
+
+
+\arguments{
+ \item{x, q, p, n}{Same as \code{\link[stats]{Beta}}. }
+ \item{pobs0}{
+ vector of probabilities that 0 are observed (\eqn{\omega_0}{omega_0}). }
+ \item{pobs1}{
+ vector of probabilities that 1 are observed (\eqn{\omega_1}{omega_1}). }
+
+ \item{shape1, shape2}{
+ Same as \code{\link[stats]{Beta}}.
+ They are called \code{a} and \code{b} in
+ \code{\link[base:Special]{beta}} respectively.
+
+
+ }
+ \item{lower.tail, log, log.p}{
+ Same as \code{\link[stats]{Beta}}.
+
+
+ }
+ \item{tol}{
+ Numeric, tolerance for testing equality with 0.
+
+
+ }
+
+
+}
+\value{
+ \code{dozibeta} gives the density,
+ \code{pozibeta} gives the distribution function,
+ \code{qozibeta} gives the quantile, and
+ \code{rozibeta} generates random deviates.
+
+
+
+
+}
+\author{ Xiangjie Xue and T. W. Yee }
+\details{
+ This distribution is a mixture of a discrete distribution
+ with a continuous distribution.
+ The cumulative distribution function of \eqn{Y} is
+ \deqn{F(y) =(1 - \omega_0 -\omega_1) B(y) + \omega_0 \times I[0 \leq y] +
+ \omega_1 \times I[1 \leq y]}{%
+ F(y) =(1 - omega_0 - omega_1) B(y) + omega_0 * I[0 <= y] +
+ omega_1 * I[1 <= y]}
+ where \eqn{B(y)} is the cumulative distribution function
+ of the beta distribution with the same shape parameters
+ (\code{\link[stats]{pbeta}}),
+ \eqn{\omega_0}{omega_0} is the inflated probability at 0
+ and \eqn{\omega_1}{omega_1} is the inflated probability at 1.
+ The default values of \eqn{\omega_j}{omega_j} mean that these
+ functions behave like the ordinary \code{\link[stats]{Beta}}
+ when only the essential arguments are inputted.
+
+
+
+}
+%\note{
+%
+%
+%
+%}
+\seealso{
+ \code{\link[base:Special]{beta}},
+ \code{\link{betaR}},
+ \code{\link{Betabinom}}.
+
+
+}
+\examples{
+\dontrun{
+set.seed(208); N <- 10000
+k <- rozibeta(N, 2, 3, 0.2, 0.2)
+hist(k, probability = TRUE, border = "blue",
+ main = "Blue = inflated; orange = ordinary beta")
+sum(k == 0) / N # Proportion of 0
+sum(k == 1) / N # Proportion of 1
+Ngrid <- 1000
+lines(seq(0, 1, length = Ngrid),
+ dbeta(seq(0, 1, length = Ngrid), 2, 3), col = "orange")
+lines(seq(0, 1, length = Ngrid), col = "blue",
+ dozibeta(seq(0, 1, length = Ngrid), 2 , 3, 0.2, 0.2))
+
+set.seed(1234); k <- runif(1000)
+sum(abs(qozibeta(k, 2, 3) - qbeta(k, 2, 3)) > .Machine$double.eps) # Should be 0
+sum(abs(pozibeta(k, 10, 7) - pbeta(k, 10, 7)) > .Machine$double.eps) # Should be 0
+}
+}
+\keyword{distribution}
+
+
+%dozibeta(c(-1, NA, 0.5, 2), 2, 3, 0.2, 0.2) # should be NA
+%dozibeta(0.5, c(NA, Inf), 4, 0.2, 0.1) # should be NA
+%dozibeta(0.5, 2.2, 4.3, NA, 0.3) # should be NA
%dozibeta(0.5, 2, 3, 0.5, 0.6) # should be NaN
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 0f99576..3225657 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -11,10 +11,10 @@
\usage{
paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
- gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
- zero = ifelse(lss, -2, -1))
+ gshape1.a = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
+% zero = ifelse(lss, -2, -1)
\arguments{
\item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
diff --git a/man/pgamma.deriv.Rd b/man/pgamma.deriv.Rd
index acb7f38..4bdd885 100644
--- a/man/pgamma.deriv.Rd
+++ b/man/pgamma.deriv.Rd
@@ -81,7 +81,8 @@ pgamma.deriv(q, shape, tmax = 100)
T. W. Yee wrote the wrapper function to the Fortran subroutine
written by R. J. Moore. The subroutine was modified to run using
double precision.
- The original code came from \url{http://lib.stat.cmu.edu/apstat/187}.
+ The original code came from \code{http://lib.stat.cmu.edu/apstat/187},
+ but this website has since become stale.
}
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index f7d2989..f0774fb 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -53,7 +53,7 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
}
\item{zero}{
- An integer-valued vector specifying which linear/additive predictors
+ Can be an integer-valued vector specifying which linear/additive predictors
are modelled as intercepts only. The values must be from the set
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
matrix response.
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index f2002dd..fd2ff9c 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -8,10 +8,13 @@
}
\usage{
-posnegbinomial(lmunb = "loge", lsize = "loge",
- isize = NULL, zero = -2, nsimEIM = 250,
- ishrinkage = 0.95, imethod = 1)
-
+posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
+ nsimEIM = 500, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ lmunb = "loge", lsize = "loge",
+ imethod = 1, imunb = NULL, probs.y = 0.35,
+ ishrinkage = 0.95, isize = NULL,
+ gsize.mux = exp((-12:6)/2))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -41,15 +44,35 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
}
- \item{nsimEIM, zero}{
+ \item{nsimEIM, zero, eps.trig}{
See \code{\link{CommonVGAMffArguments}}.
}
+ \item{probs.y, cutoff.prob}{
+ Similar to \code{\link{negbinomial}}.
+
+
+ }
+ \item{imunb, max.support}{
+ Similar to \code{\link{negbinomial}}.
+
+
+ }
+ \item{max.chunk.MB, gsize.mux}{
+ Similar to \code{\link{negbinomial}}.
+
+
+ }
\item{ishrinkage, imethod}{
See \code{\link{negbinomial}}.
}
+ \item{type.fitted}{
+ See \code{\link{CommonVGAMffArguments}} for details.
+
+
+ }
}
\details{
The positive negative binomial distribution is an ordinary negative
@@ -73,7 +96,7 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
ordinary negative binomial distribution.
- This function handles \emph{multivariate} responses, so that a matrix
+ This function handles \emph{multiple} responses, so that a matrix
can be used as the response. The number of columns is the number
of species, say, and setting \code{zero = -2} means that \emph{all}
species have a \code{k} equalling a (different) intercept only.
@@ -81,15 +104,30 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
}
\section{Warning}{
- The Poisson model corresponds to \code{k} equalling infinity.
- If the data is Poisson or close to Poisson, numerical problems may
- occur. Possibly a loglog link could be added in the future to try help
- handle this problem.
+ This family function is fragile;
+ at least two cases will lead to numerical problems.
+ Firstly,
+ the positive-Poisson model corresponds to \code{k} equalling infinity.
+ If the data is positive-Poisson or close to positive-Poisson,
+ then the estimated \code{k} will diverge to \code{Inf} or some
+ very large value.
+ Secondly, if the data is clustered about the value 1 because
+ the \code{munb} parameter is close to 0
+ then numerical problems will also occur.
+ Users should set \code{trace = TRUE} to monitor convergence.
+ In the situation when both cases hold, the result returned
+ (which will be untrustworthy) will depend on the initial values.
+
+
+% Then trying a \code{\link{loglog}} link might help
+% handle this problem.
+
+ This \pkg{VGAM} family function inherits the same warnings as
+ \code{\link{negbinomial}}.
+ And if \code{k} is much less than 1 then the estimation may
+ be slow.
- This \pkg{VGAM} family function is computationally expensive
- and usually runs slowly;
- setting \code{trace = TRUE} is useful for monitoring convergence.
}
@@ -118,7 +156,17 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
}
\author{ Thomas W. Yee }
\note{
- This family function handles multiple responses.
+ If the estimated \eqn{k} is very large then fitting a
+ \code{\link{pospoisson}} model is a good idea.
+
+
+
+ If both \code{munb} and \eqn{k} are large then it may be
+ necessary to decrease \code{eps.trig} and increase
+ \code{max.support} so that the EIMs are positive-definite,
+ e.g.,
+ \code{eps.trig = 1e-8} and \code{max.support = Inf}.
+
}
@@ -141,20 +189,19 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
}
\examples{
-\dontrun{
pdata <- data.frame(x2 = runif(nn <- 1000))
pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x2), size = exp(1)),
y2 = rposnegbin(nn, munb = exp(1+2*x2), size = exp(3)))
fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, data = pdata, trace = TRUE)
coef(fit, matrix = TRUE)
-dim(depvar(fit)) # dim(fit at y) is not as good
+dim(depvar(fit)) # Using dim(fit at y) is not recommended
# Another artificial data example
pdata2 <- data.frame(munb = exp(2), size = exp(3)); nn <- 1000
pdata2 <- transform(pdata2, y3 = rposnegbin(nn, munb = munb, size = size))
with(pdata2, table(y3))
-fit <- vglm(y3 ~ 1, posnegbinomial, pdata2, trace = TRUE)
+fit <- vglm(y3 ~ 1, posnegbinomial, data = pdata2, trace = TRUE)
coef(fit, matrix = TRUE)
with(pdata2, mean(y3)) # Sample mean
head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
@@ -168,7 +215,8 @@ coef(fit, matrix = TRUE)
Coef(fit)
(khat <- Coef(fit)["size"])
pdf2 <- dposnegbin(x = with(corbet, ofreq), mu = fitted(fit), size = khat)
-print( with(corbet, cbind(ofreq, species, fitted = pdf2*sum(species))), digits = 1)
+print(with(corbet, cbind(ofreq, species, fitted = pdf2*sum(species))), dig = 1)
+\dontrun{
with(corbet,
matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1,
xlab = "Observed frequency (of individual butterflies)",
@@ -184,3 +232,12 @@ matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1,
+%posnegbinomial(lmunb = "loge", lsize = "loge", imunb = NULL,
+% isize = NULL, zero = "size", nsimEIM = 250,
+% probs.y = 0.75, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% gsize = exp((-4):4), ishrinkage = 0.95, imethod = 1)
+
+
+
+
diff --git a/man/posnormal.Rd b/man/posnormal.Rd
index a5607ca..32ad616 100644
--- a/man/posnormal.Rd
+++ b/man/posnormal.Rd
@@ -10,7 +10,7 @@ posnormal(lmean = "identitylink", lsd = "loge",
eq.mean = FALSE, eq.sd = FALSE,
gmean = exp((-5:5)/2), gsd = exp((-1:5)/2),
imean = NULL, isd = NULL, probs.y = 0.10, imethod = 1,
- nsimEIM = NULL, zero = -2)
+ nsimEIM = NULL, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -55,7 +55,7 @@ posnormal(lmean = "identitylink", lsd = "loge",
}
\item{zero, nsimEIM, probs.y}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index 84f3d1b..644262f 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -6,8 +6,8 @@
Fits a positive Poisson distribution.
}
\usage{
-pospoisson(link = "loge", expected = TRUE,
- ilambda = NULL, imethod = 1, zero = NULL)
+pospoisson(link = "loge", type.fitted = c("mean", "lambda", "prob0"),
+ expected = TRUE, ilambda = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,7 +23,12 @@ pospoisson(link = "loge", expected = TRUE,
}
\item{ilambda, imethod, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+ }
+ \item{type.fitted}{
+ See \code{\link{CommonVGAMffArguments}} for details.
+
}
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index 9099a8a..77ab681 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -9,7 +9,8 @@
}
\usage{
prentice74(llocation = "identitylink", lscale = "loge", lshape = "identitylink",
- ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
+ ilocation = NULL, iscale = NULL, ishape = NULL,
+ zero = c("scale", "shape"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -36,9 +37,9 @@ prentice74(llocation = "identitylink", lscale = "loge", lshape = "identitylink",
}
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts-only.
- The values must be from the set \{1,2,3\}.
+ Then the values must be from the set \{1,2,3\}.
See \code{\link{CommonVGAMffArguments}} for more information.
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index 8e82d6c..2ccdfb3 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -49,10 +49,11 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
}
\item{zero}{
- An integer-valued vector specifying which linear/additive predictors
+ Can be an integer-valued vector specifying which linear/additive predictors
are modelled as intercepts only. The values must be from the set
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of
the matrix response.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 77d022a..3d26149 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -31,10 +31,13 @@ quasipoissonff(link = "loge", onedpar = FALSE,
}
\item{zero}{
- An integer-valued vector specifying which linear/additive predictors
+ Can be an integer-valued vector specifying which linear/additive predictors
are modelled as intercepts only. The values must be from the set
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
matrix response.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
}
}
diff --git a/man/rec.normal.Rd b/man/rec.normal.Rd
index 93eba5b..05eaeb1 100644
--- a/man/rec.normal.Rd
+++ b/man/rec.normal.Rd
@@ -35,11 +35,12 @@ rec.normal(lmean = "identitylink", lsd = "loge",
}
\item{zero}{
- An integer vector, containing the value 1 or 2. If so, the mean or
+ Can be an integer vector, containing the value 1 or 2. If so, the mean or
standard deviation respectively are modelled as an intercept only.
Usually, setting \code{zero = 2} will be used, if used at all.
The default value \code{NULL} means both linear/additive predictors
are modelled as functions of the explanatory variables.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/riceff.Rd b/man/riceff.Rd
index 404fbe3..3791916 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -29,14 +29,14 @@ riceff(lsigma = "loge", lvee = "loge", isigma = NULL,
}
\item{ivee, isigma}{
Optional initial values for the parameters.
- See \code{\link{CommonVGAMffArguments}} for more information.
If convergence failure occurs (this \pkg{VGAM} family function seems
to require good initial values) try using these arguments.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{nsimEIM, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/sc.studentt2.Rd b/man/sc.studentt2.Rd
index a03afc4..991a212 100644
--- a/man/sc.studentt2.Rd
+++ b/man/sc.studentt2.Rd
@@ -11,7 +11,7 @@
}
\usage{
sc.studentt2(percentile = 50, llocation = "identitylink", lscale = "loge",
- ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/simplex.Rd b/man/simplex.Rd
index 6acda67..ebcb7d8 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -9,9 +9,8 @@
}
\usage{
-simplex(lmu = "logit", lsigma = "loge",
- imu = NULL, isigma = NULL,
- imethod = 1, ishrinkage = 0.95, zero = 2)
+simplex(lmu = "logit", lsigma = "loge", imu = NULL, isigma = NULL,
+ imethod = 1, ishrinkage = 0.95, zero = "sigma")
}
%- maybe also 'usage' for other objects documented here.
@@ -29,7 +28,7 @@ simplex(lmu = "logit", lsigma = "loge",
}
\item{imethod, ishrinkage, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 50c9d16..6080be8 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -11,9 +11,10 @@ sinmad(lscale = "loge", lshape1.a = "loge", lshape3.q = "loge",
iscale = NULL, ishape1.a = NULL, ishape3.q = NULL, imethod = 1,
lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5),
gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
- zero = ifelse(lss, -(2:3), -c(1, 3)))
+ zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
+% zero = ifelse(lss, -(2:3), -c(1, 3))
\arguments{
\item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
diff --git a/man/skellam.Rd b/man/skellam.Rd
index a4bceaa..0a9e1d7 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -28,7 +28,7 @@ skellam(lmu1 = "loge", lmu2 = "loge", imu1 = NULL, imu2 = NULL,
}
\item{nsimEIM, parallel, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
In particular, setting \code{parallel=TRUE} will constrain the
two means to be equal.
diff --git a/man/slash.Rd b/man/slash.Rd
index 1350328..59f61fa 100644
--- a/man/slash.Rd
+++ b/man/slash.Rd
@@ -41,7 +41,8 @@ slash(lmu = "identitylink", lsigma = "loge",
}
\item{nsimEIM, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{CommonVGAMffArguments}} for information.
+
}
\item{smallno}{
diff --git a/man/sratio.Rd b/man/sratio.Rd
index 7445bd8..b666eaf 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -35,11 +35,12 @@ sratio(link = "logit", parallel = FALSE, reverse = FALSE,
}
\item{zero}{
- An integer-valued vector specifying which
+ Can be an integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
The values must be from the set \{1,2,\ldots,\eqn{M}\}.
The default value means none are modelled as intercept-only terms.
+
}
\item{whitespace}{
See \code{\link{CommonVGAMffArguments}} for information.
@@ -132,6 +133,7 @@ The \pkg{VGAM} package for categorical data analysis.
\code{\link{acat}},
\code{\link{cumulative}},
\code{\link{multinomial}},
+ \code{\link{margeff}},
\code{\link{pneumo}},
\code{\link{logit}},
\code{\link{probit}},
diff --git a/man/studentt.Rd b/man/studentt.Rd
index c9735df..55db46a 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -12,10 +12,10 @@
\usage{
studentt (ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
studentt2(df = Inf, llocation = "identitylink", lscale = "loge",
- ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = "scale")
studentt3(llocation = "identitylink", lscale = "loge", ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
- imethod = 1, zero = -(2:3))
+ imethod = 1, zero = c("scale", "df"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/summaryvglm.Rd b/man/summaryvglm.Rd
index c0c384a..a9444e4 100644
--- a/man/summaryvglm.Rd
+++ b/man/summaryvglm.Rd
@@ -12,10 +12,10 @@
summaryvglm(object, correlation = FALSE,
dispersion = NULL, digits = NULL, presid = TRUE,
signif.stars = getOption("show.signif.stars"),
- nopredictors = FALSE)
+ nopredictors = FALSE, ...)
\method{show}{summary.vglm}(x, digits = max(3L, getOption("digits") - 3L),
quote = TRUE, prefix = "", presid = TRUE,
- signif.stars = NULL, nopredictors = NULL)
+ signif.stars = NULL, nopredictors = NULL, ...)
}
\arguments{
\item{object}{an object of class \code{"vglm"}, usually, a result of a
@@ -41,6 +41,7 @@ summaryvglm(object, correlation = FALSE,
are not printed out.
The default is that they are. }
\item{prefix}{ Not used. }
+ \item{\ldots}{ Not used. }
}
@@ -98,6 +99,21 @@ distribution is used.
% handled by \code{\link{summary.lm}}.
+
+
+% 20151215
+ It is possible for programmers to write a methods function to
+ print out extra quantities when \code{summary(vglmObject)} is
+ called.
+ The generic function is \code{summaryvglmS4VGAM()}, and one
+ can use the S4 function \code{\link[methods]{setMethod}} to
+ compute the quantities needed.
+ Also needed is the generic function \code{showsummaryvglmS4VGAM()}
+ to actually print the quantities out.
+
+
+
+
}
\value{
\code{summaryvglm} returns an object of class \code{"summary.vglm"};
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 1f8286e..6280cf3 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -7,7 +7,8 @@
}
\usage{
-tikuv(d, lmean = "identitylink", lsigma = "loge", isigma = NULL, zero = 2)
+tikuv(d, lmean = "identitylink", lsigma = "loge", isigma = NULL,
+ zero = "sigma")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -43,13 +44,14 @@ tikuv(d, lmean = "identitylink", lsigma = "loge", isigma = NULL, zero = 2)
}
\item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The values must be from the set \{1,2\} corresponding
+ A vector specifying which
+ linear/additive predictors are modelled as intercept-only.
+ The values can be from the set \{1,2\}, corresponding
respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
If \code{zero = NULL} then all linear/additive predictors are modelled as
a linear combination of the explanatory variables.
For many data sets having \code{zero = 2} is a good idea.
+ See \code{\link{CommonVGAMffArguments}} for information.
}
diff --git a/man/tobit.Rd b/man/tobit.Rd
index 72b27dd..679ee69 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -10,7 +10,7 @@
tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
- byrow.arg = FALSE, imethod = 1, zero = -2)
+ byrow.arg = FALSE, imethod = 1, zero = "sd")
}
% 20151024 yettodo: maybe add a new option to 'type.fitted':
% type.fitted = c("uncensored", "censored", "mean.obs", "truncated"),
@@ -81,12 +81,12 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
}
\item{zero}{
- An integer vector, containing the value 1 or 2. If so,
+ A vector, e.g., containing the value 1 or 2. If so,
the mean or standard deviation respectively are modelled
as an intercept-only.
Setting \code{zero = NULL} means both linear/additive predictors
are modelled as functions of the explanatory variables.
- See \code{\link{CommonVGAMffArguments}} for information.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
index 8dee297..2811f69 100644
--- a/man/truncweibull.Rd
+++ b/man/truncweibull.Rd
@@ -14,7 +14,7 @@ truncweibull(lower.limit = 1e-5,
lAlpha = "loge", lBetaa = "loge",
iAlpha = NULL, iBetaa = NULL,
nrfs = 1, probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = -2)
+ imethod = 1, zero = "Betaa")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -44,7 +44,9 @@ truncweibull(lower.limit = 1e-5,
}
\item{imethod, nrfs, zero, probs.y}{
- Details at \code{\link{weibullR}}.
+ Details at \code{\link{weibullR}}
+ and \code{\link{CommonVGAMffArguments}}.
+
}
}
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 7a1ef0f..052833c 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,6 +4,50 @@
%\alias{ccoef-method}
%
%
+% 201602:
+\alias{predictvglmS4VGAM,ANY,binom2.or-method}
+% 201601:
+\alias{showvglmS4VGAM,ANY,acat-method}
+\alias{showvgamS4VGAM,ANY,acat-method}
+\alias{showvglmS4VGAM,ANY,multinomial-method}
+\alias{showvgamS4VGAM,ANY,multinomial-method}
+%
+%\alias{coef,vgam-method}
+%\alias{coefficients,vgam-method}
+% 201512:
+\alias{summaryvglmS4VGAM,ANY,binom2.or-method}
+\alias{showsummaryvglmS4VGAM,ANY,binom2.or-method}
+%
+\alias{summaryvglmS4VGAM,ANY,posbernoulli.tb-method}
+\alias{showsummaryvglmS4VGAM,ANY,posbernoulli.tb-method}
+%
+\alias{showsummaryvglmS4VGAM,ANY,posbernoulli.b-method}
+\alias{showsummaryvglmS4VGAM,ANY,posbernoulli.t-method}
+%
+\alias{summaryvglmS4VGAM,ANY,VGAMcategorical-method}
+\alias{summaryvglmS4VGAM,ANY,cumulative-method}
+\alias{summaryvglmS4VGAM,ANY,multinomial-method}
+%
+\alias{showsummaryvglmS4VGAM,ANY,VGAMcategorical-method}
+\alias{showsummaryvglmS4VGAM,ANY,cumulative-method}
+\alias{showsummaryvglmS4VGAM,ANY,multinomial-method}
+%
+\alias{margeffS4VGAM,ANY,ANY,VGAMcategorical-method}
+\alias{margeffS4VGAM,ANY,ANY,VGAMordinal-method}
+\alias{margeffS4VGAM,ANY,ANY,acat-method}
+\alias{margeffS4VGAM,ANY,ANY,cratio-method}
+\alias{margeffS4VGAM,ANY,ANY,sratio-method}
+\alias{margeffS4VGAM,ANY,ANY,cumulative-method}
+\alias{margeffS4VGAM,ANY,ANY,multinomial-method}
+%
+%\alias{margeffS4VGAM,ANY,VGAMcategorical-method}
+%\alias{margeffS4VGAM,ANY,VGAMordinal-method}
+%\alias{margeffS4VGAM,ANY,acat-method}
+%\alias{margeffS4VGAM,ANY,cratio-method}
+%\alias{margeffS4VGAM,ANY,sratio-method}
+%\alias{margeffS4VGAM,ANY,cumulative-method}
+%\alias{margeffS4VGAM,ANY,multinomial-method}
+%
% 201509:
\alias{term.names,ANY-method}
\alias{term.names,vlm-method}
diff --git a/man/uninormal.Rd b/man/uninormal.Rd
index 2d0fafe..c7e6a20 100644
--- a/man/uninormal.Rd
+++ b/man/uninormal.Rd
@@ -12,7 +12,7 @@
\usage{
uninormal(lmean = "identitylink", lsd = "loge", lvar = "loge",
var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
- smallno = 1e-05, zero = -2)
+ smallno = 1e-05, zero = "sd")
}
%- maybe also 'usage' for other objects documented here.
% apply.parint = FALSE,
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index fd3917d..75c1d6f 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -177,6 +177,16 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
of the weight matrices.
}
+
+ \item{\code{validfitted, validparams}:}{
+ Functions that test that the fitted values and
+ all parameters are within range.
+ These functions can issue a warning if violations are detected.
+
+
+ }
+
+
}
}
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index f81fe52..72b94ad 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -44,7 +44,10 @@ vonmises(llocation = extlogit(min = 0, max = 2 * pi), lscale = "loge",
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
The default is none of them.
- If used, choose one value from the set \{1,2\}.
+ If used, one can choose one value from the set \{1,2\}.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
}
% \item{hstep}{ Positive numeric. The \eqn{h} used for the finite difference
diff --git a/man/weibull.mean.Rd b/man/weibull.mean.Rd
index 96de32d..95a33e5 100644
--- a/man/weibull.mean.Rd
+++ b/man/weibull.mean.Rd
@@ -14,7 +14,7 @@
\usage{
weibull.mean(lmean = "loge", lshape = "loge", imean = NULL,
ishape = NULL, probs.y = c(0.2, 0.5, 0.8),
- imethod = 1, zero = -2)
+ imethod = 1, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/weibullR.Rd b/man/weibullR.Rd
index f0c27b7..eee48b6 100644
--- a/man/weibullR.Rd
+++ b/man/weibullR.Rd
@@ -13,7 +13,7 @@
\usage{
weibullR(lscale = "loge", lshape = "loge",
iscale = NULL, ishape = NULL, lss = TRUE, nrfs = 1,
- probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = ifelse(lss, -2, -1))
+ probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = "shape")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -49,6 +49,7 @@ weibullR(lscale = "loge", lshape = "loge",
\item{zero, probs.y, lss}{
Details at \code{\link{CommonVGAMffArguments}}.
+
}
}
\details{
diff --git a/man/yip88.Rd b/man/yip88.Rd
index dca6dcf..4935062 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -7,7 +7,7 @@
}
\usage{
-yip88(link = "loge", n.arg = NULL)
+yip88(link = "loge", n.arg = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -22,6 +22,11 @@ yip88(link = "loge", n.arg = NULL)
number of zeros can be determined.
}
+ \item{imethod}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+
+ }
}
\details{
The method implemented here, Yip (1988), maximizes a \emph{conditional}
@@ -145,9 +150,6 @@ coef(fit3, matrix = TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520)
head(fitted(fit3))
mean(yy) # Compare this with fitted(fit3)
-
-
-
}
\keyword{models}
\keyword{regression}
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index 0f59f78..1cddeb0 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -11,11 +11,11 @@
}
\usage{
zabinomial(lpobs0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0"),
+ type.fitted = c("mean", "prob", "pobs0"),
ipobs0 = NULL, iprob = NULL, imethod = 1, zero = NULL)
zabinomialff(lprob = "logit", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- iprob = NULL, ionempobs0 = NULL, imethod = 1, zero = 2)
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ iprob = NULL, ionempobs0 = NULL, imethod = 1, zero = "onempobs0")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -161,11 +161,12 @@ zdata <- transform(zdata,
y1 = rzabinom(nn, size = size, prob = prob, pobs0 = pobs0))
with(zdata, table(y1))
-fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL), data = zdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-head(fitted(fit))
-head(predict(fit))
-summary(fit)
+zfit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL),
+ data = zdata, trace = TRUE)
+coef(zfit, matrix = TRUE)
+head(fitted(zfit))
+head(predict(zfit))
+summary(zfit)
}
\keyword{models}
\keyword{regression}
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index e9955f4..f43ded2 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -11,11 +11,11 @@
}
\usage{
zageometric(lpobs0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
imethod = 1, ipobs0 = NULL, iprob = NULL, zero = NULL)
zageometricff(lprob = "logit", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- imethod = 1, iprob = NULL, ionempobs0 = NULL, zero = -2)
+ type.fitted = c("mean", "prob", "pobs0", "onempobs0"),
+ imethod = 1, iprob = NULL, ionempobs0 = NULL, zero = "onempobs0")
}
%- maybe also 'usage' for other objects documented here.
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 7b1ff86..72b0327 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -10,14 +10,19 @@
}
\usage{
-zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
- type.fitted = c("mean", "pobs0"),
- ipobs0 = NULL, isize = NULL, zero = -3, imethod = 1,
- nsimEIM = 250, ishrinkage = 0.95)
+zanegbinomial(zero = "size", type.fitted = c("mean", "munb", "pobs0"),
+ nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
+ max.support = 4000, max.chunk.MB = 30, lpobs0 = "logit",
+ lmunb = "loge", lsize = "loge", imethod = 1, ipobs0 = NULL,
+ imunb = NULL, probs.y = 0.35, ishrinkage = 0.95,
+ isize = NULL, gsize.mux = exp((-12:6)/2))
zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"),
- isize = NULL, ionempobs0 = NULL, zero = c(-2, -3),
- imethod = 1, nsimEIM = 250, ishrinkage = 0.95)
+ type.fitted = c("mean", "munb", "pobs0", "onempobs0"),
+ isize = NULL, ionempobs0 = NULL, zero = c("size",
+ "onempobs0"), probs.y = 0.35, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ gsize.mux = exp((-12:6)/2), imethod = 1, imunb = NULL,
+ nsimEIM = 500, ishrinkage = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -64,8 +69,9 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
% epobs0 = list(), emunb = list(), esize = list(),
% }
- \item{ipobs0, isize}{
- Optional initial values for \eqn{p_0}{pobs0} and \code{k}.
+ \item{ipobs0, imunb, isize}{
+ Optional initial values for \eqn{p_0}{pobs0} and \code{munb}
+ and \code{k}.
If given then it is okay to give one value
for each response/species by inputting a vector whose length
is the number of columns of the response matrix.
@@ -77,7 +83,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
% the probability of an observed value is to be modelled with the
% covariates.
Specifies which of the three linear predictors are
- modelled as an intercept only.
+ modelled as intercept-only.
% By default, the \code{k} and \eqn{p_0}{pobs0}
% parameters for each response are modelled as
% single unknown numbers that are estimated.
@@ -100,6 +106,18 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
and \code{\link{CommonVGAMffArguments}}.
}
+
+
+ \item{probs.y, cutoff.prob, gsize.mux, eps.trig}{
+ See \code{\link{negbinomial}}.
+% and \code{\link{CommonVGAMffArguments}}.
+
+ }
+ \item{max.support, max.chunk.MB}{
+ See \code{\link{negbinomial}}.
+% and \code{\link{CommonVGAMffArguments}}.
+
+ }
}
\details{
@@ -174,26 +192,30 @@ for counts with extra zeros.
}
\section{Warning }{
+ This family function is fragile; it inherits the same difficulties as
+ \code{\link{posnegbinomial}}.
Convergence for this \pkg{VGAM} family function seems to depend quite
strongly on providing good initial values.
+
This \pkg{VGAM} family function is computationally expensive
and usually runs slowly;
setting \code{trace = TRUE} is useful for monitoring convergence.
+
Inference obtained from \code{summary.vglm} and \code{summary.vgam}
may or may not be correct. In particular, the p-values, standard errors
and degrees of freedom may need adjustment. Use simulation on artificial
data to check that these are reasonable.
+
}
\author{ T. W. Yee }
\note{
-
Note this family function allows \eqn{p_0}{pobs0} to be modelled as
functions of the covariates provided \code{zero} is set correctly.
It is a conditional model, not a mixture model.
@@ -246,3 +268,33 @@ head(predict(fit))
\keyword{models}
\keyword{regression}
+
+% lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+% type.fitted = c("mean", "pobs0"),
+% ipobs0 = NULL, isize = NULL, zero = "size",
+% probs.y = 0.75, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% gsize = exp((-4):4),
+% imethod = 1, nsimEIM = 250, ishrinkage = 0.95)
+
+
+
+%zanegbinomial(
+%zero = "size", type.fitted = c("mean", "pobs0"),
+% nsimEIM = 250, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+% imethod = 1, ipobs0 = NULL, probs.y = 0.75,
+% ishrinkage = 0.95, isize = NULL, gsize = exp((-4):4))
+
+%zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
+% type.fitted = c("mean", "pobs0", "onempobs0"), isize = NULL,
+% ionempobs0 = NULL, zero = c("size", "onempobs0"),
+% probs.y = 0.75, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% gsize = exp((-4):4),
+% imethod = 1, nsimEIM = 250, ishrinkage = 0.95)
+
+
+
+
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index 3b1c93a..cc87972 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -10,10 +10,14 @@
}
\usage{
-zapoisson(lpobs0 = "logit", llambda = "loge",
- type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL)
-zapoissonff(llambda = "loge", lonempobs0 = "logit",
- type.fitted = c("mean", "pobs0", "onempobs0"), zero = -2)
+zapoisson(lpobs0 = "logit", llambda = "loge", type.fitted =
+ c("mean", "lambda", "pobs0", "onempobs0"), imethod = 1,
+ ipobs0 = NULL, ilambda = NULL, ishrinkage = 0.95, probs.y = 0.35,
+ zero = NULL)
+zapoissonff(llambda = "loge", lonempobs0 = "logit", type.fitted =
+ c("mean", "lambda", "pobs0", "onempobs0"), imethod = 1,
+ ilambda = NULL, ionempobs0 = NULL, ishrinkage = 0.95,
+ probs.y = 0.35, zero = "onempobs0")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -50,8 +54,15 @@ zapoissonff(llambda = "loge", lonempobs0 = "logit",
% }
- \item{zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
+ \item{imethod, ipobs0, ionempobs0, ilambda, ishrinkage}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+
+
+ }
+ \item{probs.y, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
% Integer valued vector, usually assigned \eqn{-1} or \eqn{1} if used
% at all. Specifies which of the two linear/additive predictors are
@@ -230,3 +241,11 @@ with(Abdata, mean(yy)) # Compare this with fitted(fit3)
\keyword{models}
\keyword{regression}
+
+%zapoisson(lpobs0 = "logit", llambda = "loge",
+% type.fitted = c("mean", "pobs0", "onempobs0"), zero = NULL)
+%zapoissonff(llambda = "loge", lonempobs0 = "logit",
+% type.fitted = c("mean", "pobs0", "onempobs0"), zero = "onempobs0")
+
+
+
diff --git a/man/zero.Rd b/man/zero.Rd
index 6b8925d..78412f1 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -4,8 +4,9 @@
\title{ The zero Argument in VGAM Family Functions }
\description{
The \code{zero} argument allows users to conveniently
- model certain linear/additive predictors as intercepts
- only.
+ model certain linear/additive predictors as intercept-only.
+
+
}
% \usage{
% VGAMfamilyFunction(zero = 3)
@@ -51,10 +52,13 @@
without having to input all the constraint matrices explicitly.
- The \code{zero} argument should be assigned an integer vector from the
+ The \code{zero} argument can be assigned an integer vector from the
set \{\code{1:M}\} where \code{M} is the number of linear/additive
predictors. Full details about constraint matrices can be found in
the references.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
}
@@ -94,6 +98,7 @@ Reduced-rank vector generalized linear models.
}
\seealso{
+ \code{\link{CommonVGAMffArguments}},
\code{\link{constraints}}.
@@ -105,7 +110,7 @@ args(binom2.or)
args(gpd)
#LMS quantile regression example
-fit <- vglm(BMI ~ sm.bs(age, df = 4), lms.bcg(zero = c(1,3)),
+fit <- vglm(BMI ~ sm.bs(age, df = 4), lms.bcg(zero = c(1, 3)),
data = bmi.nz, trace = TRUE)
coef(fit, matrix = TRUE)
}
diff --git a/man/zeta.Rd b/man/zeta.Rd
index 0d40cef..b9a2f58 100644
--- a/man/zeta.Rd
+++ b/man/zeta.Rd
@@ -25,11 +25,12 @@ zeta(x, deriv = 0)
}
}
\details{
- While the usual definition involves an infinite series, more efficient
- methods have been devised to compute the value. In particular,
- this function uses Euler-Maclaurin summation. Theoretically, the
- zeta function can be computed over the whole complex plane because of
- analytic continuation.
+ While the usual definition involves an infinite series that
+ converges when the real part of the argument is \eqn{> 1},
+ more efficient methods have been devised to compute the
+ value. In particular, this function uses Euler-Maclaurin
+ summation. Theoretically, the zeta function can be computed
+ over the whole complex plane because of analytic continuation.
The formula used here for analytic continuation is
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index 8e609b2..ff7506e 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -11,11 +11,11 @@ zetaff(link = "loge", init.p = NULL, zero = NULL)
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link, init.p, zero}{
- See \code{\link{CommonVGAMffArguments}} for more information.
These arguments apply to the (positive) parameter \eqn{p}.
See \code{\link{Links}} for more choices.
Choosing \code{\link{loglog}} constrains \eqn{p>1}, but
may fail if the maximum likelihood estimate is less than one.
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 307ca9d..e91a1b6 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -10,12 +10,13 @@
}
\usage{
zibinomial(lpstr0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, zero = NULL, multiple.responses = FALSE, imethod = 1)
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ ipstr0 = NULL, zero = NULL, multiple.responses = FALSE,
+ imethod = 1)
zibinomialff(lprob = "logit", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ionempstr0 = NULL, zero = 2, multiple.responses = FALSE,
- imethod = 1)
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
+ ionempstr0 = NULL, zero = "onempstr0",
+ multiple.responses = FALSE, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -73,7 +74,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
}
\item{zero, imethod}{
See \code{\link{CommonVGAMffArguments}} for information.
- Argument \code{zero} has changed its default value for version 0.9-2.
+ Argument \code{zero} changed its default value for version 0.9-2.
}
@@ -208,7 +209,8 @@ zdata <- transform(zdata,
y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zdata, table(y))
fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE)
-fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE, stepsize = 0.5)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE,
+ stepsize = 0.5)
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 217c966..842c685 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -10,13 +10,13 @@
}
\usage{
zigeometric(lpstr0 = "logit", lprob = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
ipstr0 = NULL, iprob = NULL,
imethod = 1, bias.red = 0.5, zero = NULL)
zigeometricff(lprob = "logit", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+ type.fitted = c("mean", "prob", "pobs0", "pstr0", "onempstr0"),
iprob = NULL, ionempstr0 = NULL,
- imethod = 1, bias.red = 0.5, zero = -2)
+ imethod = 1, bias.red = 0.5, zero = "onempstr0")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 8a39f4d..fc44414 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -9,14 +9,22 @@
}
\usage{
-zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, isize = NULL, zero = -3,
- imethod = 1, ishrinkage = 0.95, nsimEIM = 250)
+zinegbinomial(zero = "size",
+ type.fitted = c("mean", "munb", "pobs0", "pstr0",
+ "onempstr0"),
+ nsimEIM = 500, cutoff.prob = 0.999, eps.trig = 1e-7,
+ max.support = 4000, max.chunk.MB = 30,
+ lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+ imethod = 1, ipstr0 = NULL, imunb = NULL,
+ probs.y = 0.35, ishrinkage = 0.95,
+ isize = NULL, gsize.mux = exp((-12:6)/2))
zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- isize = NULL, ionempstr0 = NULL, zero = c(-2, -3),
- imethod = 1, ishrinkage = 0.95, nsimEIM = 250)
+ type.fitted = c("mean", "munb", "pobs0", "pstr0",
+ "onempstr0"), imunb = NULL, isize = NULL, ionempstr0 =
+ NULL, zero = c("size", "onempstr0"), imethod = 1,
+ ishrinkage = 0.95, probs.y = 0.35, cutoff.prob = 0.999,
+ eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
+ gsize.mux = exp((-12:6)/2), nsimEIM = 500)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -42,11 +50,14 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
}
- \item{ipstr0, isize}{
- Optional initial values for \eqn{\phi}{pstr0} and \eqn{k}{k}.
+ \item{ipstr0, isize, imunb}{
+ Optional initial values for \eqn{\phi}{pstr0}
+ and \eqn{k}{k}
+ and \eqn{\mu}{munb}.
 The default is to compute an initial value internally for each of them.
If a vector then recycling is used.
+
}
\item{lonempstr0, ionempstr0}{
@@ -63,28 +74,46 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
If failure to converge occurs try another value
and/or else specify a value for \code{ishrinkage}.
+
+
}
\item{zero}{
- Integers specifying which linear/additive predictor is modelled
- as intercepts only. If given, their absolute values must be
+ Specifies which linear/additive predictors are to be modelled
+ as intercept-only. They can be such that their absolute values are
either 1 or 2 or 3.
The default is the \eqn{\phi}{pstr0} and \eqn{k} parameters
(both for each response).
See \code{\link{CommonVGAMffArguments}} for more information.
+
+
}
\item{ishrinkage, nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for information.
+
+ }
+ \item{probs.y, cutoff.prob, max.support, max.chunk.MB }{
+ See \code{\link{negbinomial}}
+ and/or \code{\link{posnegbinomial}} for details.
+
+
+ }
+ \item{gsize.mux, eps.trig}{
+ These arguments relate to grid searching in the initialization process.
+ See \code{\link{negbinomial}}
+ and/or \code{\link{posnegbinomial}} for details.
+
+
}
}
\details{
These functions are based on
\deqn{P(Y=0) = \phi + (1-\phi) (k/(k+\mu))^k,}{%
- P(Y=0) = \phi + (1-\phi) * (k/(k+\mu))^k,}
+ P(Y=0) = phi + (1- phi) * (k/(k+munb))^k,}
and for \eqn{y=1,2,\ldots},
\deqn{P(Y=y) = (1-\phi) \, dnbinom(y, \mu, k).}{%
- P(Y=y) = (1-\phi) * dnbinom(y, \mu, k).}
+ P(Y=y) = (1- phi) * dnbinom(y, munb, k).}
The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
The mean of \eqn{Y} is \eqn{(1-\phi) \mu}{(1-phi)*munb}
(returned as the fitted values).
@@ -97,7 +126,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
of the negative binomial distribution.
- Independent multivariate responses are handled.
+ Independent multiple responses are handled.
If so then arguments \code{ipstr0} and \code{isize} may be vectors
with length equal to the number of responses.
@@ -152,6 +181,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
negative binomial as \eqn{k} tends to infinity.
+
The zero-\emph{deflated} negative binomial distribution
might be fitted by setting \code{lpstr0 = identitylink},
albeit, not entirely reliably. See \code{\link{zipoisson}}
@@ -164,28 +194,51 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
}
\section{Warning }{
- Numerical problems can occur, e.g., when the probability of
- zero is actually less than, not more than, the nominal
+ This model can be difficult to fit to data,
+ and this family function is fragile.
+ The model is especially difficult to fit reliably when
+ the estimated \eqn{k} parameter is very large (so the model
+ approaches a zero-inflated Poisson distribution) or
+ much less than 1
+ (and gets more difficult as it approaches 0).
+ Numerical problems can also occur, e.g., when the probability of
+ a zero is actually less than, and not more than, the nominal
probability of zero.
+ Similarly, numerical problems can occur if there is little
+ or no 0-inflation, or when the sample size is small.
Half-stepping is not uncommon.
- If failure to converge occurs, try using combinations of arguments
+ Successful convergence is sensitive to the initial values, therefore
+ if failure to converge occurs, try using combinations of arguments
\code{stepsize} (in \code{\link{vglm.control}}),
\code{imethod},
+ \code{imunb},
\code{ishrinkage},
\code{ipstr0},
\code{isize}, and/or
\code{zero} if there are explanatory variables.
+ Else try fitting an ordinary \code{\link{negbinomial}} model
+ or a \code{\link{zipoisson}} model.
+
+
+% An infinite loop might occur if some of the fitted values
+% (the means) are too close to 0.
- An infinite loop might occur if some of the fitted values
- (the means) are too close to 0.
- This \pkg{VGAM} family function is computationally expensive
- and usually runs slowly;
+ This \pkg{VGAM} family function can be computationally expensive
+ and can run slowly;
setting \code{trace = TRUE} is useful for monitoring convergence.
+
+% 20160208; A bug caused this, but has been fixed now:
+% And \code{\link{zinegbinomial}} may converge slowly when
+% the estimated \eqn{k} parameter is less than 1;
+% and get slower as it approaches 0.
+
+
+
}
\seealso{
@@ -197,16 +250,15 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
}
\examples{
-\dontrun{ # Example 1
+# Example 1
ndata <- data.frame(x2 = runif(nn <- 1000))
ndata <- transform(ndata, pstr0 = logit(-0.5 + 1 * x2, inverse = TRUE),
munb = exp( 3 + 1 * x2),
size = exp( 0 + 2 * x2))
ndata <- transform(ndata,
- y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0),
- y2 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
+ y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
with(ndata, table(y1)["0"] / sum(table(y1)))
-fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), data = ndata)
+fit <- vglm(y1 ~ x2, zinegbinomial(zero = NULL), data = ndata)
coef(fit, matrix = TRUE)
summary(fit)
head(cbind(fitted(fit), with(ndata, (1 - pstr0) * munb)))
@@ -214,6 +266,7 @@ round(vcov(fit), 3)
# Example 2: RR-ZINB could also be called a COZIVGLM-ZINB-2
+\dontrun{
ndata <- data.frame(x2 = runif(nn <- 2000))
ndata <- transform(ndata, x3 = runif(nn))
ndata <- transform(ndata, eta1 = 3 + 1 * x2 + 2 * x3)
@@ -232,3 +285,37 @@ Coef(rrzinb)
\keyword{models}
\keyword{regression}
+%zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+% type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+% ipstr0 = NULL, isize = NULL, zero = "size",
+% imethod = 1, ishrinkage = 0.95,
+% probs.y = 0.75, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% gpstr0 = 1:19/20, gsize = exp((-4):4),
+% nsimEIM = 250)
+
+
+%zinegbinomial(zero = "size",
+% type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+% nsimEIM = 250, cutoff.prob = 0.999, max.support = 2000,
+% max.chunk.MB = 30,
+% lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+% imethod = 1, ipstr0 = NULL, imunb = NULL,
+% probs.y = 0.85, ishrinkage = 0.95,
+% isize = NULL, gpstr0 = 1:19/20, gsize = exp((-4):4))
+%zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
+% type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+% isize = NULL, ionempstr0 = NULL,
+% zero = c("size", "onempstr0"),
+% imethod = 1, ishrinkage = 0.95,
+% probs.y = 0.75, cutoff.prob = 0.999,
+% max.support = 2000, max.chunk.MB = 30,
+% gonempstr0 = 1:19/20, gsize = exp((-4):4),
+% nsimEIM = 250)
+
+
+%ndata <- transform(ndata,
+% y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0),
+% y2 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
+%with(ndata, table(y1)["0"] / sum(table(y1)))
+%fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), data = ndata)
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index 5c22b6a..5ff413e 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -13,7 +13,7 @@
\usage{
zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
imu12 = NULL, iphi12 = NULL, ioratio = NULL,
- zero = 2:3, tol = 0.001, addRidge = 0.001)
+ zero = c("phi12", "oratio"), tol = 0.001, addRidge = 0.001)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 25794ae..85c7184 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -9,14 +9,15 @@
}
\usage{
-zipoisson(lpstr0 = "logit", llambda = "loge",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ipstr0 = NULL, ilambda = NULL,
- imethod = 1, ishrinkage = 0.8, zero = NULL)
-zipoissonff(llambda = "loge", lonempstr0 = "logit",
- type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
- ilambda = NULL, ionempstr0 = NULL,
- imethod = 1, ishrinkage = 0.8, zero = -2)
+zipoisson(lpstr0 = "logit", llambda = "loge", type.fitted =
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"), ipstr0 =
+ NULL, ilambda = NULL, gpstr0 = NULL, imethod = 1,
+ ishrinkage = 0.95, probs.y = 0.35, zero = NULL)
+zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
+ c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
+ ilambda = NULL, ionempstr0 = NULL, gonempstr0 = NULL,
+ imethod = 1, ishrinkage = 0.95, probs.y = 0.35, zero =
+ "onempstr0")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -80,14 +81,21 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
}
\item{zero}{
- An integer specifying which linear/additive predictor is modelled as
- intercepts only. If given, the value must be either 1 or 2, and the
+ Specifies which linear/additive predictors are to be modelled as
+ intercept-only. If given, the value can be either 1 or 2, and the
default is none of them. Setting \code{zero = 1} makes \eqn{\phi}{phi}
a single parameter.
See \code{\link{CommonVGAMffArguments}} for more information.
}
+ \item{gpstr0, gonempstr0, probs.y}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+
+ }
+
+
}
\details{
These models are a mixture of a Poisson distribution and the value 0;
@@ -315,3 +323,18 @@ summary(rrzip)
%# lambda <- (fitted(fit1, type = "mean") / fitted(fit1, type = "onempstr0"))[1]
%# (prob.struc.0 <- pstr0 / dzipois(x = 0, lambda = lambda, pstr0 = pstr0))
% fit at misc$pobs0 # Estimate of P(Y = 0)
+
+
+%zipoisson(lpstr0 = "logit", llambda = "loge",
+% type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+% ipstr0 = NULL, ilambda = NULL,
+% imethod = 1, ishrinkage = 0.8, zero = NULL)
+%zipoissonff(llambda = "loge", lonempstr0 = "logit",
+% type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+% ilambda = NULL, ionempstr0 = NULL,
+% imethod = 1, ishrinkage = 0.8, zero = "onempstr0")
+
+
+
+
+
diff --git a/src/tyeepolygamma3.c b/src/tyeepolygamma3.c
index 62a1665..0c2c11a 100644
--- a/src/tyeepolygamma3.c
+++ b/src/tyeepolygamma3.c
@@ -13,6 +13,11 @@ void tyee_C_dgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvh
void tyee_C_tgam1w(double sjwyig9t[], double lfu2qhid[], int *f8yswcat, int *dvhw1ulq);
void tyee_C_cum8sum(double ci1oyxas[], double lfu2qhid[], int *nlfu2qhid,
double valong[], int *ntot, int *notdvhw1ulq);
+void eimpnbinomspecialp(int *interceptonly, double *nrows,
+ double *ncols, double *sizevec,
+ double *pnbinommat,
+ double *rowsums);
+
void tyee_C_vdgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
@@ -135,3 +140,50 @@ void tyee_C_cum8sum(double ci1oyxas[], double lfu2qhid[], int *nlfu2qhid,
*notdvhw1ulq = (iii == *nlfu2qhid) ? 0 : 1;
}
+
+
+
+
+
+void eimpnbinomspecialp(int *interceptonly,
+ double *nrows,
+ double *ncols,
+ double *sizevec, /* length is nrows */
+ double *pnbinommat,
+ double *rowsums) {
+
+
+ double ayfnwr1v, yq6lorbx, tmp1 = 0.0, tmp2;
+ double *fpdlcqk9rowsums, *fpdlcqk9sizevec;
+
+
+ if (*interceptonly == 1) {
+ for (yq6lorbx = 0; yq6lorbx < *ncols; yq6lorbx++) {
+ tmp2 = (*sizevec + yq6lorbx);
+ tmp1 += *pnbinommat++ / (tmp2 * tmp2);
+ }
+ *rowsums = tmp1;
+ return;
+ }
+
+
+
+ fpdlcqk9rowsums = rowsums;
+ for (ayfnwr1v = 0; ayfnwr1v < *nrows; ayfnwr1v++)
+ *fpdlcqk9rowsums++ = 0.0;
+
+ for (yq6lorbx = 0; yq6lorbx < *ncols; yq6lorbx++) {
+ fpdlcqk9rowsums = rowsums;
+ fpdlcqk9sizevec = sizevec;
+ for (ayfnwr1v = 0; ayfnwr1v < *nrows; ayfnwr1v++) {
+ tmp2 = (yq6lorbx + *fpdlcqk9sizevec++);
+ tmp1 = *pnbinommat++ / (tmp2 * tmp2);
+ *fpdlcqk9rowsums++ += tmp1;
+ }
+ }
+}
+
+
+
+
+
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list